refactor vram lock/unlock code into new vmem.cpp functions
parent 13a2f8bc61
commit ad5ab6240b
@@ -659,3 +659,101 @@ void _vmem_enable_mmu(bool enable)
 		_vmem_set_p0_mappings();
 	}
 }
+
+void _vmem_protect_vram(u32 addr, u32 size)
+{
+	addr &= VRAM_MASK;
+	if (!mmu_enabled())
+	{
+		mem_region_lock(virt_ram_base + 0x04000000 + addr, size);	// P0
+		//mem_region_lock(virt_ram_base + 0x06000000 + addr, size);	// P0 - mirror
+		if (VRAM_SIZE == 0x800000)
+		{
+			// wraps when only 8MB VRAM
+			mem_region_lock(virt_ram_base + 0x04000000 + addr + VRAM_SIZE, size);
+			//mem_region_lock(virt_ram_base + 0x06000000 + addr + VRAM_SIZE, size);
+		}
+	}
+	if (_nvmem_4gb_space())
+	{
+		mem_region_lock(virt_ram_base + 0x84000000 + addr, size);	// P1
+		//mem_region_lock(virt_ram_base + 0x86000000 + addr, size);	// P1 - mirror
+		// We should also lock P2 and P3, and the mirrors, but they don't seem to be used...
+		//mem_region_lock(virt_ram_base + 0xA4000000 + addr, size);	// P2
+		//mem_region_lock(virt_ram_base + 0xA6000000 + addr, size);	// P2 - mirror
+		//mem_region_lock(virt_ram_base + 0xC4000000 + addr, size);	// P3
+		//mem_region_lock(virt_ram_base + 0xC6000000 + addr, size);	// P3 - mirror
+		if (VRAM_SIZE == 0x800000)
+		{
+			mem_region_lock(virt_ram_base + 0x84000000 + addr + VRAM_SIZE, size);
+			//mem_region_lock(virt_ram_base + 0x86000000 + addr + VRAM_SIZE, size);
+			//mem_region_lock(virt_ram_base + 0xA4000000 + addr + VRAM_SIZE, size);
+			//mem_region_lock(virt_ram_base + 0xC4000000 + addr + VRAM_SIZE, size);
+		}
+		vmem32_protect_vram(addr, size);
+	}
+}
+
+void _vmem_unprotect_vram(u32 addr, u32 size)
+{
+	addr &= VRAM_MASK;
+	if (!mmu_enabled())
+	{
+		mem_region_unlock(virt_ram_base + 0x04000000 + addr, size);	// P0
+		//mem_region_unlock(virt_ram_base + 0x06000000 + addr, size);	// P0 - mirror
+		if (VRAM_SIZE == 0x800000)
+		{
+			// wraps when only 8MB VRAM
+			mem_region_unlock(virt_ram_base + 0x04000000 + addr + VRAM_SIZE, size);
+			//mem_region_unlock(virt_ram_base + 0x06000000 + addr + VRAM_SIZE, size);
+		}
+	}
+	if (_nvmem_4gb_space())
+	{
+		mem_region_unlock(virt_ram_base + 0x84000000 + addr, size);	// P1
+		//mem_region_unlock(virt_ram_base + 0x86000000 + addr, size);	// P1 - mirror
+		// We should also unlock P2 and P3, and the mirrors, but they don't seem to be used...
+		//mem_region_unlock(virt_ram_base + 0xA4000000 + addr, size);	// P2
+		//mem_region_unlock(virt_ram_base + 0xA6000000 + addr, size);	// P2 - mirror
+		//mem_region_unlock(virt_ram_base + 0xC4000000 + addr, size);	// P3
+		//mem_region_unlock(virt_ram_base + 0xC6000000 + addr, size);	// P3 - mirror
+		if (VRAM_SIZE == 0x800000)
+		{
+			mem_region_unlock(virt_ram_base + 0x84000000 + addr + VRAM_SIZE, size);
+			//mem_region_unlock(virt_ram_base + 0x86000000 + addr + VRAM_SIZE, size);
+			//mem_region_unlock(virt_ram_base + 0xA4000000 + addr + VRAM_SIZE, size);
+			//mem_region_unlock(virt_ram_base + 0xC4000000 + addr + VRAM_SIZE, size);
+		}
+		vmem32_unprotect_vram(addr, size);
+	}
+}
+
+u32 _vmem_get_vram_offset(void *addr)
+{
+	ptrdiff_t offset = (u8*)addr - virt_ram_base;
+	if (_nvmem_4gb_space())
+	{
+		if (mmu_enabled())
+		{
+			// Only kernel mirrors
+			if (offset < 0x80000000 || offset >= 0xE0000000)
+				return -1;
+		}
+		else
+		{
+			if (offset < 0 || offset >= 0xE0000000)
+				return -1;
+		}
+		offset &= 0x1FFFFFFF;
+	}
+	else
+	{
+		if (offset < 0 || offset >= 0x20000000)
+			return -1;
+	}
+	if ((offset >> 24) != 4)
+		return -1;
+	verify((((u8*)addr - virt_ram_base) >> 29) == 0 || (((u8*)addr - virt_ram_base) >> 29) == 4);	// other areas aren't mapped atm
+
+	return offset & VRAM_MASK;
+}
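Note on the new _vmem_get_vram_offset(): it reverse-maps a host fault address to a VRAM offset by reducing the pointer to a 29-bit SH4 physical offset and accepting only area 4 (0x04xxxxxx). The following standalone sketch of that arithmetic is illustrative only, not part of the commit; it assumes 4GB mode with the MMU off and 8MB VRAM (VRAM_MASK == 0x7FFFFF):

    // Illustrative sketch, not from the commit: 4GB mode, MMU off, 8MB VRAM.
    #include <cassert>
    #include <cstdint>

    uint32_t vram_offset_sketch(uint64_t offset)   // offset = fault address - virt_ram_base
    {
        if (offset >= 0xE0000000)                  // past P3: not a guest address
            return (uint32_t)-1;
        offset &= 0x1FFFFFFF;                      // fold the P0/P1/P2/P3 mirrors to a physical offset
        if ((offset >> 24) != 4)                   // only area 4 (0x04xxxxxx) is VRAM
            return (uint32_t)-1;
        return (uint32_t)(offset & 0x7FFFFF);      // wrap into the 8MB VRAM
    }

    int main()
    {
        assert(vram_offset_sketch(0x84001000) == 0x1000);        // write through P1
        assert(vram_offset_sketch(0x04801000) == 0x1000);        // 8MB wrap mirror in P0
        assert(vram_offset_sketch(0x0C001000) == (uint32_t)-1);  // system RAM, not VRAM
    }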
@@ -117,3 +117,7 @@ void _vmem_enable_mmu(bool enable);
 #define MAP_RAM_START_OFFSET 0
 #define MAP_VRAM_START_OFFSET (MAP_RAM_START_OFFSET+RAM_SIZE)
 #define MAP_ARAM_START_OFFSET (MAP_VRAM_START_OFFSET+VRAM_SIZE)
+
+void _vmem_protect_vram(u32 addr, u32 size);
+void _vmem_unprotect_vram(u32 addr, u32 size);
+u32 _vmem_get_vram_offset(void *addr);
@@ -62,7 +62,11 @@ static const u64 AREA7_ADDRESS = 0x7C000000L;
 #define VRAM_PROT_SEGMENT (1024 * 1024)	// vram protection regions are grouped by 1MB segment
 
 static std::unordered_set<u32> vram_mapped_pages;
-static std::vector<vram_block*> vram_blocks[VRAM_SIZE / VRAM_PROT_SEGMENT];
+struct vram_lock {
+	u32 start;
+	u32 end;
+};
+static std::vector<vram_lock> vram_blocks[VRAM_SIZE / VRAM_PROT_SEGMENT];
 static u8 sram_mapped_pages[KERNEL_SPACE / PAGE_SIZE / 8];	// bit set to 1 if page is mapped
 
 bool vmem32_inited;
@@ -144,27 +148,29 @@ static void vmem32_unprotect_buffer(u32 start, u32 size)
 #endif
 }
 
-void vmem32_protect_vram(vram_block *block)
+void vmem32_protect_vram(u32 addr, u32 size)
 {
 	if (!vmem32_inited)
 		return;
-	for (int i = block->start / VRAM_PROT_SEGMENT; i <= block->end / VRAM_PROT_SEGMENT; i++)
+	for (int page = (addr & VRAM_MASK) / VRAM_PROT_SEGMENT; page <= ((addr & VRAM_MASK) + size - 1) / VRAM_PROT_SEGMENT; page++)
 	{
-		vram_blocks[i].push_back(block);
+		vram_blocks[page].push_back({ addr, addr + size - 1 });
 	}
 }
-void vmem32_unprotect_vram(vram_block *block)
+void vmem32_unprotect_vram(u32 addr, u32 size)
 {
 	if (!vmem32_inited)
 		return;
-	for (int page = block->start / VRAM_PROT_SEGMENT; page <= block->end / VRAM_PROT_SEGMENT; page++)
+	for (int page = (addr & VRAM_MASK) / VRAM_PROT_SEGMENT; page <= ((addr & VRAM_MASK) + size - 1) / VRAM_PROT_SEGMENT; page++)
 	{
-		for (int i = 0; i < vram_blocks[page].size(); i++)
-			if (vram_blocks[page][i] == block)
-			{
-				vram_blocks[page].erase(vram_blocks[page].begin() + i);
-				break;
-			}
+		std::vector<vram_lock>& block_list = vram_blocks[page];
+		for (auto it = block_list.begin(); it != block_list.end(); )
+		{
+			if (it->start >= addr && it->end < addr + size)
+				it = block_list.erase(it);
+			else
+				it++;
+		}
 	}
 }
 
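The per-segment vram_blocks[] array buckets locked ranges by 1MB of VRAM so the MMU mapping path only scans locks that can overlap the page being mapped. A rough sketch of the bucketing math with hypothetical values (not from the commit):

    // Illustrative only: which 1MB buckets does a lock of [addr, addr+size) touch?
    // Assumes VRAM_PROT_SEGMENT == 1MB and VRAM_MASK == 0x7FFFFF, as in the patch.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t SEGMENT = 1024 * 1024;
        uint32_t addr = 0x0FF000, size = 0x2000;                    // straddles a segment boundary
        uint32_t first = (addr & 0x7FFFFF) / SEGMENT;               // 0
        uint32_t last  = ((addr & 0x7FFFFF) + size - 1) / SEGMENT;  // 1
        printf("segments %u..%u\n", first, last);                   // lock registered in both buckets
    }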
@@ -258,15 +264,15 @@ static u32 vmem32_map_mmu(u32 address, bool write)
 	}
 	verify(vmem32_map_buffer(vpn, page_size, offset, page_size, (entry->Data.PR & 1) != 0) != NULL);
 	u32 end = start + page_size;
-	const vector<vram_block *>& blocks = vram_blocks[start / VRAM_PROT_SEGMENT];
+	const vector<vram_lock>& blocks = vram_blocks[start / VRAM_PROT_SEGMENT];
 
 	vramlist_lock.Lock();
 	for (int i = blocks.size() - 1; i >= 0; i--)
 	{
-		if (blocks[i]->start < end && blocks[i]->end >= start)
+		if (blocks[i].start < end && blocks[i].end >= start)
 		{
-			u32 prot_start = max(start, blocks[i]->start);
-			u32 prot_size = min(end, blocks[i]->end + 1) - prot_start;
+			u32 prot_start = max(start, blocks[i].start);
+			u32 prot_size = min(end, blocks[i].end + 1) - prot_start;
 			prot_size += prot_start % PAGE_SIZE;
 			prot_start &= ~PAGE_MASK;
 			vmem32_protect_buffer(vpn + (prot_start & (page_size - 1)), prot_size);
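When vmem32_map_mmu() maps a guest page over locked VRAM, the protected span is the intersection of the page and the lock, then grown and aligned down to host-page granularity, since mprotect-style locking cannot cover partial pages. A worked example of that clamp-and-align step, with hypothetical values:

    // Illustrative only: PAGE_SIZE == 4096, mapped page [0x100000, 0x101000),
    // lock covering [0x100800, 0x101FFF].
    #include <algorithm>
    #include <cstdint>

    int main()
    {
        const uint32_t PAGE_SIZE = 4096, PAGE_MASK = PAGE_SIZE - 1;
        uint32_t start = 0x100000, end = 0x101000;
        uint32_t lock_start = 0x100800, lock_end = 0x101FFF;

        uint32_t prot_start = std::max(start, lock_start);               // 0x100800
        uint32_t prot_size  = std::min(end, lock_end + 1) - prot_start;  // 0x800
        prot_size += prot_start % PAGE_SIZE;  // grow so the start can be aligned down
        prot_start &= ~PAGE_MASK;             // 0x100000
        // result: the whole host page [0x100000, 0x101000) gets protected
    }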
@@ -24,8 +24,8 @@ bool vmem32_init();
 void vmem32_term();
 bool vmem32_handle_signal(void *fault_addr, bool write, u32 exception_pc);
 void vmem32_flush_mmu();
-void vmem32_protect_vram(vram_block *block);
-void vmem32_unprotect_vram(vram_block *block);
+void vmem32_protect_vram(u32 addr, u32 size);
+void vmem32_unprotect_vram(u32 addr, u32 size);
 
 extern bool vmem32_inited;
 static inline bool vmem32_enabled() {
@@ -3,6 +3,7 @@
 #include "hw/pvr/pvr_mem.h"
 #include "rend/TexCache.h"
 #include "rend/gui.h"
+#include "hw/mem/_vmem.h"
 
 #include "deps/zlib/zlib.h"
 
@@ -205,7 +206,7 @@ TA_context* read_frame(const char* file, u8* vram_ref = NULL) {
 	fread(&t, 1, sizeof(t), fw);
 	verify(t == VRAM_SIZE);
 
-	vram.UnLockRegion(0, VRAM_SIZE);
+	_vmem_unprotect_vram(0, VRAM_SIZE);
 
 	uLongf compressed_size;
 
@@ -208,34 +208,7 @@ vram_block* libCore_vramlock_Lock(u32 start_offset64,u32 end_offset64,void* user
 {
 	vramlist_lock.Lock();
 
-	vram.LockRegion(block->start, block->len);
-
-	//TODO: Fix this for 32M wrap as well
-	if (_nvmem_enabled() && VRAM_SIZE == 0x800000) {
-		vram.LockRegion(block->start + VRAM_SIZE, block->len);
-	}
-	if (!mmu_enabled())
-	{
-		if (_nvmem_4gb_space())
-		{
-			// In 4GB mode, vram.LockRegion() locks in the P1 area only so we also need to lock P0
-			// We should also lock P2 and P3 but they don't seem to be used...
-			mem_region_lock(virt_ram_base + 0x04000000 + block->start, block->len);
-			//mem_region_lock(virt_ram_base + 0xA4000000 + block->start, block->len);
-			//mem_region_lock(virt_ram_base + 0xC4000000 + block->start, block->len);
-			if (VRAM_SIZE == 0x800000)
-			{
-				mem_region_lock(virt_ram_base + 0x04000000 + block->start + VRAM_SIZE, block->len);
-				//mem_region_lock(virt_ram_base + 0xA4000000 + block->start + VRAM_SIZE, block->len);
-				//mem_region_lock(virt_ram_base + 0xC4000000 + block->start + VRAM_SIZE, block->len);
-			}
-		}
-	}
-	else
-	{
-		vmem32_protect_vram(block);
-	}
-
+	_vmem_protect_vram(block->start, block->len);
 	vramlock_list_add(block);
 
 	vramlist_lock.Unlock();
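The call site above collapses to a single call because the new helper takes a plain VRAM offset and size and handles every mirror internally. A hedged sketch of the resulting caller contract (the values are hypothetical):

    // Illustrative only: addr is masked with VRAM_MASK inside the helper, so
    // callers pass the block's VRAM offset directly and ignore mirrors/wrap.
    u32 start = 0x00100000;            // texture start as a VRAM offset
    u32 len   = 0x2000;                // texture size in bytes
    _vmem_protect_vram(start, len);    // write-protects P0/P1 (+ 8MB wrap) as needed
    // ... later, when the texture is invalidated or evicted ...
    _vmem_unprotect_vram(start, len);  // symmetric unlock of the same mirrors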
@@ -271,19 +244,7 @@ bool VramLockedWriteOffset(size_t offset)
 		}
 		list->clear();
 
-		u32 aligned_offset = (u32)offset & ~(PAGE_SIZE - 1);
-		vram.UnLockRegion(aligned_offset, PAGE_SIZE);
-
-		//TODO: Fix this for 32M wrap as well
-		if (_nvmem_enabled() && VRAM_SIZE == 0x800000) {
-			vram.UnLockRegion(aligned_offset + VRAM_SIZE, PAGE_SIZE);
-		}
-		if (_nvmem_4gb_space() && !mmu_enabled())
-		{
-			mem_region_unlock(virt_ram_base + 0x04000000 + aligned_offset, PAGE_SIZE);
-			if (VRAM_SIZE == 0x800000)
-				mem_region_unlock(virt_ram_base + 0x04000000 + aligned_offset + VRAM_SIZE, PAGE_SIZE);
-		}
+		_vmem_unprotect_vram((u32)offset, PAGE_SIZE);
 
 		vramlist_lock.Unlock();
 	}
@@ -296,20 +257,10 @@ bool VramLockedWriteOffset(size_t offset)
 
 bool VramLockedWrite(u8* address)
 {
-	size_t offset = address - vram.data;
-
-	if (offset < 0x01000000)
-		return VramLockedWriteOffset(offset & (VRAM_SIZE - 1));
-	if (_nvmem_4gb_space() && !mmu_enabled())
-	{
-		offset = address - virt_ram_base;
-		if (offset >= 0x04000000 && offset < 0x05000000)
-			return VramLockedWriteOffset((offset - 0x04000000) & (VRAM_SIZE - 1));
-		// 32MB wrap not set yet
-		//if (offset >= 0x06000000 && offset < 0x07000000)
-		//	return VramLockedWriteOffset((offset - 0x06000000) & (VRAM_SIZE - 1));
-	}
-	return false;
+	u32 offset = _vmem_get_vram_offset(address);
+	if (offset == -1)
+		return false;
+	return VramLockedWriteOffset(offset);
 }
 
 //unlocks mem
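With VramLockedWrite() reduced to _vmem_get_vram_offset() plus VramLockedWriteOffset(), the write-fault path is uniform across mapping modes. A condensed, hypothetical sketch of the flow; the handler wiring below is illustrative, only the two calls come from the patch:

    // Illustrative only: called from the host access-violation handler.
    bool handle_vram_fault(u8 *fault_addr)
    {
        u32 offset = _vmem_get_vram_offset(fault_addr);  // -1 if not a VRAM address
        if (offset == (u32)-1)
            return false;                 // not ours; let the next handler run
        // Invalidates overlapping textures, then unprotects the page in all
        // mirrors via _vmem_unprotect_vram(offset, PAGE_SIZE).
        return VramLockedWriteOffset(offset);
    }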
@@ -328,8 +279,7 @@ void libCore_vramlock_Unlock_block_wb(vram_block* block)
 		msgboxf("Error: block end is after vram, skipping unlock", MBX_OK);
 	else
 	{
-		if (mmu_enabled())
-			vmem32_unprotect_vram(block);
+		_vmem_unprotect_vram(block->start, block->len);
 		vramlock_list_remove(block);
 		//more work needed
 		free(block);
@@ -585,7 +585,7 @@ void ReadRTTBuffer() {
 			}
 		}
 	}
-	vram.UnLockRegion(0, 2 * vram.size);
+	_vmem_unprotect_vram(0, VRAM_SIZE);
 
 	glPixelStorei(GL_PACK_ALIGNMENT, 1);
 	u16 *dst = (u16 *)&vram[tex_addr];
@@ -644,14 +644,8 @@ void ReadRTTBuffer() {
 		// Restore VRAM locks
 		for (TexCacheIter i = TexCache.begin(); i != TexCache.end(); i++)
 		{
-			if (i->second.lock_block != NULL) {
-				vram.LockRegion(i->second.sa_tex, i->second.sa + i->second.size - i->second.sa_tex);
-
-				//TODO: Fix this for 32M wrap as well
-				if (_nvmem_enabled() && VRAM_SIZE == 0x800000) {
-					vram.LockRegion(i->second.sa_tex + VRAM_SIZE, i->second.sa + i->second.size - i->second.sa_tex);
-				}
-			}
+			if (i->second.lock_block != NULL)
+				_vmem_protect_vram(i->second.sa_tex, i->second.sa + i->second.size - i->second.sa_tex);
 		}
 	}
 	else