updated memory code to share physical regions between multiple address spaces

commit 4966985f89
parent bc632abacd
Anthony Pesch, 2016-09-27 23:26:20 -07:00

17 changed files with 402 additions and 320 deletions
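Note on the new API (an illustrative sketch, not part of the commit): the separate physical_region/mmio_region tables are collapsed into a single table of named memory_region entries, and creating a region under an existing name returns that region instead of allocating a new one — which is what lets several address spaces mount the same backing RAM. The fragment below uses only functions that appear in this diff; the include path and the wrapper function are assumed for illustration.

#include <stdint.h>
#include "hw/memory.h" /* header path assumed; declarations changed in this commit */

/* illustration only: two mounts under the same name share one region */
static void example_shared_region(struct memory *memory) {
  struct memory_region *a =
      memory_create_physical_region(memory, "aica wave ram", 0x00800000);
  struct memory_region *b =
      memory_create_physical_region(memory, "aica wave ram", 0x00800000);
  /* a == b: the second call finds the existing region by name instead of
     allocating a new one, so both mounts share a single shmem_offset */

  /* any component can resolve a host pointer into the shared backing memory
     by name, independent of where it is mapped in a guest address space */
  uint8_t *wave_ram = memory_translate(memory, "aica wave ram", 0x00000000);
  (void)a;
  (void)b;
  (void)wave_ram;
}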

View File

@@ -152,6 +152,7 @@ set(REDREAM_SOURCES
   src/hw/arm/arm.c
   src/hw/gdrom/disc.c
   src/hw/gdrom/gdrom.c
+  src/hw/holly/g2.c
   src/hw/holly/holly.c
   src/hw/holly/pvr.c
   src/hw/holly/ta.c

View File

@@ -41,7 +41,7 @@ static bool emu_load_bios(struct emu *emu, const char *path) {
     return false;
   }
-  uint8_t *bios = as_translate(emu->dc->sh4->memory->space, BIOS_BEGIN);
+  uint8_t *bios = memory_translate(emu->dc->memory, "system rom", BIOS_BEGIN);
   int n = (int)fread(bios, sizeof(uint8_t), size, fp);
   fclose(fp);
@@ -73,7 +73,7 @@ static bool emu_load_flash(struct emu *emu, const char *path) {
     return false;
   }
-  uint8_t *flash = as_translate(emu->dc->sh4->memory->space, FLASH_BEGIN);
+  uint8_t *flash = memory_translate(emu->dc->memory, "system rom", FLASH_BEGIN);
   int n = (int)fread(flash, sizeof(uint8_t), size, fp);
   fclose(fp);
@@ -96,7 +96,7 @@ static bool emu_launch_bin(struct emu *emu, const char *path) {
   fseek(fp, 0, SEEK_SET);
   // load to 0x0c010000 (area 3) which is where 1ST_READ.BIN is loaded to
-  uint8_t *data = as_translate(emu->dc->sh4->memory->space, 0x0c010000);
+  uint8_t *data = memory_translate(emu->dc->memory, "system ram", 0x00010000);
   int n = (int)fread(data, sizeof(uint8_t), size, fp);
   fclose(fp);

View File

@@ -92,8 +92,8 @@ static bool aica_init(struct device *dev) {
   struct dreamcast *dc = aica->dc;
   aica->arm = dc->arm;
-  aica->aica_regs = as_translate(dc->sh4->memory->space, 0x00700000);
-  aica->wave_ram = as_translate(dc->sh4->memory->space, 0x00800000);
+  aica->aica_regs = memory_translate(dc->memory, "aica reg ram", 0x00000000);
+  aica->wave_ram = memory_translate(dc->memory, "aica wave ram", 0x00000000);
   aica->common_data = (struct common_data *)(aica->aica_regs + 0x2800);
   arm_suspend(aica->arm);
@@ -113,7 +113,9 @@ void aica_destroy(struct aica *aica) {
 // clang-format off
 AM_BEGIN(struct aica, aica_reg_map);
-  AM_RANGE(0x00000000, 0x00010fff) AM_HANDLE((r8_cb)&aica_reg_r8,
+  AM_RANGE(0x00000000, 0x00010fff) AM_MOUNT("aica reg ram")
+  AM_RANGE(0x00000000, 0x00010fff) AM_HANDLE("aica reg",
+                                             (r8_cb)&aica_reg_r8,
                                              (r16_cb)&aica_reg_r16,
                                              (r32_cb)&aica_reg_r32,
                                              NULL,
@@ -124,7 +126,9 @@ AM_BEGIN(struct aica, aica_reg_map);
 AM_END();
 AM_BEGIN(struct aica, aica_data_map);
-  AM_RANGE(0x00000000, 0x00ffffff) AM_HANDLE((r8_cb)&aica_wave_r8,
+  AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT("aica wave ram")
+  AM_RANGE(0x00000000, 0x007fffff) AM_HANDLE("aica wave",
+                                             (r8_cb)&aica_wave_r8,
                                              (r16_cb)&aica_wave_r16,
                                              (r32_cb)&aica_wave_r32,
                                              NULL,

View File

@@ -7,12 +7,12 @@ struct arm {
 };
 static bool arm_init(struct device *dev) {
-  // struct arm *arm = container_of(dev, struct arm, base);
+  // struct arm *arm = (struct arm *)dev;
   return true;
 }
 static void arm_run(struct device *dev, int64_t ns) {
-  // struct arm *arm = container_of(dev, struct arm, base);
+  // struct arm *arm = (struct arm *)dev;
 }
 void arm_suspend(struct arm *arm) {
@@ -26,10 +26,12 @@ void arm_resume(struct arm *arm) {
 struct arm *arm_create(struct dreamcast *dc) {
   struct arm *arm = dc_create_device(dc, sizeof(struct arm), "arm", &arm_init);
   arm->execute = dc_create_execute_interface(&arm_run);
+  arm->memory = dc_create_memory_interface(dc, &arm_data_map);
   return arm;
 }
 void arm_destroy(struct arm *arm) {
+  dc_destroy_memory_interface(arm->memory);
   dc_destroy_execute_interface(arm->execute);
   dc_destroy_device((struct device *)arm);
 }

View File

@@ -5,6 +5,7 @@
 #include "hw/arm/arm.h"
 #include "hw/debugger.h"
 #include "hw/gdrom/gdrom.h"
+#include "hw/holly/g2.h"
 #include "hw/holly/holly.h"
 #include "hw/holly/pvr.h"
 #include "hw/holly/ta.h"
@@ -160,6 +161,7 @@ struct dreamcast *dc_create(struct rb *rb) {
   dc->arm = arm_create(dc);
   dc->aica = aica_create(dc);
   dc->holly = holly_create(dc);
+  dc->g2 = g2_create(dc);
   dc->gdrom = gdrom_create(dc);
   dc->maple = maple_create(dc);
   dc->pvr = pvr_create(dc);
@@ -178,6 +180,7 @@ void dc_destroy(struct dreamcast *dc) {
   pvr_destroy(dc->pvr);
   maple_destroy(dc->maple);
   gdrom_destroy(dc->gdrom);
+  g2_destroy(dc->g2);
   holly_destroy(dc->holly);
   aica_destroy(dc->aica);
   arm_destroy(dc->arm);

View File

@@ -13,6 +13,7 @@ struct arm;
 struct debugger;
 struct device;
 struct dreamcast;
+struct g2;
 struct gdrom;
 struct holly;
 struct maple;
@@ -107,6 +108,7 @@ struct dreamcast {
   struct arm *arm;
   struct aica *aica;
   struct holly *holly;
+  struct g2 *g2;
   struct gdrom *gdrom;
   struct maple *maple;
   struct pvr *pvr;

src/hw/holly/g2.c (new file, 36 lines)
View File

@@ -0,0 +1,36 @@
+#include "hw/dreamcast.h"
+
+struct g2 {
+  struct device;
+};
+
+static bool g2_init(struct device *dev) {
+  return true;
+}
+
+struct g2 *g2_create(struct dreamcast *dc) {
+  struct g2 *g2 = dc_create_device(dc, sizeof(struct g2), "g2", &g2_init);
+  return g2;
+}
+
+void g2_destroy(struct g2 *g2) {
+  dc_destroy_device((struct device *)g2);
+}
+
+// clang-format off
+AM_BEGIN(struct g2, g2_modem_map);
+  AM_RANGE(0x00000000, 0x0007ffff) AM_MOUNT("modem reg")
+AM_END();
+
+AM_BEGIN(struct g2, g2_expansion0_map);
+  AM_RANGE(0x00000000, 0x00ffffff) AM_MOUNT("expansion 0")
+AM_END();
+
+AM_BEGIN(struct g2, g2_expansion1_map);
+  AM_RANGE(0x00000000, 0x008fffff) AM_MOUNT("expansion 1")
+AM_END();
+
+AM_BEGIN(struct g2, g2_expansion2_map);
+  AM_RANGE(0x00000000, 0x03ffffff) AM_MOUNT("expansion 2")
+AM_END();
+// clang-format on

src/hw/holly/g2.h (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
+#ifndef G2_H
+#define G2_H
+
+struct dreamcast;
+struct g2;
+
+struct g2 *g2_create(struct dreamcast *dc);
+void g2_destroy(struct g2 *g2);
+
+AM_DECLARE(g2_modem_map);
+AM_DECLARE(g2_expansion0_map);
+AM_DECLARE(g2_expansion1_map);
+AM_DECLARE(g2_expansion2_map);
+
+#endif

View File

@@ -400,7 +400,8 @@ void holly_destroy(struct holly *hl) {
 // clang-format off
 AM_BEGIN(struct holly, holly_reg_map);
-  AM_RANGE(0x00000000, 0x00001fff) AM_HANDLE((r8_cb)&holly_reg_r8,
+  AM_RANGE(0x00000000, 0x00001fff) AM_HANDLE("holly reg",
+                                             (r8_cb)&holly_reg_r8,
                                              (r16_cb)&holly_reg_r16,
                                              (r32_cb)&holly_reg_r32,
                                              NULL,

View File

@@ -167,9 +167,8 @@ static bool pvr_init(struct device *dev) {
   pvr->scheduler = dc->scheduler;
   pvr->holly = dc->holly;
-  pvr->space = dc->sh4->memory->space;
-  pvr->palette_ram = as_translate(pvr->space, 0x005f9000);
-  pvr->video_ram = as_translate(pvr->space, 0x04000000);
+  pvr->palette_ram = memory_translate(dc->memory, "palette ram", 0x00000000);
+  pvr->video_ram = memory_translate(dc->memory, "video ram", 0x00000000);
 #define PVR_REG_R32(name) \
   pvr->reg_data[name] = pvr; \
@@ -207,33 +206,37 @@ void pvr_destroy(struct pvr *pvr) {
 // clang-format off
 AM_BEGIN(struct pvr, pvr_reg_map);
-  AM_RANGE(0x00000000, 0x00000fff) AM_HANDLE(NULL,
-                                             NULL,
-                                             (r32_cb)&pvr_reg_r32,
-                                             NULL,
-                                             NULL,
-                                             NULL,
-                                             (w32_cb)&pvr_reg_w32,
-                                             NULL)
-  AM_RANGE(0x00001000, 0x00001fff) AM_HANDLE(NULL,
-                                             NULL,
-                                             (r32_cb)&pvr_palette_r32,
-                                             NULL,
-                                             NULL,
-                                             NULL,
-                                             (w32_cb)&pvr_palette_w32,
-                                             NULL)
-AM_END()
+  AM_RANGE(0x00001000, 0x00001fff) AM_MOUNT("palette ram")
+  AM_RANGE(0x00000000, 0x00000fff) AM_HANDLE("pvr reg",
+                                             NULL,
+                                             NULL,
+                                             (r32_cb)&pvr_reg_r32,
+                                             NULL,
+                                             NULL,
+                                             NULL,
+                                             (w32_cb)&pvr_reg_w32,
+                                             NULL)
+  AM_RANGE(0x00001000, 0x00001fff) AM_HANDLE("pvr palette",
+                                             NULL,
+                                             NULL,
+                                             (r32_cb)&pvr_palette_r32,
+                                             NULL,
+                                             NULL,
+                                             NULL,
+                                             (w32_cb)&pvr_palette_w32,
+                                             NULL)
+AM_END();
 AM_BEGIN(struct pvr, pvr_vram_map);
-  AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT()
-  AM_RANGE(0x01000000, 0x017fffff) AM_HANDLE((r8_cb)&pvr_vram_interleaved_r8,
-                                             (r16_cb)&pvr_vram_interleaved_r16,
-                                             (r32_cb)&pvr_vram_interleaved_r32,
-                                             NULL,
-                                             (w8_cb)&pvr_vram_interleaved_w8,
-                                             (w16_cb)&pvr_vram_interleaved_w16,
-                                             (w32_cb)&pvr_vram_interleaved_w32,
-                                             NULL)
+  AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT("video ram")
+  AM_RANGE(0x01000000, 0x017fffff) AM_HANDLE("video ram interleaved",
+                                             (r8_cb)&pvr_vram_interleaved_r8,
+                                             (r16_cb)&pvr_vram_interleaved_r16,
+                                             (r32_cb)&pvr_vram_interleaved_r32,
+                                             NULL,
+                                             (w8_cb)&pvr_vram_interleaved_w8,
+                                             (w16_cb)&pvr_vram_interleaved_w16,
+                                             (w32_cb)&pvr_vram_interleaved_w32,
+                                             NULL)
 AM_END();
 // clang-format on

View File

@@ -14,7 +14,6 @@ struct pvr {
   struct device;
   struct scheduler *scheduler;
   struct holly *holly;
-  struct address_space *space;
   uint8_t *palette_ram;
   uint8_t *video_ram;
   uint32_t reg[NUM_PVR_REGS];

View File

@@ -15,6 +15,8 @@
 #include "sys/thread.h"
 #include "ui/nuklear.h"
+DEFINE_OPTION_BOOL(texcache, true, "Enable SIGSEGV-based texture caching");
 #define TA_MAX_CONTEXTS 32
 struct ta_texture_entry {
@@ -463,19 +465,21 @@ static void ta_register_texture(struct ta *ta, union tsp tsp, union tcw tcw) {
   // add write callback in order to invalidate on future writes. the callback
   // address will be page aligned, therefore it will be triggered falsely in
   // some cases. over invalidate in these cases
-  if (!entry->texture_watch) {
-    entry->texture_watch =
-        add_single_write_watch(entry->base.texture, entry->base.texture_size,
-                               &ta_texture_invalidated, entry);
-  }
-  if (entry->base.palette && !entry->palette_watch) {
-    entry->palette_watch =
-        add_single_write_watch(entry->base.palette, entry->base.palette_size,
-                               &ta_palette_invalidated, entry);
-  }
-  // ad new entries to the trace
+  if (OPTION_texcache) {
+    if (!entry->texture_watch) {
+      entry->texture_watch =
+          add_single_write_watch(entry->base.texture, entry->base.texture_size,
+                                 &ta_texture_invalidated, entry);
+    }
+    if (entry->base.palette && !entry->palette_watch) {
+      entry->palette_watch =
+          add_single_write_watch(entry->base.palette, entry->base.palette_size,
+                                 &ta_palette_invalidated, entry);
+    }
+  }
+  // add new entries to the trace
   if (ta->trace_writer && new_entry) {
     trace_writer_insert_texture(ta->trace_writer, tsp, tcw, entry->base.palette,
                                 entry->base.palette_size, entry->base.texture,
@@ -715,8 +719,8 @@ static bool ta_init(struct device *dev) {
   ta->holly = dc->holly;
   ta->pvr = dc->pvr;
   ta->space = dc->sh4->memory->space;
-  ta->video_ram = as_translate(ta->space, 0x04000000);
-  ta->palette_ram = as_translate(ta->space, 0x005f9000);
+  ta->video_ram = memory_translate(dc->memory, "video ram", 0x00000000);
+  ta->palette_ram = memory_translate(dc->memory, "palette ram", 0x00000000);
   for (int i = 0; i < array_size(ta->entries); i++) {
     struct ta_texture_entry *entry = &ta->entries[i];
@@ -882,7 +886,8 @@ void ta_destroy(struct ta *ta) {
 // clang-format off
 AM_BEGIN(struct ta, ta_fifo_map);
-  AM_RANGE(0x0000000, 0x07fffff) AM_HANDLE(NULL,
+  AM_RANGE(0x0000000, 0x07fffff) AM_HANDLE("ta poly fifo",
+                                           NULL,
                                            NULL,
                                            NULL,
                                            NULL,
@@ -890,7 +895,8 @@ AM_BEGIN(struct ta, ta_fifo_map);
                                            NULL,
                                            (w32_cb)&ta_write_poly_fifo,
                                            NULL)
-  AM_RANGE(0x1000000, 0x1ffffff) AM_HANDLE(NULL,
+  AM_RANGE(0x1000000, 0x1ffffff) AM_HANDLE("ta texture fifo",
+                                           NULL,
                                            NULL,
                                            NULL,
                                            NULL,

View File

@@ -9,11 +9,10 @@ struct memory {
   shmem_handle_t shmem;
   uint32_t shmem_size;
-  struct physical_region physical_regions[MAX_REGIONS];
-  int num_physical_regions;
-  struct mmio_region mmio_regions[MAX_REGIONS];
-  int num_mmio_regions;
+  uint8_t *shmem_base;
+  struct memory_region regions[MAX_REGIONS];
+  int num_regions;
 };
 static int is_page_aligned(uint32_t start, uint32_t size) {
@@ -21,7 +20,7 @@ static int is_page_aligned(uint32_t start, uint32_t size) {
          ((start + size) & ((1 << PAGE_OFFSET_BITS) - 1)) == 0;
 }
-static int get_total_page_size(int num_pages) {
+static uint32_t get_total_page_size(int num_pages) {
   return (uint32_t)num_pages * PAGE_SIZE;
 }
@@ -35,26 +34,15 @@ static uint32_t get_page_offset(uint32_t addr) {
 }
 // pack and unpack page entry bitstrings
-static page_entry_t pack_page_entry(int physical_handle,
-                                    uint32_t physical_offset, int mmio_handle,
-                                    uint32_t mmio_offset) {
-  return ((page_entry_t)(physical_offset | physical_handle) << 32) |
-         (mmio_handle | mmio_offset);
+static page_entry_t pack_page_entry(int region_handle, uint32_t region_offset) {
+  return region_offset | region_handle;
 }
-static uint32_t get_physical_offset(page_entry_t page) {
-  return (page >> 32) & REGION_OFFSET_MASK;
-}
-static int get_physical_handle(page_entry_t page) {
-  return (page >> 32) & REGION_HANDLE_MASK;
-}
-static int get_mmio_offset(page_entry_t page) {
+static int get_region_offset(page_entry_t page) {
   return page & REGION_OFFSET_MASK;
 }
-static int get_mmio_handle(page_entry_t page) {
+static int get_region_handle(page_entry_t page) {
   return page & REGION_HANDLE_MASK;
 }
@@ -129,53 +117,80 @@ static bool reserve_address_space(uint8_t **base) {
   return false;
 }
-struct physical_region *memory_create_physical_region(struct memory *memory,
-                                                      uint32_t size) {
-  CHECK_LT(memory->num_physical_regions, MAX_REGIONS);
-  memory->num_physical_regions++;
-  struct physical_region *region =
-      &memory->physical_regions[memory->num_physical_regions];
-  region->handle = memory->num_physical_regions;
-  region->shmem_offset = memory->shmem_size;
-  region->size = size;
-  // ensure the shared memory regions are aligned to the allocation granularity,
-  // otherwise it will confusingly fail to map further down the line
-  size_t granularity = get_allocation_granularity();
-  CHECK((memory->shmem_size & (granularity - 1)) == 0 &&
-        ((memory->shmem_size + size) & (granularity - 1)) == 0);
-  memory->shmem_size += size;
+struct memory_region *memory_get_region(struct memory *memory,
+                                        const char *name) {
+  for (int i = 1; i < memory->num_regions; i++) {
+    struct memory_region *region = &memory->regions[i];
+    if (!strcmp(region->name, name)) {
+      return region;
+    }
+  }
+  return NULL;
+}
+struct memory_region *memory_create_physical_region(struct memory *memory,
+                                                    const char *name,
+                                                    uint32_t size) {
+  struct memory_region *region = memory_get_region(memory, name);
+  if (!region) {
+    CHECK_LT(memory->num_regions, MAX_REGIONS);
+    region = &memory->regions[memory->num_regions];
+    region->type = REGION_PHYSICAL;
+    region->handle = memory->num_regions++;
+    region->name = name;
+    region->size = size;
+    region->physical.shmem_offset = memory->shmem_size;
+    memory->shmem_size += size;
+    // ensure physical memory regions are aligned to the allocation granularity,
+    // otherwise it will confusingly fail to map further down the line
+    size_t granularity = get_allocation_granularity();
+    CHECK((memory->shmem_size & (granularity - 1)) == 0 &&
+          ((memory->shmem_size + size) & (granularity - 1)) == 0);
+  }
   return region;
 }
-struct mmio_region *memory_create_mmio_region(struct memory *memory,
-                                              uint32_t size, void *data,
-                                              r8_cb r8, r16_cb r16, r32_cb r32,
-                                              r64_cb r64, w8_cb w8, w16_cb w16,
-                                              w32_cb w32, w64_cb w64) {
-  CHECK_LT(memory->num_mmio_regions, MAX_REGIONS);
-  memory->num_mmio_regions++;
-  struct mmio_region *region = &memory->mmio_regions[memory->num_mmio_regions];
-  region->handle = memory->num_mmio_regions;
-  region->size = size;
-  region->data = data;
-  region->read8 = r8;
-  region->read16 = r16;
-  region->read32 = r32;
-  region->read64 = r64;
-  region->write8 = w8;
-  region->write16 = w16;
-  region->write32 = w32;
-  region->write64 = w64;
+struct memory_region *memory_create_mmio_region(
    struct memory *memory, const char *name, uint32_t size, void *data,
    r8_cb r8, r16_cb r16, r32_cb r32, r64_cb r64, w8_cb w8, w16_cb w16,
    w32_cb w32, w64_cb w64) {
+  struct memory_region *region = memory_get_region(memory, name);
+  if (!region) {
+    CHECK_LT(memory->num_regions, MAX_REGIONS);
+    region = &memory->regions[memory->num_regions];
+    region->type = REGION_MMIO;
+    region->handle = memory->num_regions++;
+    region->name = name;
+    region->size = size;
+    region->mmio.data = data;
+    region->mmio.read8 = r8;
+    region->mmio.read16 = r16;
+    region->mmio.read32 = r32;
+    region->mmio.read64 = r64;
+    region->mmio.write8 = w8;
+    region->mmio.write16 = w16;
+    region->mmio.write32 = w32;
+    region->mmio.write64 = w64;
+  }
   return region;
 }
+uint8_t *memory_translate(struct memory *memory, const char *name,
+                          uint32_t offset) {
+  struct memory_region *region = memory_get_region(memory, name);
+  CHECK_NOTNULL(region);
+  return memory->shmem_base + region->physical.shmem_offset + offset;
+}
 static bool memory_create_shmem(struct memory *memory) {
   // create the shared memory object to back the address space
   memory->shmem =
@@ -190,6 +205,7 @@ static bool memory_create_shmem(struct memory *memory) {
 }
 static void memory_destroy_shmem(struct memory *memory) {
+  CHECK(unmap_shared_memory(memory->shmem, 0, memory->shmem_size));
   destroy_shared_memory(memory->shmem);
 }
@@ -206,10 +222,20 @@ bool memory_init(struct memory *memory) {
       dev->memory->mapper(dev, memory->dc, &map);
       // apply the map to create the address space
-      CHECK(as_map(dev->memory->space, &map));
+      CHECK(as_map(dev->memory->space, dev->name, &map));
    }
  }
+  // map raw address space
+  if (!reserve_address_space(&memory->shmem_base)) {
+    return false;
+  }
+  if (!map_shared_memory(memory->shmem, 0, memory->shmem_base,
+                         memory->shmem_size, ACC_READWRITE)) {
+    return false;
+  }
   return true;
 }
@@ -219,7 +245,7 @@ struct memory *memory_create(struct dreamcast *dc) {
   memory->dc = dc;
   memory->shmem = SHMEM_INVALID;
   // 0 page is reserved, meaning all valid page entries must be non-zero
-  memory->num_physical_regions = 1;
+  memory->num_regions = 1;
   return memory;
 }
@@ -237,7 +263,7 @@ static struct address_map_entry *address_map_alloc_entry(
   return entry;
 }
-void am_physical(struct address_map *am, struct physical_region *region,
+void am_physical(struct address_map *am, struct memory_region *region,
                  uint32_t size, uint32_t addr, uint32_t addr_mask) {
   struct address_map_entry *entry = address_map_alloc_entry(am);
   entry->type = MAP_ENTRY_PHYSICAL;
@@ -247,8 +273,8 @@ void am_physical(struct address_map *am, struct physical_region *region,
   entry->physical.region = region;
 }
-void am_mmio(struct address_map *am, struct mmio_region *region, uint32_t size,
-             uint32_t addr, uint32_t addr_mask) {
+void am_mmio(struct address_map *am, struct memory_region *region,
+             uint32_t size, uint32_t addr, uint32_t addr_mask) {
   struct address_map_entry *entry = address_map_alloc_entry(am);
   entry->type = MAP_ENTRY_MMIO;
   entry->size = size;
@@ -278,19 +304,18 @@ void am_mirror(struct address_map *am, uint32_t physical_addr, uint32_t size,
   entry->mirror.physical_addr = physical_addr;
 }
-#define define_read_bytes(name, type) \
-  type as_##name(struct address_space *space, uint32_t addr) { \
+#define define_read_bytes(name, data_type) \
+  data_type as_##name(struct address_space *space, uint32_t addr) { \
     page_entry_t page = space->pages[get_page_index(addr)]; \
     DCHECK(page); \
-    int mmio_handle = get_mmio_handle(page); \
-    if (!mmio_handle) { \
-      return *(type *)(space->base + addr); \
-    } \
-    struct mmio_region *region = \
-        &space->dc->memory->mmio_regions[mmio_handle]; \
-    uint32_t region_offset = get_mmio_offset(page); \
-    uint32_t page_offset = get_page_offset(addr); \
-    return region->name(region->data, region_offset + page_offset); \
+    int region_handle = get_region_handle(page); \
+    struct memory_region *region = &space->dc->memory->regions[region_handle]; \
+    if (region->type == REGION_PHYSICAL) { \
+      return *(data_type *)(space->base + addr); \
+    } \
+    uint32_t region_offset = get_region_offset(page); \
+    uint32_t page_offset = get_page_offset(addr); \
+    return region->mmio.name(region->mmio.data, region_offset + page_offset); \
   }
 define_read_bytes(read8, uint8_t);
@@ -298,20 +323,20 @@ define_read_bytes(read16, uint16_t);
 define_read_bytes(read32, uint32_t);
 define_read_bytes(read64, uint64_t);
-#define define_write_bytes(name, type) \
-  void as_##name(struct address_space *space, uint32_t addr, type value) { \
+#define define_write_bytes(name, data_type) \
+  void as_##name(struct address_space *space, uint32_t addr, \
+                 data_type value) { \
    page_entry_t page = space->pages[get_page_index(addr)]; \
    DCHECK(page); \
-    int mmio_handle = get_mmio_handle(page); \
-    if (!mmio_handle) { \
-      *(type *)(space->base + addr) = value; \
-      return; \
-    } \
-    struct mmio_region *region = \
-        &space->dc->memory->mmio_regions[mmio_handle]; \
-    uint32_t region_offset = get_mmio_offset(page); \
-    uint32_t page_offset = get_page_offset(addr); \
-    region->name(region->data, region_offset + page_offset, value); \
+    int region_handle = get_region_handle(page); \
+    struct memory_region *region = &space->dc->memory->regions[region_handle]; \
+    if (region->type == REGION_PHYSICAL) { \
+      *(data_type *)(space->base + addr) = value; \
+      return; \
+    } \
+    uint32_t region_offset = get_region_offset(page); \
+    uint32_t page_offset = get_page_offset(addr); \
+    region->mmio.name(region->mmio.data, region_offset + page_offset, value); \
   }
 define_write_bytes(write8, uint8_t);
@@ -357,24 +382,6 @@ void as_memcpy(struct address_space *space, uint32_t dst, uint32_t src,
   }
 }
-void as_lookup(struct address_space *space, uint32_t addr, uint8_t **ptr,
-               struct physical_region **physical_region,
-               uint32_t *physical_offset, struct mmio_region **mmio_region,
-               uint32_t *mmio_offset) {
-  page_entry_t page = space->pages[get_page_index(addr)];
-  int physical_handle = get_physical_handle(page);
-  int mmio_handle = get_mmio_handle(page);
-  *ptr = space->base + addr;
-  *physical_region = physical_handle
-                         ? &space->dc->memory->physical_regions[physical_handle]
-                         : NULL;
-  *physical_offset = get_physical_offset(page) + get_page_offset(addr);
-  *mmio_region =
-      mmio_handle ? &space->dc->memory->mmio_regions[mmio_handle] : NULL;
-  *mmio_offset = get_mmio_offset(page) + get_page_offset(addr);
-}
 static void as_merge_map(struct address_space *space,
                          const struct address_map *map, uint32_t offset) {
   // iterate regions in the supplied memory map in the other added, flattening
@@ -397,35 +404,31 @@ static void as_merge_map(struct address_space *space,
     switch (entry->type) {
       case MAP_ENTRY_PHYSICAL: {
-        struct physical_region *physical_region = entry->physical.region;
+        struct memory_region *region = entry->physical.region;
         for (int i = 0; i < num_pages; i++) {
-          uint32_t physical_offset = get_total_page_size(i);
+          uint32_t region_offset = get_total_page_size(i);
           space->pages[first_page + i] =
-              pack_page_entry(physical_region->handle, physical_offset, 0, 0);
+              pack_page_entry(region->handle, region_offset);
        }
      } break;
      case MAP_ENTRY_MMIO: {
-        struct mmio_region *mmio_region = entry->mmio.region;
+        struct memory_region *region = entry->mmio.region;
        for (int i = 0; i < num_pages; i++) {
-          uint32_t mmio_offset = get_total_page_size(i);
-          page_entry_t page = space->pages[first_page + i];
-          int physical_handle = get_physical_handle(page);
-          uint32_t physical_offset = get_physical_offset(page);
+          uint32_t region_offset = get_total_page_size(i);
          space->pages[first_page + i] =
-              pack_page_entry(physical_handle, physical_offset,
-                              mmio_region->handle, mmio_offset);
+              pack_page_entry(region->handle, region_offset);
        }
      } break;
      case MAP_ENTRY_DEVICE: {
        struct address_map device_map = {0};
        entry->device.mapper(entry->device.device, space->dc, &device_map);
        as_merge_map(space, &device_map, addr);
      } break;
@@ -446,13 +449,6 @@ static void as_merge_map(struct address_space *space,
   }
 }
-static uint32_t as_get_page_offset(struct address_space *space,
-                                   page_entry_t page) {
-  const struct physical_region *region =
-      &space->dc->memory->physical_regions[get_physical_handle(page)];
-  return region->shmem_offset + get_physical_offset(page);
-}
 static int as_num_adj_pages(struct address_space *space, int first_page_index) {
   int i;
@@ -460,10 +456,24 @@ static int as_num_adj_pages(struct address_space *space, int first_page_index) {
     page_entry_t page = space->pages[i];
     page_entry_t next_page = space->pages[i + 1];
-    uint32_t page_offset = as_get_page_offset(space, page);
-    uint32_t next_page_offset = as_get_page_offset(space, next_page);
+    int region_handle = get_region_handle(page);
+    uint32_t region_offset = get_region_offset(page);
+    const struct memory_region *region =
+        &space->dc->memory->regions[region_handle];
+    int next_region_handle = get_region_handle(next_page);
+    uint32_t next_region_offset = get_region_offset(next_page);
+    const struct memory_region *next_region =
+        &space->dc->memory->regions[next_region_handle];
+    if (region->type == REGION_MMIO && next_region_handle != region_handle) {
+      break;
+    }
+    uint32_t page_delta =
+        (next_region->physical.shmem_offset + next_region_offset) -
+        (region->physical.shmem_offset + region_offset);
-    if ((next_page_offset - page_offset) != PAGE_SIZE) {
+    if (region->type == REGION_PHYSICAL && page_delta != PAGE_SIZE) {
      break;
    }
  }
@@ -471,72 +481,24 @@ static int as_num_adj_pages(struct address_space *space, int first_page_index) {
   return (i + 1) - first_page_index;
 }
-static bool as_map_pages(struct address_space *space, uint8_t *base) {
-  for (int page_index = 0; page_index < NUM_PAGES;) {
-    page_entry_t page = space->pages[page_index];
-    if (!page) {
-      page_index++;
-      continue;
-    }
-    // batch map adjacent pages, mmap is fairly slow
-    int num_pages = as_num_adj_pages(space, page_index);
-    uint32_t size = get_total_page_size(num_pages);
-    // mmap the virtual address range to the raw address space
-    uint32_t addr = get_total_page_size(page_index);
-    uint32_t page_offset = as_get_page_offset(space, page);
-    if (!map_shared_memory(space->dc->memory->shmem, page_offset, base + addr,
-                           size, ACC_READWRITE)) {
-      return false;
-    }
-    page_index += num_pages;
-  }
-  return true;
-}
-bool as_map(struct address_space *space, const struct address_map *map) {
+bool as_map(struct address_space *space, const char *name,
+            const struct address_map *map) {
   as_unmap(space);
   // flatten the supplied address map out into a virtual page table
   as_merge_map(space, map, 0);
-  // map the virtual page table into both the base and protected mirrors
-  if (!reserve_address_space(&space->base) ||
-      !as_map_pages(space, space->base)) {
+#if 0
+  LOG_INFO("===-----------------------------------------------------===");
+  LOG_INFO("%s address space", name);
+  LOG_INFO("===-----------------------------------------------------===");
+#endif
+  if (!reserve_address_space(&space->base)) {
     return false;
  }
-  if (!reserve_address_space(&space->protected_base) ||
-      !as_map_pages(space, space->protected_base)) {
-    return false;
-  }
-  // protect dynamic regions in the protected address space
-  for (int page_index = 0; page_index < NUM_PAGES; page_index++) {
-    page_entry_t page = space->pages[page_index];
-    int mmio_index = get_mmio_handle(page);
-    if (!mmio_index) {
-      continue;
-    }
-    uint32_t addr = get_total_page_size(page_index);
-    protect_pages(space->protected_base + addr, PAGE_SIZE, ACC_NONE);
-  }
-  return true;
-}
-static void as_unmap_pages(struct address_space *space, uint8_t *base) {
-  if (!base) {
-    return;
-  }
+  // iterate the virtual page table, mapping it into the reserved address space
   for (int page_index = 0; page_index < NUM_PAGES;) {
     page_entry_t page = space->pages[page_index];
@@ -545,30 +507,67 @@ static void as_unmap_pages(struct address_space *space, uint8_t *base) {
       continue;
    }
-    uint32_t addr = get_total_page_size(page_index);
+    int region_handle = get_region_handle(page);
+    uint32_t region_offset = get_region_offset(page);
+    struct memory_region *region = &space->dc->memory->regions[region_handle];
+    // batch adjacent pages, mmap is fairly slow
+    uint8_t *addr = space->base + get_total_page_size(page_index);
    int num_pages = as_num_adj_pages(space, page_index);
    uint32_t size = get_total_page_size(num_pages);
-    CHECK(unmap_shared_memory(space->dc->memory->shmem, base + addr, size));
+#if 0
+    LOG_INFO("[0x%08x, 0x%08x] %s+0x%x", addr, addr + size - 1, region->name,
+             region_offset);
+#endif
+    if (region->type == REGION_PHYSICAL) {
+      // map virtual address range to backing shared memory object for physical
+      // regions
+      uint32_t shmem_offset = region->physical.shmem_offset + region_offset;
+      if (!map_shared_memory(space->dc->memory->shmem, shmem_offset, addr, size,
+                             ACC_READWRITE)) {
+        return false;
+      }
+    } else {
+      // disable access to virtual address range for mmio regions, resulting in
+      // SIGSEGV on access
+      if (!map_shared_memory(space->dc->memory->shmem, 0, addr, size,
+                             ACC_NONE)) {
+        return false;
+      }
+    }
+    page_index += num_pages;
+  }
+  return true;
+}
+void as_unmap(struct address_space *space) {
+  for (int page_index = 0; page_index < NUM_PAGES;) {
+    page_entry_t page = space->pages[page_index];
+    if (!page) {
+      page_index++;
+      continue;
+    }
+    uint8_t *addr = space->base + get_total_page_size(page_index);
+    int num_pages = as_num_adj_pages(space, page_index);
+    uint32_t size = get_total_page_size(num_pages);
+    CHECK(unmap_shared_memory(space->dc->memory->shmem, addr, size));
    page_index += num_pages;
  }
 }
-void as_unmap(struct address_space *space) {
-  as_unmap_pages(space, space->base);
-  as_unmap_pages(space, space->protected_base);
-}
 uint8_t *as_translate(struct address_space *space, uint32_t addr) {
   return space->base + addr;
 }
-uint8_t *as_translate_protected(struct address_space *space, uint32_t addr) {
-  return space->protected_base + addr;
-}
 struct address_space *as_create(struct dreamcast *dc) {
   struct address_space *space = calloc(1, sizeof(struct address_space));
   space->dc = dc;
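
A side note on the page-entry packing above (a standalone sketch, not project code): with only one region per page, an entry fits in 32 bits because region offsets are page aligned, which leaves the low PAGE_OFFSET_BITS bits free to hold the region handle. The constants below assume REGION_HANDLE_MASK covers exactly those low bits (consistent with MAX_REGIONS being (1 << PAGE_OFFSET_BITS)) and use an assumed PAGE_OFFSET_BITS of 12.

#include <stdint.h>

#define PAGE_OFFSET_BITS 12 /* assumed example value (4 KB pages) */
#define REGION_HANDLE_MASK ((uint32_t)((1 << PAGE_OFFSET_BITS) - 1))
#define REGION_OFFSET_MASK ((uint32_t)~REGION_HANDLE_MASK)

typedef uint32_t page_entry_t;

static page_entry_t pack_page_entry(int region_handle, uint32_t region_offset) {
  /* region_offset is a multiple of the page size, so its low PAGE_OFFSET_BITS
     bits are zero and can carry the region handle */
  return region_offset | (uint32_t)region_handle;
}

static int get_region_handle(page_entry_t page) {
  return (int)(page & REGION_HANDLE_MASK);
}

static uint32_t get_region_offset(page_entry_t page) {
  return page & REGION_OFFSET_MASK;
}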

View File

@@ -17,36 +17,46 @@ typedef void (*w16_cb)(void *, uint32_t, uint16_t);
 typedef void (*w32_cb)(void *, uint32_t, uint32_t);
 typedef void (*w64_cb)(void *, uint32_t, uint64_t);
+enum region_type { REGION_PHYSICAL, REGION_MMIO };
+struct memory_region {
+  enum region_type type;
+  int handle;
+  const char *name;
+  uint32_t size;
+  union {
+    struct {
+      uint32_t shmem_offset;
+    } physical;
+    struct {
+      void *data;
+      r8_cb read8;
+      r16_cb read16;
+      r32_cb read32;
+      r64_cb read64;
+      w8_cb write8;
+      w16_cb write16;
+      w32_cb write32;
+      w64_cb write64;
+    } mmio;
+  };
+};
 struct memory;
-struct physical_region {
-  int handle;
-  uint32_t size;
-  uint32_t shmem_offset;
-};
-struct mmio_region {
-  int handle;
-  uint32_t size;
-  void *data;
-  r8_cb read8;
-  r16_cb read16;
-  r32_cb read32;
-  r64_cb read64;
-  w8_cb write8;
-  w16_cb write16;
-  w32_cb write32;
-  w64_cb write64;
-};
-struct physical_region *memory_create_physical_region(struct memory *memory,
-                                                      uint32_t size);
-struct mmio_region *memory_create_mmio_region(struct memory *memory,
-                                              uint32_t size, void *data,
-                                              r8_cb r8, r16_cb r16, r32_cb r32,
-                                              r64_cb r64, w8_cb w8, w16_cb w16,
-                                              w32_cb w32, w64_cb w64);
+struct memory_region *memory_create_physical_region(struct memory *memory,
+                                                    const char *name,
+                                                    uint32_t size);
+struct memory_region *memory_create_mmio_region(
+    struct memory *memory, const char *name, uint32_t size, void *data,
+    r8_cb r8, r16_cb r16, r32_cb r32, r64_cb r64, w8_cb w8, w16_cb w16,
+    w32_cb w32, w64_cb w64);
+uint8_t *memory_translate(struct memory *memory, const char *name,
+                          uint32_t offset);
 bool memory_init(struct memory *memory);
 struct memory *memory_create(struct dreamcast *dc);
@@ -72,17 +82,18 @@ void memory_destroy(struct memory *memory);
    size = end_ - begin_ + 1; \
    mask = 0xffffffff;
 #define AM_MASK(mask_) mask = mask_;
-#define AM_MOUNT() \
+#define AM_MOUNT(name) \
   { \
-    struct physical_region *region = \
-        memory_create_physical_region(machine->memory, size); \
+    struct memory_region *region = \
+        memory_create_physical_region(machine->memory, name, size); \
    am_physical(map, region, size, begin, mask); \
  }
-#define AM_HANDLE(r8, r16, r32, r64, w8, w16, w32, w64) \
+#define AM_HANDLE(name, r8, r16, r32, r64, w8, w16, w32, w64) \
   { \
-    struct mmio_region *region = memory_create_mmio_region( \
-        machine->memory, size, self, r8, r16, r32, r64, w8, w16, w32, w64); \
+    struct memory_region *region = \
+        memory_create_mmio_region(machine->memory, name, size, self, r8, r16, \
+                                  r32, r64, w8, w16, w32, w64); \
    am_mmio(map, region, size, begin, mask); \
  }
 #define AM_DEVICE(name, cb) \
   { \
@@ -116,11 +127,11 @@ struct address_map_entry {
   union {
     struct {
-      struct physical_region *region;
+      struct memory_region *region;
    } physical;
    struct {
-      struct mmio_region *region;
+      struct memory_region *region;
    } mmio;
    struct {
@@ -139,10 +150,10 @@ struct address_map {
   int num_entries;
 };
-void am_physical(struct address_map *am, struct physical_region *region,
+void am_physical(struct address_map *am, struct memory_region *region,
                  uint32_t size, uint32_t addr, uint32_t addr_mask);
-void am_mmio(struct address_map *am, struct mmio_region *region, uint32_t size,
-             uint32_t addr, uint32_t addr_mask);
+void am_mmio(struct address_map *am, struct memory_region *region,
+             uint32_t size, uint32_t addr, uint32_t addr_mask);
 void am_device(struct address_map *am, void *device, address_map_cb mapper,
                uint32_t size, uint32_t addr, uint32_t addr_mask);
 void am_mirror(struct address_map *am, uint32_t physical_addr, uint32_t size,
@@ -161,13 +172,12 @@ void am_mirror(struct address_map *am, uint32_t physical_addr, uint32_t size,
 #define REGION_OFFSET_MASK (page_entry_t)(~REGION_HANDLE_MASK)
 #define MAX_REGIONS (1 << PAGE_OFFSET_BITS)
-typedef uint64_t page_entry_t;
+typedef uint32_t page_entry_t;
 struct address_space {
   struct dreamcast *dc;
   page_entry_t pages[NUM_PAGES];
   uint8_t *base;
-  uint8_t *protected_base;
 };
 void as_memcpy_to_guest(struct address_space *space, uint32_t virtual_dest,
@@ -176,10 +186,6 @@ void as_memcpy_to_host(struct address_space *space, void *ptr,
                        uint32_t virtual_src, uint32_t size);
 void as_memcpy(struct address_space *space, uint32_t virtual_dest,
                uint32_t virtual_src, uint32_t size);
-void as_lookup(struct address_space *space, uint32_t virtual_addr,
-               uint8_t **ptr, struct physical_region **physical_region,
-               uint32_t *physical_offset, struct mmio_region **mmio_region,
-               uint32_t *mmio_offset);
 uint8_t as_read8(struct address_space *space, uint32_t addr);
 uint16_t as_read16(struct address_space *space, uint32_t addr);
@@ -190,10 +196,10 @@ void as_write16(struct address_space *space, uint32_t addr, uint16_t value);
 void as_write32(struct address_space *space, uint32_t addr, uint32_t value);
 void as_write64(struct address_space *space, uint32_t addr, uint64_t value);
-bool as_map(struct address_space *space, const struct address_map *map);
+bool as_map(struct address_space *space, const char *name,
+            const struct address_map *map);
 void as_unmap(struct address_space *space);
 uint8_t *as_translate(struct address_space *space, uint32_t addr);
-uint8_t *as_translate_protected(struct address_space *space, uint32_t addr);
 struct address_space *as_create(struct dreamcast *dc);
 void as_destroy(struct address_space *space);
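
For reference, an approximate hand-expansion of the renamed mount macro (illustrative only: AM_BEGIN's boilerplate, which supplies `map`, `machine`, `self`, `begin`, `size` and `mask`, is elided, and the AM_RANGE body is abbreviated to the lines visible in this diff):

/* approximate expansion of
 *   AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT("video ram")
 * inside AM_BEGIN(struct pvr, pvr_vram_map) ... AM_END() */
begin = 0x00000000;
size = 0x007fffff - 0x00000000 + 1; /* AM_RANGE */
mask = 0xffffffff;
{ /* AM_MOUNT("video ram") */
  struct memory_region *region =
      memory_create_physical_region(machine->memory, "video ram", size);
  am_physical(map, region, size, begin, mask);
}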

View File

@@ -5,6 +5,7 @@
 #include "hw/aica/aica.h"
 #include "hw/debugger.h"
 #include "hw/dreamcast.h"
+#include "hw/holly/g2.h"
 #include "hw/holly/holly.h"
 #include "hw/holly/pvr.h"
 #include "hw/holly/ta.h"
@@ -703,7 +704,7 @@ static bool sh4_init(struct device *dev) {
   sh4->space = sh4->memory->space;
   sh4->memory_if = (struct jit_memory_interface){
-      &sh4->ctx, sh4->memory->space->protected_base,
+      &sh4->ctx, sh4->memory->space->base,
       sh4->memory->space, &as_read8,
       &as_read16, &as_read32,
       &as_read64, &as_write8,
@@ -951,14 +952,8 @@ void sh4_destroy(struct sh4 *sh4) {
 // clang-format off
 AM_BEGIN(struct sh4, sh4_data_map)
-  AM_RANGE(0x00000000, 0x03ffffff) AM_MOUNT()  // area 0
-  AM_RANGE(0x04000000, 0x07ffffff) AM_MOUNT()  // area 1
-  AM_RANGE(0x08000000, 0x0bffffff) AM_MOUNT()  // area 2
-  AM_RANGE(0x0c000000, 0x0cffffff) AM_MOUNT()  // area 3
-  AM_RANGE(0x10000000, 0x13ffffff) AM_MOUNT()  // area 4
-  AM_RANGE(0x14000000, 0x17ffffff) AM_MOUNT()  // area 5
-  AM_RANGE(0x18000000, 0x1bffffff) AM_MOUNT()  // area 6
-  AM_RANGE(0x1c000000, 0x1fffffff) AM_MOUNT()  // area 7
+  AM_RANGE(0x00000000, 0x0021ffff) AM_MOUNT("system rom")
+  AM_RANGE(0x0c000000, 0x0cffffff) AM_MOUNT("system ram")
   // main ram mirrors
   AM_RANGE(0x0d000000, 0x0dffffff) AM_MIRROR(0x0c000000)
@@ -968,13 +963,18 @@ AM_BEGIN(struct sh4, sh4_data_map)
   // external devices
   AM_RANGE(0x005f6000, 0x005f7fff) AM_DEVICE("holly", holly_reg_map)
   AM_RANGE(0x005f8000, 0x005f9fff) AM_DEVICE("pvr", pvr_reg_map)
+  AM_RANGE(0x00600000, 0x0067ffff) AM_DEVICE("g2", g2_modem_map)
   AM_RANGE(0x00700000, 0x00710fff) AM_DEVICE("aica", aica_reg_map)
   AM_RANGE(0x00800000, 0x00ffffff) AM_DEVICE("aica", aica_data_map)
+  AM_RANGE(0x01000000, 0x01ffffff) AM_DEVICE("g2", g2_expansion0_map)
+  AM_RANGE(0x02700000, 0x02ffffff) AM_DEVICE("g2", g2_expansion1_map)
   AM_RANGE(0x04000000, 0x057fffff) AM_DEVICE("pvr", pvr_vram_map)
   AM_RANGE(0x10000000, 0x11ffffff) AM_DEVICE("ta", ta_fifo_map)
+  AM_RANGE(0x14000000, 0x17ffffff) AM_DEVICE("g2", g2_expansion2_map)
   // internal registers
-  AM_RANGE(0x1e000000, 0x1fffffff) AM_HANDLE((r8_cb)&sh4_reg_r8,
+  AM_RANGE(0x1e000000, 0x1fffffff) AM_HANDLE("sh4 reg",
+                                             (r8_cb)&sh4_reg_r8,
                                              (r16_cb)&sh4_reg_r16,
                                              (r32_cb)&sh4_reg_r32,
                                              NULL,
@@ -993,7 +993,8 @@ AM_BEGIN(struct sh4, sh4_data_map)
   AM_RANGE(0xe0000000, 0xffffffff) AM_MIRROR(0x00000000)  // p4
   // internal cache and sq only accessible through p4
-  AM_RANGE(0x7c000000, 0x7fffffff) AM_HANDLE((r8_cb)&sh4_cache_r8,
+  AM_RANGE(0x7c000000, 0x7fffffff) AM_HANDLE("sh4 cache",
+                                             (r8_cb)&sh4_cache_r8,
                                              (r16_cb)&sh4_cache_r16,
                                              (r32_cb)&sh4_cache_r32,
                                              (r64_cb)&sh4_cache_r64,
@@ -1002,7 +1003,8 @@ AM_BEGIN(struct sh4, sh4_data_map)
                                              (w32_cb)&sh4_cache_w32,
                                              (w64_cb)&sh4_cache_w64)
-  AM_RANGE(0xe0000000, 0xe3ffffff) AM_HANDLE((r8_cb)&sh4_sq_r8,
+  AM_RANGE(0xe0000000, 0xe3ffffff) AM_HANDLE("sh4 sq",
+                                             (r8_cb)&sh4_sq_r8,
                                              (r16_cb)&sh4_sq_r16,
                                              (r32_cb)&sh4_sq_r32,
                                              NULL,

View File

@@ -1,5 +1,6 @@
 #include "jit/frontend/sh4/sh4_translate.h"
 #include "core/assert.h"
+#include "core/option.h"
 #include "core/profiler.h"
 #include "jit/frontend/sh4/sh4_analyze.h"
 #include "jit/frontend/sh4/sh4_context.h"
@@ -7,6 +8,8 @@
 #include "jit/frontend/sh4/sh4_frontend.h"
 #include "jit/ir/ir.h"
+DEFINE_OPTION_BOOL(fastmem, true, "Enable SIGSEGV-based fast memory access");
 //
 // fsca estimate lookup table
 //
@@ -37,14 +40,14 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
 // helper functions for accessing the sh4 context, macros are used to cut
 // down on copy and paste
 #define load_guest(addr, type) \
-  ((flags & SH4_SLOWMEM) ? ir_load_slow(ir, addr, type) \
-                         : ir_load_fast(ir, addr, type))
+  ((!OPTION_fastmem || (flags & SH4_SLOWMEM)) ? ir_load_slow(ir, addr, type) \
+                                              : ir_load_fast(ir, addr, type))
 #define store_guest(addr, v) \
   do { \
-    ((flags & SH4_SLOWMEM) ? ir_store_slow(ir, addr, v) \
-                           : ir_store_fast(ir, addr, v)); \
+    ((!OPTION_fastmem || (flags & SH4_SLOWMEM)) ? ir_store_slow(ir, addr, v) \
+                                                : ir_store_fast(ir, addr, v)); \
   } while (0)
 #define load_gpr(n, type) \

View File

@@ -76,7 +76,7 @@ bool reserve_pages(void *ptr, size_t size) {
   // knowing this, an existing mapping can be detected by not using MAP_FIXED,
   // and comparing the returned mapped address with the requested address
   void *res =
-      mmap(ptr, size, PROT_NONE, MAP_ANON | MAP_NORESERVE | MAP_PRIVATE, -1, 0);
+      mmap(ptr, size, PROT_NONE, MAP_SHARED | MAP_ANON | MAP_NORESERVE, -1, 0);
   if (res == MAP_FAILED) {
     return false;
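
Closing note on the host-side mechanism (a standalone POSIX sketch, not the project's sys layer): reserve_pages and map_shared_memory wrap roughly the pattern below — reserve a virtual range with PROT_NONE, then overmap pieces of it from one shared memory object with MAP_FIXED, so several reservations (several guest address spaces) can alias the same backing region. Error handling is trimmed and all names are illustrative.

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  const size_t size = 8 * 1024 * 1024;

  /* one shared memory object backs every mapping */
  int shm = shm_open("/example_shmem", O_RDWR | O_CREAT | O_EXCL, 0600);
  shm_unlink("/example_shmem");
  ftruncate(shm, (off_t)size);

  /* reserve two address ranges without committing memory (PROT_NONE),
     mirroring the reserve_pages() flags changed in this commit */
  uint8_t *space_a = mmap(NULL, size, PROT_NONE,
                          MAP_SHARED | MAP_ANON | MAP_NORESERVE, -1, 0);
  uint8_t *space_b = mmap(NULL, size, PROT_NONE,
                          MAP_SHARED | MAP_ANON | MAP_NORESERVE, -1, 0);

  /* overmap both reservations with the same object; a write through one
     mapping is visible through the other, which is how two address spaces
     end up sharing a single physical region */
  mmap(space_a, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, shm, 0);
  mmap(space_b, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, shm, 0);

  space_a[0] = 0xab;
  return space_b[0] == 0xab ? 0 : 1;
}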