added data_mask to mmio callbacks

converted all mmio callbacks over to using uint32_t
replaced texcache / fastmem settings with preprocessor defines that disable both for debug builds
This commit is contained in:
Anthony Pesch 2016-10-08 13:49:18 -07:00
parent 0afc29526f
commit 6195a53ee5
8 changed files with 257 additions and 352 deletions

View File

@ -103,91 +103,80 @@ static void aica_timer_reschedule(struct aica *aica, int n, uint32_t period) {
scheduler_start_timer(aica->scheduler, timer_cbs[n], aica, remaining);
}
/*static uint32_t aica_channel_reg_read(struct aica *aica, uint32_t addr) {
/*static uint32_t aica_channel_reg_read(struct aica *aica, uint32_t addr,
uint32_t data_mask) {
int ch = addr >> 7;
addr &= 0x7f;
return aica->aica_regs[(ch << 7) | addr];
}
static void aica_channel_reg_write(struct aica *aica, uint32_t addr,
uint32_t value) {
uint32_t data, uint32_t data_mask) {
int ch = addr >> 7;
addr &= 0x7f;
aica->aica_regs[(ch << 7) | addr] = value;
aica->aica_regs[(ch << 7) | addr] = data;
}
static uint32_t aica_common_reg_read(struct aica *aica, uint32_t addr) {
static uint32_t aica_common_reg_read(struct aica *aica, uint32_t addr, uint32_t
data_mask) {
return 0;
}
static void aica_common_reg_write(struct aica *aica, uint32_t addr,
uint32_t value) {}*/
uint32_t data, uint32_t data_mask) {}*/
#define define_reg_read(name, type) \
type aica_reg_##name(struct aica *aica, uint32_t addr) { \
if (addr < 0x2000) { /* channel */ \
} else if (addr >= 0x2800 && addr < 0x2d08 /* common */) { \
addr -= 0x2800; \
switch (addr) { \
case 0x90: /* TIMA */ \
return (aica_timer_tctl(aica, 0) << 8) | aica_timer_tcnt(aica, 0); \
case 0x94: /* TIMB */ \
return (aica_timer_tctl(aica, 1) << 8) | aica_timer_tcnt(aica, 1); \
case 0x98: /* TIMC */ \
return (aica_timer_tctl(aica, 2) << 8) | aica_timer_tcnt(aica, 2); \
} \
return *(type *)&aica->aica_regs[0x2800 + addr]; \
} \
return *(type *)&aica->aica_regs[addr]; \
uint32_t aica_reg_read(struct aica *aica, uint32_t addr, uint32_t data_mask) {
if (addr < 0x2000) { /* channel */
} else if (addr >= 0x2800 && addr < 0x2d08 /* common */) {
addr -= 0x2800;
switch (addr) {
case 0x90: /* TIMA */
return (aica_timer_tctl(aica, 0) << 8) | aica_timer_tcnt(aica, 0);
case 0x94: /* TIMB */
return (aica_timer_tctl(aica, 1) << 8) | aica_timer_tcnt(aica, 1);
case 0x98: /* TIMC */
return (aica_timer_tctl(aica, 2) << 8) | aica_timer_tcnt(aica, 2);
}
return READ_DATA(&aica->aica_regs[0x2800 + addr]);
}
define_reg_read(r8, uint8_t);
define_reg_read(r16, uint16_t);
define_reg_read(r32, uint32_t);
#define define_reg_write(name, type) \
void aica_reg_##name(struct aica *aica, uint32_t addr, type value) { \
*(type *)&aica->aica_regs[addr] = value; \
if (addr < 0x2000) { /* channel */ \
} else if (addr >= 0x2800 && addr < 0x2d08 /* common */) { \
addr -= 0x2800; \
switch (addr) { \
case 0x90: { /* TIMA */ \
aica_timer_reschedule(aica, 0, \
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 0)); \
} break; \
case 0x94: { /* TIMB */ \
aica_timer_reschedule(aica, 1, \
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 1)); \
} break; \
case 0x98: { /* TIMC */ \
aica_timer_reschedule(aica, 2, \
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 2)); \
} break; \
case 0x400: { /* ARMRST */ \
if (value) { \
arm_suspend(aica->arm); \
} else { \
arm_resume(aica->arm); \
} \
} break; \
} \
} \
return READ_DATA(&aica->aica_regs[addr]);
}
define_reg_write(w8, uint8_t);
define_reg_write(w16, uint16_t);
define_reg_write(w32, uint32_t);
void aica_reg_write(struct aica *aica, uint32_t addr, uint32_t data,
uint32_t data_mask) {
WRITE_DATA(&aica->aica_regs[addr]);
#define define_read_wave(name, type) \
type aica_wave_##name(struct aica *aica, uint32_t addr) { \
return *(type *)&aica->wave_ram[addr]; \
if (addr < 0x2000) { /* channel */
} else if (addr >= 0x2800 && addr < 0x2d08 /* common */) {
addr -= 0x2800;
switch (addr) {
case 0x90: { /* TIMA */
aica_timer_reschedule(aica, 0,
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 0));
} break;
case 0x94: { /* TIMB */
aica_timer_reschedule(aica, 1,
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 1));
} break;
case 0x98: { /* TIMC */
aica_timer_reschedule(aica, 2,
AICA_TIMER_PERIOD - aica_timer_tcnt(aica, 2));
} break;
case 0x400: { /* ARMRST */
if (data) {
arm_suspend(aica->arm);
} else {
arm_resume(aica->arm);
}
} break;
}
}
}
define_read_wave(r8, uint8_t);
define_read_wave(r16, uint16_t);
uint32_t aica_wave_r32(struct aica *aica, uint32_t addr) {
uint32_t aica_wave_read(struct aica *aica, uint32_t addr, uint32_t data_mask) {
if (DATA_SIZE() == 4) {
// FIXME temp hacks to get Crazy Taxi 1 booting
if (addr == 0x104 || addr == 0x284 || addr == 0x288) {
return 0x54494e49;
@ -207,18 +196,15 @@ uint32_t aica_wave_r32(struct aica *aica, uint32_t addr) {
addr == 0xb3c0 || addr == 0xb3d0 || addr == 0xb3e0 || addr == 0xb3f0) {
return 0x0;
}
return *(uint32_t *)&aica->wave_ram[addr];
}
#define define_write_wave(name, type) \
void aica_wave_##name(struct aica *aica, uint32_t addr, type value) { \
*(type *)&aica->wave_ram[addr] = value; \
return READ_DATA(&aica->wave_ram[addr]);
}
define_write_wave(w8, uint8_t);
define_write_wave(w16, uint16_t);
define_write_wave(w32, uint32_t);
void aica_wave_write(struct aica *aica, uint32_t addr, uint32_t data,
uint32_t data_mask) {
WRITE_DATA(&aica->wave_ram[addr]);
}
static void aica_run(struct device *dev, int64_t ns) {
struct aica *aica = (struct aica *)dev;
@ -271,22 +257,14 @@ AM_BEGIN(struct aica, aica_reg_map);
// over allocate a bit to match the allocation granularity of the host
AM_RANGE(0x00000000, 0x00010fff) AM_MOUNT("aica reg ram")
AM_RANGE(0x00000000, 0x00010fff) AM_HANDLE("aica reg",
(r8_cb)&aica_reg_r8,
(r16_cb)&aica_reg_r16,
(r32_cb)&aica_reg_r32,
(w8_cb)&aica_reg_w8,
(w16_cb)&aica_reg_w16,
(w32_cb)&aica_reg_w32)
(mmio_read_cb)&aica_reg_read,
(mmio_write_cb)&aica_reg_write)
AM_END();
AM_BEGIN(struct aica, aica_data_map);
AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT("aica wave ram")
AM_RANGE(0x00000000, 0x007fffff) AM_HANDLE("aica wave",
(r8_cb)&aica_wave_r8,
(r16_cb)&aica_wave_r16,
(r32_cb)&aica_wave_r32,
(w8_cb)&aica_wave_w8,
(w16_cb)&aica_wave_w16,
(w32_cb)&aica_wave_w32)
(mmio_read_cb)&aica_wave_read,
(mmio_write_cb)&aica_wave_write)
AM_END();
// clang-format on

View File

@ -193,34 +193,25 @@ static void holly_update_interrupts(struct holly *hl) {
// TODO check for hardware DMA initiation
}
#define define_reg_read(name, type) \
type holly_reg_##name(struct holly *hl, uint32_t addr) { \
uint32_t offset = addr >> 2; \
reg_read_cb read = holly_cb[offset].read; \
if (read) { \
return (type)read(hl->dc); \
} \
return (type)hl->reg[offset]; \
uint32_t holly_reg_read(struct holly *hl, uint32_t addr, uint32_t data_mask) {
uint32_t offset = addr >> 2;
reg_read_cb read = holly_cb[offset].read;
if (read) {
return read(hl->dc);
}
return hl->reg[offset];
}
define_reg_read(r8, uint8_t);
define_reg_read(r16, uint16_t);
define_reg_read(r32, uint32_t);
#define define_reg_write(name, type) \
void holly_reg_##name(struct holly *hl, uint32_t addr, type value) { \
uint32_t offset = addr >> 2; \
reg_write_cb write = holly_cb[offset].write; \
if (write) { \
write(hl->dc, value); \
return; \
} \
hl->reg[offset] = (uint32_t)value; \
void holly_reg_write(struct holly *hl, uint32_t addr, uint32_t data,
uint32_t data_mask) {
uint32_t offset = addr >> 2;
reg_write_cb write = holly_cb[offset].write;
if (write) {
write(hl->dc, data);
return;
}
hl->reg[offset] = data;
}
define_reg_write(w8, uint8_t);
define_reg_write(w16, uint16_t);
define_reg_write(w32, uint32_t);
static bool holly_init(struct device *dev) {
struct holly *hl = (struct holly *)dev;
@ -450,12 +441,8 @@ REG_W32(holly_cb, SB_PDST) {
// clang-format off
AM_BEGIN(struct holly, holly_reg_map);
AM_RANGE(0x00000000, 0x00001fff) AM_HANDLE("holly reg",
(r8_cb)&holly_reg_r8,
(r16_cb)&holly_reg_r16,
(r32_cb)&holly_reg_r32,
(w8_cb)&holly_reg_w8,
(w16_cb)&holly_reg_w16,
(w32_cb)&holly_reg_w32)
(mmio_read_cb)&holly_reg_read,
(mmio_write_cb)&holly_reg_write)
AM_END();
AM_BEGIN(struct holly, holly_modem_map);

View File

@ -156,9 +156,10 @@ struct memory_region *memory_create_physical_region(struct memory *memory,
return region;
}
struct memory_region *memory_create_mmio_region(
struct memory *memory, const char *name, uint32_t size, void *data,
r8_cb r8, r16_cb r16, r32_cb r32, w8_cb w8, w16_cb w16, w32_cb w32) {
struct memory_region *memory_create_mmio_region(struct memory *memory,
const char *name, uint32_t size,
void *data, mmio_read_cb read,
mmio_write_cb write) {
struct memory_region *region = memory_get_region(memory, name);
if (!region) {
@ -170,12 +171,8 @@ struct memory_region *memory_create_mmio_region(
region->name = name;
region->size = size;
region->mmio.data = data;
region->mmio.read8 = r8;
region->mmio.read16 = r16;
region->mmio.read32 = r32;
region->mmio.write8 = w8;
region->mmio.write16 = w16;
region->mmio.write32 = w32;
region->mmio.read = read;
region->mmio.write = write;
}
return region;
@ -312,7 +309,8 @@ void am_mirror(struct address_map *am, uint32_t physical_addr, uint32_t size,
} \
uint32_t region_offset = get_region_offset(page); \
uint32_t page_offset = get_page_offset(addr); \
return region->mmio.name(region->mmio.data, region_offset + page_offset); \
return region->mmio.read(region->mmio.data, region_offset + page_offset, \
(1ull << (sizeof(data_type) * 8)) - 1); \
}
define_read_bytes(read8, uint8_t);
@ -320,19 +318,19 @@ define_read_bytes(read16, uint16_t);
define_read_bytes(read32, uint32_t);
#define define_write_bytes(name, data_type) \
void as_##name(struct address_space *space, uint32_t addr, \
data_type value) { \
void as_##name(struct address_space *space, uint32_t addr, data_type data) { \
page_entry_t page = space->pages[get_page_index(addr)]; \
DCHECK(page); \
int region_handle = get_region_handle(page); \
struct memory_region *region = &space->dc->memory->regions[region_handle]; \
if (region->type == REGION_PHYSICAL) { \
*(data_type *)(space->base + addr) = value; \
*(data_type *)(space->base + addr) = data; \
return; \
} \
uint32_t region_offset = get_region_offset(page); \
uint32_t page_offset = get_page_offset(addr); \
region->mmio.name(region->mmio.data, region_offset + page_offset, value); \
region->mmio.write(region->mmio.data, region_offset + page_offset, data, \
(1ull << (sizeof(data_type) * 8)) - 1); \
}
define_write_bytes(write8, uint8_t);

View File

@ -8,15 +8,17 @@ struct dreamcast;
#define ADDRESS_SPACE_SIZE (UINT64_C(1) << 32)
typedef uint8_t (*r8_cb)(void *, uint32_t);
typedef uint16_t (*r16_cb)(void *, uint32_t);
typedef uint32_t (*r32_cb)(void *, uint32_t);
typedef void (*w8_cb)(void *, uint32_t, uint8_t);
typedef void (*w16_cb)(void *, uint32_t, uint16_t);
typedef void (*w32_cb)(void *, uint32_t, uint32_t);
// helpers for mmio callbacks, assume data is always a uint32_t
#define DATA_SIZE() (ctz64((uint64_t)data_mask + 1) >> 3)
#define READ_DATA(ptr) (*(uint32_t *)(ptr)&data_mask)
#define WRITE_DATA(ptr) \
(*(uint32_t *)(ptr) = (*(uint32_t *)(ptr) & ~data_mask) | (data & data_mask))
enum region_type { REGION_PHYSICAL, REGION_MMIO };
typedef uint32_t (*mmio_read_cb)(void *, uint32_t, uint32_t);
typedef void (*mmio_write_cb)(void *, uint32_t, uint32_t, uint32_t);
struct memory_region {
enum region_type type;
@ -31,12 +33,8 @@ struct memory_region {
struct {
void *data;
r8_cb read8;
r16_cb read16;
r32_cb read32;
w8_cb write8;
w16_cb write16;
w32_cb write32;
mmio_read_cb read;
mmio_write_cb write;
} mmio;
};
};
@ -46,9 +44,10 @@ struct memory;
struct memory_region *memory_create_physical_region(struct memory *memory,
const char *name,
uint32_t size);
struct memory_region *memory_create_mmio_region(
struct memory *memory, const char *name, uint32_t size, void *data,
r8_cb r8, r16_cb r16, r32_cb r32, w8_cb w8, w16_cb w16, w32_cb w32);
struct memory_region *memory_create_mmio_region(struct memory *memory,
const char *name, uint32_t size,
void *data, mmio_read_cb read,
mmio_write_cb write);
uint8_t *memory_translate(struct memory *memory, const char *name,
uint32_t offset);
@ -83,10 +82,10 @@ void memory_destroy(struct memory *memory);
memory_create_physical_region(machine->memory, name, size); \
am_physical(map, region, size, begin, mask); \
}
#define AM_HANDLE(name, r8, r16, r32, w8, w16, w32) \
#define AM_HANDLE(name, read, write) \
{ \
struct memory_region *region = memory_create_mmio_region( \
machine->memory, name, size, self, r8, r16, r32, w8, w16, w32); \
machine->memory, name, size, self, read, write); \
am_mmio(map, region, size, begin, mask); \
}
#define AM_DEVICE(name, cb) \
@ -184,9 +183,9 @@ void as_memcpy(struct address_space *space, uint32_t virtual_dest,
uint8_t as_read8(struct address_space *space, uint32_t addr);
uint16_t as_read16(struct address_space *space, uint32_t addr);
uint32_t as_read32(struct address_space *space, uint32_t addr);
void as_write8(struct address_space *space, uint32_t addr, uint8_t value);
void as_write16(struct address_space *space, uint32_t addr, uint16_t value);
void as_write32(struct address_space *space, uint32_t addr, uint32_t value);
void as_write8(struct address_space *space, uint32_t addr, uint8_t data);
void as_write16(struct address_space *space, uint32_t addr, uint16_t data);
void as_write32(struct address_space *space, uint32_t addr, uint32_t data);
bool as_map(struct address_space *space, const char *name,
const struct address_map *map);

View File

@ -74,7 +74,8 @@ static void pvr_reconfigure_spg(struct pvr *pvr) {
pvr, HZ_TO_NANO(pvr->line_clock));
}
static uint32_t pvr_reg_r32(struct pvr *pvr, uint32_t addr) {
static uint32_t pvr_reg_read(struct pvr *pvr, uint32_t addr,
uint32_t data_mask) {
uint32_t offset = addr >> 2;
reg_read_cb read = pvr_cb[offset].read;
@ -85,7 +86,8 @@ static uint32_t pvr_reg_r32(struct pvr *pvr, uint32_t addr) {
return pvr->reg[offset];
}
static void pvr_reg_w32(struct pvr *pvr, uint32_t addr, uint32_t value) {
static void pvr_reg_write(struct pvr *pvr, uint32_t addr, uint32_t data,
uint32_t data_mask) {
uint32_t offset = addr >> 2;
reg_write_cb write = pvr_cb[offset].write;
@ -96,19 +98,21 @@ static void pvr_reg_w32(struct pvr *pvr, uint32_t addr, uint32_t value) {
}
if (write) {
write(pvr->dc, value);
write(pvr->dc, data);
return;
}
pvr->reg[offset] = (uint32_t)value;
pvr->reg[offset] = data;
}
static uint32_t pvr_palette_r32(struct pvr *pvr, uint32_t addr) {
return *(uint32_t *)&pvr->palette_ram[addr];
static uint32_t pvr_palette_read(struct pvr *pvr, uint32_t addr,
uint32_t data_mask) {
return READ_DATA(&pvr->palette_ram[addr]);
}
static void pvr_palette_w32(struct pvr *pvr, uint32_t addr, uint32_t value) {
*(uint32_t *)&pvr->palette_ram[addr] = value;
static void pvr_palette_write(struct pvr *pvr, uint32_t addr, uint32_t data,
uint32_t data_mask) {
WRITE_DATA(&pvr->palette_ram[addr]);
}
static uint32_t MAP64(uint32_t addr) {
@ -132,27 +136,18 @@ static uint32_t MAP64(uint32_t addr) {
(addr & 0x3));
}
#define define_vram_interleaved_read(name, type) \
static type pvr_vram_interleaved_##name(struct pvr *pvr, uint32_t addr) { \
addr = MAP64(addr); \
return *(type *)&pvr->video_ram[addr]; \
static uint32_t pvr_vram_interleaved_read(struct pvr *pvr, uint32_t addr,
uint32_t data_mask) {
addr = MAP64(addr);
return READ_DATA(&pvr->video_ram[addr]);
}
define_vram_interleaved_read(r8, uint8_t);
define_vram_interleaved_read(r16, uint16_t);
define_vram_interleaved_read(r32, uint32_t);
#define define_vram_interleaved_write(name, type) \
static void pvr_vram_interleaved_##name(struct pvr *pvr, uint32_t addr, \
type value) { \
addr = MAP64(addr); \
*(type *)&pvr->video_ram[addr] = value; \
static void pvr_vram_interleaved_write(struct pvr *pvr, uint32_t addr,
uint32_t data, uint32_t data_mask) {
addr = MAP64(addr);
WRITE_DATA(&pvr->video_ram[addr]);
}
define_vram_interleaved_write(w8, uint8_t);
define_vram_interleaved_write(w16, uint16_t);
define_vram_interleaved_write(w32, uint32_t);
static bool pvr_init(struct device *dev) {
struct pvr *pvr = (struct pvr *)dev;
struct dreamcast *dc = pvr->dc;
@ -199,29 +194,17 @@ REG_W32(pvr_cb, FB_R_CTRL) {
AM_BEGIN(struct pvr, pvr_reg_map);
AM_RANGE(0x00001000, 0x00001fff) AM_MOUNT("palette ram")
AM_RANGE(0x00000000, 0x00000fff) AM_HANDLE("pvr reg",
NULL,
NULL,
(r32_cb)&pvr_reg_r32,
NULL,
NULL,
(w32_cb)&pvr_reg_w32)
(mmio_read_cb)&pvr_reg_read,
(mmio_write_cb)&pvr_reg_write)
AM_RANGE(0x00001000, 0x00001fff) AM_HANDLE("pvr palette",
NULL,
NULL,
(r32_cb)&pvr_palette_r32,
NULL,
NULL,
(w32_cb)&pvr_palette_w32)
(mmio_read_cb)&pvr_palette_read,
(mmio_write_cb)&pvr_palette_write)
AM_END();
AM_BEGIN(struct pvr, pvr_vram_map);
AM_RANGE(0x00000000, 0x007fffff) AM_MOUNT("video ram")
AM_RANGE(0x01000000, 0x017fffff) AM_HANDLE("video ram interleaved",
(r8_cb)&pvr_vram_interleaved_r8,
(r16_cb)&pvr_vram_interleaved_r16,
(r32_cb)&pvr_vram_interleaved_r32,
(w8_cb)&pvr_vram_interleaved_w8,
(w16_cb)&pvr_vram_interleaved_w16,
(w32_cb)&pvr_vram_interleaved_w32)
(mmio_read_cb)&pvr_vram_interleaved_read,
(mmio_write_cb)&pvr_vram_interleaved_write)
AM_END();
// clang-format on

View File

@ -15,8 +15,6 @@
#include "sys/thread.h"
#include "ui/nuklear.h"
DEFINE_OPTION_BOOL(texcache, true, "Enable SIGSEGV-based texture caching");
#define TA_MAX_CONTEXTS 32
#define TA_YUV420_MACROBLOCK_SIZE 384
#define TA_YUV422_MACROBLOCK_SIZE 512
@ -370,12 +368,12 @@ static void ta_init_context(struct ta *ta, uint32_t addr) {
ctx->vertex_type = 0;
}
static void ta_write_context(struct ta *ta, uint32_t addr, uint32_t value) {
static void ta_write_context(struct ta *ta, uint32_t addr, uint32_t data) {
struct tile_ctx *ctx = ta_get_context(ta, addr);
CHECK_NOTNULL(ctx);
CHECK_LT(ctx->size + 4, TA_MAX_PARAMS);
*(uint32_t *)&ctx->params[ctx->size] = value;
*(uint32_t *)&ctx->params[ctx->size] = data;
ctx->size += 4;
// each TA command is either 32 or 64 bytes, with the pcw being in the first
@ -473,7 +471,7 @@ static void ta_register_texture(struct ta *ta, union tsp tsp, union tcw tcw) {
// add write callback in order to invalidate on future writes. the callback
// address will be page aligned, therefore it will be triggered falsely in
// some cases. over invalidate in these cases
if (OPTION_texcache) {
#ifdef NDEBUG
if (!entry->texture_watch) {
entry->texture_watch = add_single_write_watch(
entry->texture, entry->texture_size, &ta_texture_invalidated, entry);
@ -483,7 +481,7 @@ static void ta_register_texture(struct ta *ta, union tsp tsp, union tcw tcw) {
entry->palette_watch = add_single_write_watch(
entry->palette, entry->palette_size, &ta_palette_invalidated, entry);
}
}
#endif
// add modified entries to the trace
if (ta->trace_writer && (new_entry || entry->dirty)) {
@ -771,16 +769,21 @@ static void ta_yuv_process_macroblock(struct ta *ta) {
}
}
static void ta_write_poly_fifo(struct ta *ta, uint32_t addr, uint32_t value) {
ta_write_context(ta, ta->pvr->TA_ISP_BASE->base_address, value);
static void ta_poly_fifo_write(struct ta *ta, uint32_t addr, uint32_t data,
uint32_t data_mask) {
CHECK_EQ(DATA_SIZE(), 4);
ta_write_context(ta, ta->pvr->TA_ISP_BASE->base_address, data);
}
static void ta_write_yuv_fifo(struct ta *ta, uint32_t addr, uint32_t value) {
static void ta_yuv_fifo_write(struct ta *ta, uint32_t addr, uint32_t data,
uint32_t data_mask) {
struct holly *holly = ta->holly;
struct pvr *pvr = ta->pvr;
CHECK_EQ(DATA_SIZE(), 4);
// append data to current macroblock
*(uint32_t *)&ta->yuv_macroblock[ta->yuv_macroblock_pos] = value;
*(uint32_t *)&ta->yuv_macroblock[ta->yuv_macroblock_pos] = data;
ta->yuv_macroblock_pos += 4;
if (ta->yuv_macroblock_pos >= ta->yuv_macroblock_size) {
@ -789,10 +792,11 @@ static void ta_write_yuv_fifo(struct ta *ta, uint32_t addr, uint32_t value) {
}
}
static void ta_write_texture_fifo(struct ta *ta, uint32_t addr,
uint32_t value) {
static void ta_texture_fifo_write(struct ta *ta, uint32_t addr, uint32_t data,
uint32_t data_mask) {
CHECK_EQ(DATA_SIZE(), 4);
addr &= 0xeeffffff;
*(uint32_t *)&ta->video_ram[addr] = value;
*(uint32_t *)&ta->video_ram[addr] = data;
}
static bool ta_init(struct device *dev) {
@ -1001,24 +1005,12 @@ REG_W32(pvr_cb, TA_YUV_TEX_BASE) {
AM_BEGIN(struct ta, ta_fifo_map);
AM_RANGE(0x00000000, 0x007fffff) AM_HANDLE("ta poly fifo",
NULL,
NULL,
NULL,
NULL,
NULL,
(w32_cb)&ta_write_poly_fifo)
(mmio_write_cb)&ta_poly_fifo_write)
AM_RANGE(0x00800000, 0x00ffffff) AM_HANDLE("ta yuv fifo",
NULL,
NULL,
NULL,
NULL,
NULL,
(w32_cb)&ta_write_yuv_fifo)
(mmio_write_cb)&ta_yuv_fifo_write)
AM_RANGE(0x01000000, 0x01ffffff) AM_HANDLE("ta texture fifo",
NULL,
NULL,
NULL,
NULL,
NULL,
(w32_cb)&ta_write_texture_fifo)
(mmio_write_cb)&ta_texture_fifo_write)
AM_END();
// clang-format on

View File

@ -457,83 +457,59 @@ static void sh4_fpscr_updated(struct sh4_ctx *ctx, uint64_t old_fpscr) {
// *size = 4;
// }
#define define_reg_read(name, type) \
static type sh4_reg_##name(struct sh4 *sh4, uint32_t addr) { \
uint32_t offset = SH4_REG_OFFSET(addr); \
reg_read_cb read = sh4_cb[offset].read; \
if (read) { \
return read(sh4->dc); \
} \
return (type)sh4->reg[offset]; \
static uint32_t sh4_reg_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask) {
uint32_t offset = SH4_REG_OFFSET(addr);
reg_read_cb read = sh4_cb[offset].read;
if (read) {
return read(sh4->dc);
}
return sh4->reg[offset];
}
define_reg_read(r8, uint8_t);
define_reg_read(r16, uint16_t);
define_reg_read(r32, uint32_t);
#define define_reg_write(name, type) \
static void sh4_reg_##name(struct sh4 *sh4, uint32_t addr, type value) { \
uint32_t offset = SH4_REG_OFFSET(addr); \
reg_write_cb write = sh4_cb[offset].write; \
if (write) { \
write(sh4->dc, value); \
return; \
} \
sh4->reg[offset] = (uint32_t)value; \
static void sh4_reg_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
uint32_t offset = SH4_REG_OFFSET(addr);
reg_write_cb write = sh4_cb[offset].write;
if (write) {
write(sh4->dc, data);
return;
}
sh4->reg[offset] = data;
}
define_reg_write(w8, uint8_t);
define_reg_write(w16, uint16_t);
define_reg_write(w32, uint32_t);
// with OIX, bit 25, rather than bit 13, determines which 4kb bank to use
#define CACHE_OFFSET(addr, OIX) \
((OIX ? ((addr & 0x2000000) >> 13) : ((addr & 0x2000) >> 1)) | (addr & 0xfff))
#define define_cache_read(name, type) \
static type sh4_cache_##name(struct sh4 *sh4, uint32_t addr) { \
CHECK_EQ(sh4->CCR->ORA, 1u); \
addr = CACHE_OFFSET(addr, sh4->CCR->OIX); \
return *(type *)&sh4->cache[addr]; \
static uint32_t sh4_cache_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask) {
CHECK_EQ(sh4->CCR->ORA, 1u);
addr = CACHE_OFFSET(addr, sh4->CCR->OIX);
return READ_DATA(&sh4->cache[addr]);
}
define_cache_read(r8, uint8_t);
define_cache_read(r16, uint16_t);
define_cache_read(r32, uint32_t);
#define define_cache_write(name, type) \
static void sh4_cache_##name(struct sh4 *sh4, uint32_t addr, type value) { \
CHECK_EQ(sh4->CCR->ORA, 1u); \
addr = CACHE_OFFSET(addr, sh4->CCR->OIX); \
*(type *)&sh4->cache[addr] = value; \
static void sh4_cache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
CHECK_EQ(sh4->CCR->ORA, 1u);
addr = CACHE_OFFSET(addr, sh4->CCR->OIX);
WRITE_DATA(&sh4->cache[addr]);
}
define_cache_write(w8, uint8_t);
define_cache_write(w16, uint16_t);
define_cache_write(w32, uint32_t);
#define define_sq_read(name, type) \
static type sh4_sq_##name(struct sh4 *sh4, uint32_t addr) { \
uint32_t sqi = (addr & 0x20) >> 5; \
unsigned idx = (addr & 0x1c) >> 2; \
return (type)sh4->ctx.sq[sqi][idx]; \
static uint32_t sh4_sq_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask) {
uint32_t sqi = (addr & 0x20) >> 5;
unsigned idx = (addr & 0x1c) >> 2;
return sh4->ctx.sq[sqi][idx];
}
define_sq_read(r8, uint8_t);
define_sq_read(r16, uint16_t);
define_sq_read(r32, uint32_t);
#define define_sq_write(name, type) \
static void sh4_sq_##name(struct sh4 *sh4, uint32_t addr, type value) { \
uint32_t sqi = (addr & 0x20) >> 5; \
uint32_t idx = (addr & 0x1c) >> 2; \
sh4->ctx.sq[sqi][idx] = (uint32_t)value; \
static void sh4_sq_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
uint32_t sqi = (addr & 0x20) >> 5;
uint32_t idx = (addr & 0x1c) >> 2;
sh4->ctx.sq[sqi][idx] = data;
}
define_sq_write(w8, uint8_t);
define_sq_write(w16, uint16_t);
define_sq_write(w32, uint32_t);
static bool sh4_init(struct device *dev) {
struct sh4 *sh4 = (struct sh4 *)dev;
struct dreamcast *dc = sh4->dc;
@ -976,12 +952,8 @@ AM_BEGIN(struct sh4, sh4_data_map)
// internal registers
AM_RANGE(0x1e000000, 0x1fffffff) AM_HANDLE("sh4 reg",
(r8_cb)&sh4_reg_r8,
(r16_cb)&sh4_reg_r16,
(r32_cb)&sh4_reg_r32,
(w8_cb)&sh4_reg_w8,
(w16_cb)&sh4_reg_w16,
(w32_cb)&sh4_reg_w32)
(mmio_read_cb)&sh4_reg_read,
(mmio_write_cb)&sh4_reg_write)
// physical mirrors
AM_RANGE(0x20000000, 0x3fffffff) AM_MIRROR(0x00000000) // p0
@ -994,18 +966,10 @@ AM_BEGIN(struct sh4, sh4_data_map)
// internal cache and sq only accessible through p4
AM_RANGE(0x7c000000, 0x7fffffff) AM_HANDLE("sh4 cache",
(r8_cb)&sh4_cache_r8,
(r16_cb)&sh4_cache_r16,
(r32_cb)&sh4_cache_r32,
(w8_cb)&sh4_cache_w8,
(w16_cb)&sh4_cache_w16,
(w32_cb)&sh4_cache_w32)
(mmio_read_cb)&sh4_cache_read,
(mmio_write_cb)&sh4_cache_write)
AM_RANGE(0xe0000000, 0xe3ffffff) AM_HANDLE("sh4 sq",
(r8_cb)&sh4_sq_r8,
(r16_cb)&sh4_sq_r16,
(r32_cb)&sh4_sq_r32,
(w8_cb)&sh4_sq_w8,
(w16_cb)&sh4_sq_w16,
(w32_cb)&sh4_sq_w32)
(mmio_read_cb)&sh4_sq_read,
(mmio_write_cb)&sh4_sq_write)
AM_END();
// clang-format on

View File

@ -1,6 +1,5 @@
#include "jit/frontend/sh4/sh4_translate.h"
#include "core/assert.h"
#include "core/option.h"
#include "core/profiler.h"
#include "jit/frontend/sh4/sh4_analyze.h"
#include "jit/frontend/sh4/sh4_context.h"
@ -8,8 +7,6 @@
#include "jit/frontend/sh4/sh4_frontend.h"
#include "jit/ir/ir.h"
DEFINE_OPTION_BOOL(fastmem, true, "Enable SIGSEGV-based fast memory access");
//
// fsca estimate lookup table
//
@ -40,15 +37,22 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
// helper functions for accessing the sh4 context, macros are used to cut
// down on copy and paste
#ifdef NDEBUG
#define load_guest(addr, type) \
((!OPTION_fastmem || (flags & SH4_SLOWMEM)) ? ir_load_slow(ir, addr, type) \
((flags & SH4_SLOWMEM) ? ir_load_slow(ir, addr, type) \
: ir_load_fast(ir, addr, type))
#define store_guest(addr, v) \
do { \
((!OPTION_fastmem || (flags & SH4_SLOWMEM)) ? ir_store_slow(ir, addr, v) \
((flags & SH4_SLOWMEM) ? ir_store_slow(ir, addr, v) \
: ir_store_fast(ir, addr, v)); \
} while (0)
#else
#define load_guest(addr, type) ir_load_slow(ir, addr, type)
#define store_guest(addr, v) \
do { \
ir_store_slow(ir, addr, v); \
} while (0)
#endif
#define load_gpr(n, type) \
ir_load_context(ir, offsetof(struct sh4_ctx, r[n]), type)