mirror of https://github.com/inolen/redream.git
fixed compilation errors under msvc caused by recent c refactor
parent 27ff52335f
commit ce36ab436d
@@ -68,10 +68,10 @@ static int interval_tree_cmp(const struct rb_node *rb_lhs,
   struct interval_node *lhs = rb_entry(rb_lhs, struct interval_node, base);
   struct interval_node *rhs = rb_entry(rb_rhs, struct interval_node, base);
 
-  int cmp = lhs->low - rhs->low;
+  int cmp = (int)(lhs->low - rhs->low);
 
   if (!cmp) {
-    cmp = lhs->high - rhs->high;
+    cmp = (int)(lhs->high - rhs->high);
   }
 
   return cmp;
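Note: the casts above silence MSVC's narrowing-conversion warnings (C4244) by making the wide-to-int conversion explicit. The subtraction trick only yields a valid ordering when the two keys are close together; a minimal sketch, assuming low/high are unsigned 32-bit values (an assumption; their actual type is declared in the tree header):

    #include <stdint.h>

    /* sketch: comparator via unsigned subtraction. the explicit (int) cast
       documents the narrowing. the result is only a correct ordering when
       |a - b| < 2^31, which holds for nearby address-sized intervals */
    static int cmp_u32(uint32_t a, uint32_t b) {
      return (int)(a - b);
    }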
@@ -29,41 +29,41 @@ void list_sort(struct list *list, list_node_cmp cmp);
   for (struct list_node *it = (list)->head, *it##_next = it ? it->next : NULL; \
        it; it = it##_next, it##_next = it ? it->next : NULL)
 
-#define list_entry(n, type, member) container_of(n, type, member)
+#define list_entry(n, type, member) container_of_safe(n, type, member)
 
 #define list_add_after_entry(list, after, member, n) \
   list_add_after(list, (after) ? &(after)->member : NULL, &(n)->member)
 
 #define list_first_entry(list, type, member) \
-  ((list)->head ? list_entry((list)->head, type, member) : NULL)
+  list_entry((list)->head, type, member)
 
 #define list_last_entry(list, type, member) \
-  ((list)->tail ? list_entry((list)->tail, type, member) : NULL)
+  list_entry((list)->tail, type, member)
 
-#define list_next_entry(n, member) \
-  ((n)->member.next ? list_entry((n)->member.next, TYPEOF(*(n)), member) : NULL)
+#define list_next_entry(n, type, member) \
+  list_entry((n)->member.next, type, member)
 
-#define list_prev_entry(n, member) \
-  ((n)->member.prev ? list_entry((n)->member.prev, TYPEOF(*(n)), member) : NULL)
+#define list_prev_entry(n, type, member) \
+  list_entry((n)->member.prev, type, member)
 
 #define list_for_each_entry(it, list, type, member) \
   for (type *it = list_first_entry(list, type, member); it; \
-       it = list_next_entry(it, member))
+       it = list_next_entry(it, type, member))
 
-#define list_for_each_entry_safe(it, list, type, member) \
-  for (type *it = list_first_entry(list, type, member), \
-            *it##_next = it ? list_next_entry(it, member) : NULL; \
-       it; \
-       it = it##_next, it##_next = it ? list_next_entry(it, member) : NULL)
+#define list_for_each_entry_safe(it, list, type, member) \
+  for (type *it = list_first_entry(list, type, member), \
+            *it##_next = it ? list_next_entry(it, type, member) : NULL; \
+       it; it = it##_next, \
+            it##_next = it ? list_next_entry(it, type, member) : NULL)
 
 #define list_for_each_entry_reverse(it, list, type, member) \
   for (type *it = list_last_entry(list, type, member); it; \
-       it = list_prev_entry(it, member))
+       it = list_prev_entry(it, type, member))
 
-#define list_for_each_entry_safe_reverse(it, list, type, member) \
-  for (type *it = list_last_entry(list, type, member), \
-            *it##_next = it ? list_prev_entry(it, member) : NULL; \
-       it; \
-       it = it##_next, it##_next = it ? list_prev_entry(it, member) : NULL)
+#define list_for_each_entry_safe_reverse(it, list, type, member) \
+  for (type *it = list_last_entry(list, type, member), \
+            *it##_next = it ? list_prev_entry(it, type, member) : NULL; \
+       it; it = it##_next, \
+            it##_next = it ? list_prev_entry(it, type, member) : NULL)
 
 #endif
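Note: the old entry macros recovered the entry type with TYPEOF (GCC's typeof) and NULL-checked at each call site; MSVC's C compiler has no typeof, so the type is now threaded through explicitly and the NULL handling is centralized in container_of_safe. A sketch of a NULL-propagating container_of_safe, assuming the usual offsetof-based container_of (the project's real definitions live in its core headers):

    #include <stddef.h>

    /* recover the enclosing struct from a pointer to one of its members */
    #define container_of(ptr, type, member) \
      ((type *)((char *)(ptr)-offsetof(type, member)))

    /* hypothetical: map a NULL node to a NULL entry, so macros like
       list_first_entry no longer need their own ternaries */
    #define container_of_safe(ptr, type, member) \
      ((ptr) ? container_of(ptr, type, member) : NULL)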
@@ -2,6 +2,13 @@
 #include "core/assert.h"
 #include "core/core.h"
 
+#define SWAP_NODE(a, b) \
+  do {                  \
+    mm_type tmp = (a);  \
+    (a) = (b);          \
+    (b) = tmp;          \
+  } while (0)
+
 static inline bool mm_is_max_level(int index) {
   int n = index + 1;
   int log2 = 0;
@@ -47,7 +54,7 @@ static void mm_sift_up(mm_type *begin, int size, int index, mm_cmp cmp) {
     // the node with its parent and check min (max) levels up to the root until
     // the min-max order property is satisfied
     if (cmp(*(begin + index), *(begin + ancestor_index)) ^ max_level) {
-      SWAP(*(begin + ancestor_index), *(begin + index));
+      SWAP_NODE(*(begin + ancestor_index), *(begin + index));
       index = ancestor_index;
     }
     // if the node is greater (smaller) than its parent, then it is greater
@@ -68,7 +75,7 @@ static void mm_sift_up(mm_type *begin, int size, int index, mm_cmp cmp) {
     }
 
     // swap node with parent
-    SWAP(*(begin + ancestor_index), *(begin + index));
+    SWAP_NODE(*(begin + ancestor_index), *(begin + index));
     index = ancestor_index;
   }
 }
@@ -102,7 +109,7 @@ static void mm_sift_down(mm_type *begin, int size, int index, mm_cmp cmp) {
     }
 
     // swap the node with the smallest (largest) descendant
-    SWAP(*(begin + index), *(begin + smallest));
+    SWAP_NODE(*(begin + index), *(begin + smallest));
 
     // if the swapped node was a child, then the current node, its child, and
     // its grandchild are all ordered correctly at this point satisfying the
@@ -114,7 +121,7 @@ static void mm_sift_down(mm_type *begin, int size, int index, mm_cmp cmp) {
     // if the node's new parent is now smaller than it, swap again
     int parent = mm_parent(smallest);
     if (cmp(*(begin + parent), *(begin + smallest)) ^ max_level) {
-      SWAP(*(begin + parent), *(begin + smallest));
+      SWAP_NODE(*(begin + parent), *(begin + smallest));
    }
 
     // if the swapped node was a grandchild, iteration must continue to
@@ -185,8 +192,8 @@ void mm_pop_min(mm_type *begin, int size, mm_cmp cmp) {
   }
 
   mm_type *min = mm_find_min(begin, size, cmp);
-  SWAP(*min, *(begin + size - 1));
-  mm_sift_down(begin, size - 1, min - begin, cmp);
+  SWAP_NODE(*min, *(begin + size - 1));
+  mm_sift_down(begin, size - 1, (int)(min - begin), cmp);
 }
 
 void mm_pop_max(mm_type *begin, int size, mm_cmp cmp) {
@@ -195,6 +202,6 @@ void mm_pop_max(mm_type *begin, int size, mm_cmp cmp) {
   }
 
   mm_type *max = mm_find_max(begin, size, cmp);
-  SWAP(*max, *(begin + size - 1));
-  mm_sift_down(begin, size - 1, max - begin, cmp);
+  SWAP_NODE(*max, *(begin + size - 1));
+  mm_sift_down(begin, size - 1, (int)(max - begin), cmp);
 }
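Note: SWAP was presumably a generic core macro built on GCC's typeof, which MSVC's C front end doesn't offer; SWAP_NODE pins the temporary to the heap's element type instead, and the (int) casts on min - begin / max - begin make the ptrdiff_t narrowing explicit. The two swap idioms side by side (the typeof version is an assumption about the old macro):

    /* GNU C only: generic swap, temporary typed via typeof */
    #define SWAP(a, b)       \
      do {                   \
        typeof(a) tmp = (a); \
        (a) = (b);           \
        (b) = tmp;           \
      } while (0)

    /* portable: the temporary's type is fixed to the element type */
    #define SWAP_NODE(a, b) \
      do {                  \
        mm_type tmp = (a);  \
        (a) = (b);          \
        (b) = tmp;          \
      } while (0)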
@@ -41,40 +41,23 @@ struct rb_node *rb_last(struct rb_tree *t);
 struct rb_node *rb_prev(struct rb_node *n);
 struct rb_node *rb_next(struct rb_node *n);
 
-#define rb_entry(n, type, member) container_of(n, type, member)
+#define rb_entry(n, type, member) container_of_safe(n, type, member)
 
-#define rb_first_entry(t, type, member)       \
-  ({                                          \
-    struct rb_node *n = rb_first(t);          \
-    (n ? rb_entry(n, type, member) : NULL);   \
-  })
+#define rb_first_entry(t, type, member) rb_entry(rb_first(t), type, member)
 
-#define rb_last_entry(t, type, member)        \
-  ({                                          \
-    struct rb_node *n = rb_last(t);           \
-    n ? rb_entry(n, type, member) : NULL      \
-  })
+#define rb_last_entry(t, type, member) rb_entry(rb_last(t), type, member)
 
-#define rb_next_entry(entry, member)                    \
-  ({                                                    \
-    struct rb_node *n = rb_next(&entry->member);        \
-    (n ? rb_entry(n, TYPEOF(*(entry)), member) : NULL); \
-  })
+#define rb_next_entry(entry, type, member) \
+  rb_entry(rb_next(&entry->member), type, member)
 
-#define rb_prev_entry(entry, member)                    \
-  ({                                                    \
-    struct rb_node *n = rb_prev(&entry->member);        \
-    (n ? rb_entry(n, TYPEOF(*(entry)), member) : NULL); \
-  })
+#define rb_prev_entry(entry, type, member) \
+  rb_entry(rb_prev(&entry->member), type, member)
 
-#define rb_find_entry(t, search, member, cb)                \
-  ({                                                        \
-    struct rb_node *it = rb_find(t, &(search)->member, cb); \
-    (it ? rb_entry(it, TYPEOF(*search), member) : NULL);    \
-  })
+#define rb_find_entry(t, search, type, member, cb) \
+  rb_entry(rb_find(t, &(search)->member, cb), type, member)
 
 #define rb_for_each_entry(it, t, type, member) \
   for (type *it = rb_first_entry(t, type, member); it; \
-       it = rb_next_entry(it, member))
+       it = rb_next_entry(it, type, member))
 
 #endif
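Note: the removed ({ ... }) blocks are GCC statement expressions, another extension MSVC's C compiler rejects; once rb_entry propagates NULL itself, each macro collapses to a single standard expression. Roughly (a sketch, not the exact project definitions):

    /* GNU C statement expression: a brace block that yields a value */
    #define first_entry_gnu(t, type, member)      \
      ({                                          \
        struct rb_node *n = rb_first(t);          \
        n ? container_of(n, type, member) : NULL; \
      })

    /* portable: push the NULL check into a helper so the macro body is
       one plain expression any C compiler accepts */
    #define first_entry_portable(t, type, member) \
      container_of_safe(rb_first(t), type, member)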
@@ -102,8 +102,8 @@ static int tracer_texture_cmp(const struct rb_node *rb_lhs,
       rb_entry(rb_lhs, const struct tracer_texture_entry, live_it);
   const struct tracer_texture_entry *rhs =
       rb_entry(rb_rhs, const struct tracer_texture_entry, live_it);
-  return tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
-         tr_texture_key(rhs->base.tsp, rhs->base.tcw);
+  return (int)(tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
+               tr_texture_key(rhs->base.tsp, rhs->base.tcw));
 }
 
 static struct rb_callbacks tracer_texture_cb = {&tracer_texture_cmp, NULL,
@@ -116,7 +116,8 @@ static struct tracer_texture_entry *tracer_find_texture(struct tracer *tracer,
   search.base.tsp = tsp;
   search.base.tcw = tcw;
 
-  return rb_find_entry(&tracer->live_textures, &search, live_it,
+  return rb_find_entry(&tracer->live_textures, &search,
+                       struct tracer_texture_entry, live_it,
                        &tracer_texture_cb);
 }
 
@@ -339,9 +340,9 @@ static void tracer_render_scrubber_menu(struct tracer *tracer) {
   ctx->style.window.spacing = nk_vec2(0.0f, 0.0f);
 
   struct nk_panel layout;
-  struct nk_rect bounds = {0.0f,
-                           tracer->window->height - SCRUBBER_WINDOW_HEIGHT,
-                           tracer->window->width, SCRUBBER_WINDOW_HEIGHT};
+  struct nk_rect bounds = {
+      0.0f, (float)tracer->window->height - SCRUBBER_WINDOW_HEIGHT,
+      (float)tracer->window->width, SCRUBBER_WINDOW_HEIGHT};
   nk_flags flags = NK_WINDOW_NO_SCROLLBAR;
 
   if (nk_begin(ctx, &layout, "context scrubber", bounds, flags)) {
@@ -579,7 +580,7 @@ static void tracer_param_tooltip(struct tracer *tracer, int list_type,
 
   // TODO separator
 
-  nk_layout_row_static(ctx, 40.0f, 40.0f, 1);
+  nk_layout_row_static(ctx, 40.0f, 40, 1);
   nk_image(ctx, nk_image_id((int)surf->texture));
 
   nk_layout_row_dynamic(ctx, ctx->style.font.height, 1);
@@ -636,7 +637,8 @@ static void tracer_render_side_menu(struct tracer *tracer) {
   if (nk_tree_push(ctx, NK_TREE_TAB, "filters", NK_MINIMIZED)) {
     for (int i = 0; i < TA_NUM_PARAMS; i++) {
       snprintf(label, sizeof(label), "Show %s", s_param_names[i]);
-      nk_checkbox_text(ctx, label, strlen(label), &tracer->show_params[i]);
+      nk_checkbox_text(ctx, label, (int)strlen(label),
+                       &tracer->show_params[i]);
     }
 
     nk_tree_pop(ctx);
@@ -697,7 +699,7 @@ static void tracer_render_side_menu(struct tracer *tracer) {
 
   // texture menu
   if (nk_tree_push(ctx, NK_TREE_TAB, "textures", 0)) {
-    nk_layout_row_static(ctx, 40.0f, 40.0f, 4);
+    nk_layout_row_static(ctx, 40.0f, 40, 4);
 
     rb_for_each_entry(entry, &tracer->live_textures,
                       struct tracer_texture_entry, live_it) {
@@ -714,18 +716,18 @@ static void tracer_render_side_menu(struct tracer *tracer) {
       ctx->style.window.spacing = nk_vec2(0.0f, 0.0f);
 
       if (nk_tooltip_begin(ctx, &tooltip, 380.0f)) {
-        nk_layout_row_static(ctx, 184.0f, 184.0f, 2);
+        nk_layout_row_static(ctx, 184.0f, 184, 2);
 
         if (nk_group_begin(ctx, &tab, "texture preview",
                            NK_WINDOW_NO_SCROLLBAR)) {
-          nk_layout_row_static(ctx, 184.0f, 184.0f, 1);
+          nk_layout_row_static(ctx, 184.0f, 184, 1);
           nk_image(ctx, nk_image_id((int)entry->base.handle));
           nk_group_end(ctx);
         }
 
         if (nk_group_begin(ctx, &tab, "texture info",
                            NK_WINDOW_NO_SCROLLBAR)) {
-          nk_layout_row_static(ctx, ctx->style.font.height, 184.0f, 1);
+          nk_layout_row_static(ctx, ctx->style.font.height, 184, 1);
           nk_labelf(ctx, NK_TEXT_LEFT, "addr: 0x%08x",
                     entry->base.tcw.texture_addr << 3);
           nk_labelf(ctx, NK_TEXT_LEFT, "format: %s",
@@ -859,7 +861,7 @@ struct tracer *tracer_create(struct window *window) {
   tracer->window = window;
   tracer->listener = (struct window_listener){
       tracer, &tracer_paint, NULL, &tracer_keydown,
-      NULL,   NULL,          &tracer_close, {}};
+      NULL,   NULL,          &tracer_close, {0}};
   tracer->provider =
       (struct texture_provider){tracer, &tracer_texture_provider_find_texture};
   tracer->rb = window->rb;
@@ -142,5 +142,5 @@ struct disc *disc_create_gdi(const char *filename) {
 }
 
 void disc_destroy(struct disc *disc) {
-  return disc->destroy(disc);
+  disc->destroy(disc);
 }
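Note: "return expr;" in a function returning void is a C constraint violation (C99 6.8.6.4) that MSVC diagnoses even when the expression itself has type void; GCC and Clang quietly allow it. Dropping the return keyword is the portable form, for illustration:

    static void helper(int *x) { *x = 0; }

    void wrapper(int *x) {
      helper(x); /* fine everywhere; 'return helper(x);' is C++-only */
    }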
@@ -3,7 +3,7 @@
 
 #include "sys/filesystem.h"
 
-static const int SECTOR_SIZE = 2352;
+#define SECTOR_SIZE 2352
 
 struct track {
   int num;
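Note: in C (unlike C++) a static const int is not an integer constant expression, so an array sized by SECTOR_SIZE becomes a variable-length array, and MSVC implements no VLAs; a #define keeps buffers like sector_data plain fixed-size arrays. Illustration:

    static const int N_CONST = 2352; /* C: not a constant expression */
    #define N_DEFINE 2352            /* always a constant expression */

    void read_sector(void) {
      /* unsigned char a[N_CONST]; -- a VLA in C99, rejected by MSVC */
      unsigned char b[N_DEFINE]; /* plain fixed-size array, portable */
      (void)b;
    }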
@@ -9,8 +9,8 @@
 #define SWAP_24(fad) \
   (((fad & 0xff) << 16) | (fad & 0x00ff00) | ((fad & 0xff0000) >> 16))
 
-static const int SPI_CMD_SIZE = 12;
-static const int SUBCODE_SIZE = 100;
+#define SPI_CMD_SIZE 12
+#define SUBCODE_SIZE 100
 
 // internal gdrom state machine
 enum gd_event {
@@ -145,7 +145,7 @@ REG_W32(struct holly *hl, SB_C2DST) {
   }
 
   // FIXME what are SB_LMMODE0 / SB_LMMODE1
-  struct sh4_dtr dtr = {};
+  struct sh4_dtr dtr = {0};
   dtr.channel = 2;
   dtr.rw = false;
   dtr.addr = *hl->SB_C2DSTAT;
@@ -188,7 +188,7 @@ REG_W32(struct holly *hl, SB_GDST) {
     uint8_t sector_data[SECTOR_SIZE];
     int n = gdrom_dma_read(hl->gdrom, sector_data, sizeof(sector_data));
 
-    struct sh4_dtr dtr = {};
+    struct sh4_dtr dtr = {0};
     dtr.channel = 0;
     dtr.rw = true;
     dtr.data = sector_data;
@@ -341,9 +341,9 @@ static bool holly_init(struct device *dev) {
   return true;
 }
 
-void holly_raise_interrupt(struct holly *hl, enum holly_interrupt intr) {
-  enum holly_interrupt_type type = intr & HOLLY_INTC_MASK;
-  uint32_t irq = (uint32_t)(intr & ~HOLLY_INTC_MASK);
+void holly_raise_interrupt(struct holly *hl, holly_interrupt_t intr) {
+  enum holly_interrupt_type type = HOLLY_INTERRUPT_TYPE(intr);
+  uint32_t irq = HOLLY_INTERRUPT_IRQ(intr);
 
   if (intr == HOLLY_INTC_PCVOINT) {
     maple_vblank(hl->maple);
@@ -366,9 +366,9 @@ void holly_raise_interrupt(struct holly *hl, holly_interrupt_t intr) {
   holly_update_sh4_interrupts(hl);
 }
 
-void holly_clear_interrupt(struct holly *hl, enum holly_interrupt intr) {
-  enum holly_interrupt_type type = intr & HOLLY_INTC_MASK;
-  uint32_t irq = (uint32_t)(intr & ~HOLLY_INTC_MASK);
+void holly_clear_interrupt(struct holly *hl, holly_interrupt_t intr) {
+  enum holly_interrupt_type type = HOLLY_INTERRUPT_TYPE(intr);
+  uint32_t irq = HOLLY_INTERRUPT_IRQ(intr);
 
   switch (type) {
     case HOLLY_INTC_NRM:
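Note: the recurring "= {}" to "= {0}" changes throughout this commit share one root cause: an empty braced initializer is a GNU extension in C (MSVC only accepts it as C++), while {0} is standard and zero-initializes the entire aggregate. Sketch:

    struct dtr_sketch { /* hypothetical stand-in for sh4_dtr */
      int channel;
      int rw;
      void *data;
    };

    void init_sketch(void) {
      /* struct dtr_sketch a = {}; -- GNU C extension, an error on MSVC */
      struct dtr_sketch b = {0}; /* first member 0, remainder zeroed */
      (void)b;
    }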
@@ -28,8 +28,8 @@ struct holly {
 #undef HOLLY_REG
 };
 
-void holly_raise_interrupt(struct holly *hl, enum holly_interrupt intr);
-void holly_clear_interrupt(struct holly *hl, enum holly_interrupt intr);
+void holly_raise_interrupt(struct holly *hl, holly_interrupt_t intr);
+void holly_clear_interrupt(struct holly *hl, holly_interrupt_t intr);
 
 struct holly *holly_create(struct dreamcast *dc);
 void holly_destroy(struct holly *hl);
@@ -12,138 +12,143 @@ enum {
 };
 
 // interrupts
-static const uint64_t HOLLY_INTC_MASK = 0x0000000f00000000;
+#define HOLLY_INTERRUPT(type, irq) (((uint64_t)type << 32) | irq)
+#define HOLLY_INTERRUPT_TYPE(intr) (intr >> 32)
+#define HOLLY_INTERRUPT_IRQ(intr) ((uint32_t)intr)
 
 enum holly_interrupt_type {
-  HOLLY_INTC_NRM = 0x100000000,
-  HOLLY_INTC_EXT = 0x200000000,
-  HOLLY_INTC_ERR = 0x300000000
+  HOLLY_INTC_NRM = 0x1,
+  HOLLY_INTC_EXT = 0x2,
+  HOLLY_INTC_ERR = 0x3
 };
 
-enum holly_interrupt {
-  //
-  // HOLLY_INTC_NRM
-  //
-  // Video End of Render
-  HOLLY_INTC_PCEOVINT = HOLLY_INTC_NRM | 0x1,
-  // ISP End of Render
-  HOLLY_INTC_PCEOIINT = HOLLY_INTC_NRM | 0x2,
-  // TSP End of Render
-  HOLLY_INTC_PCEOTINT = HOLLY_INTC_NRM | 0x4,
-  // VBlank In
-  HOLLY_INTC_PCVIINT = HOLLY_INTC_NRM | 0x8,
-  // VBlank Out
-  HOLLY_INTC_PCVOINT = HOLLY_INTC_NRM | 0x10,
-  // HBlank In
-  HOLLY_INTC_PCHIINT = HOLLY_INTC_NRM | 0x20,
-  // End Of YUV Data Storage
-  HOLLY_INTC_TAYUVINT = HOLLY_INTC_NRM | 0x40,
-  // End Of Opaque List Storage
-  HOLLY_INTC_TAEOINT = HOLLY_INTC_NRM | 0x80,
-  // End Of Opaque Modifier Volume List Storage
-  HOLLY_INTC_TAEOMINT = HOLLY_INTC_NRM | 0x100,
-  // End Of Translucent List Storage
-  HOLLY_INTC_TAETINT = HOLLY_INTC_NRM | 0x200,
-  // End Of Translucent Modifier Volume List Storage
-  HOLLY_INTC_TAETMINT = HOLLY_INTC_NRM | 0x400,
-  // PVR End of DMA
-  HOLLY_INTC_PIDEINT = HOLLY_INTC_NRM | 0x800,
-  // MAPLE End of DMA
-  HOLLY_INTC_MDEINT = HOLLY_INTC_NRM | 0x1000,
-  // MAPLE VBlank Over
-  HOLLY_INTC_MVOINT = HOLLY_INTC_NRM | 0x2000,
-  // G1 End of DMA
-  HOLLY_INTC_G1DEINT = HOLLY_INTC_NRM | 0x4000,
-  // G2 End of AICA-DMA
-  HOLLY_INTC_G2DEAINT = HOLLY_INTC_NRM | 0x8000,
-  // G2 End of Ext-DMA1
-  HOLLY_INTC_G2DE1INT = HOLLY_INTC_NRM | 0x10000,
-  // G2 End of Ext-DMA2
-  HOLLY_INTC_G2DE2INT = HOLLY_INTC_NRM | 0x20000,
-  // G2 End of Dev-DMA
-  HOLLY_INTC_G2DEDINT = HOLLY_INTC_NRM | 0x40000,
-  // End of ch2-DMA
-  HOLLY_INTC_DTDE2INT = HOLLY_INTC_NRM | 0x80000,
-  // End of Sort-DMA
-  HOLLY_INTC_DTDESINT = HOLLY_INTC_NRM | 0x100000,
-  // End Of Punch Through List Storage
-  HOLLY_INTC_TAEPTIN = HOLLY_INTC_NRM | 0x200000,
-  //
-  // HOLLY_INTC_EXT
-  //
-  // From GD-ROM Drive
-  HOLLY_INTC_G1GDINT = HOLLY_INTC_EXT | 0x1,
-  // From AICA
-  HOLLY_INTC_G2AICINT = HOLLY_INTC_EXT | 0x2,
-  // From Modem
-  HOLLY_INTC_G2MDMINT = HOLLY_INTC_EXT | 0x4,
-  // From External Device
-  HOLLY_INTC_G2EXTINT = HOLLY_INTC_EXT | 0x8,
-  //
-  // HOLLY_INTC_ERR
-  //
-  // ISP Out of Cache
-  HOLLY_INTC_PCIOCINT = HOLLY_INTC_ERR | 0x1,
-  // Hazard Processing of Strip Buffer
-  HOLLY_INTC_PCHZDINT = HOLLY_INTC_ERR | 0x2,
-  // ISP/TSP Parameter Limit Address
-  HOLLY_INTC_TAPOFINT = HOLLY_INTC_ERR | 0x4,
-  // Object List Limit Address
-  HOLLY_INTC_TALOFINT = HOLLY_INTC_ERR | 0x8,
-  // Illegal Parameter Input
-  HOLLY_INTC_TAIPINT = HOLLY_INTC_ERR | 0x10,
-  // TA FIFO Over Flow
-  HOLLY_INTC_TAFOFINT = HOLLY_INTC_ERR | 0x20,
-  // PVR Illegal Address Set
-  HOLLY_INTC_PIIAINT = HOLLY_INTC_ERR | 0x40,
-  // PVR DMA Over Run
-  HOLLY_INTC_PIORINT = HOLLY_INTC_ERR | 0x80,
-  // MAPLE Illegal Address Set
-  HOLLY_INTC_MIAINT = HOLLY_INTC_ERR | 0x100,
-  // MAPLE DMA Over Run
-  HOLLY_INTC_MORINT = HOLLY_INTC_ERR | 0x200,
-  // MAPLE Write FIFO Overf Flow
-  HOLLY_INTC_MFOFINT = HOLLY_INTC_ERR | 0x400,
-  // MAPLE Illegal Command
-  HOLLY_INTC_MICINT = HOLLY_INTC_ERR | 0x800,
-  // G1 Illegal Address Set
-  HOLLY_INTC_G1IAINT = HOLLY_INTC_ERR | 0x1000,
-  // G1 DMA Over Run
-  HOLLY_INTC_G1ORINT = HOLLY_INTC_ERR | 0x2000,
-  // G1 Access at DMA
-  HOLLY_INTC_G1ATINT = HOLLY_INTC_ERR | 0x4000,
-  // G2 AICA-DMA Illegal Address Set
-  HOLLY_INTC_G2IAAINT = HOLLY_INTC_ERR | 0x8000,
-  // G2 Ext1-DMA Illegal Address Set
-  HOLLY_INTC_G2IA1INT = HOLLY_INTC_ERR | 0x10000,
-  // G2 Ext2-DMA Illegal Address Set
-  HOLLY_INTC_G2IA2INT = HOLLY_INTC_ERR | 0x20000,
-  // G2 Dev-DMA Illegal Address Set
-  HOLLY_INTC_G2IADINT = HOLLY_INTC_ERR | 0x40000,
-  // G2 AICA-DMA Over Run
-  HOLLY_INTC_G2ORAINT = HOLLY_INTC_ERR | 0x80000,
-  // G2 Ext1-DMA Over Run
-  HOLLY_INTC_G2OR1INT = HOLLY_INTC_ERR | 0x100000,
-  // G2 Ext2-DMA Over Run
-  HOLLY_INTC_G2OR2INT = HOLLY_INTC_ERR | 0x200000,
-  // G2 Dev-DMA Over Run
-  HOLLY_INTC_G2ORDINT = HOLLY_INTC_ERR | 0x400000,
-  // G2 AICA-DMA Time Out
-  HOLLY_INTC_G2TOAINT = HOLLY_INTC_ERR | 0x800000,
-  // G2 Ext1-DMA Time Out
-  HOLLY_INTC_G2TO1INT = HOLLY_INTC_ERR | 0x1000000,
-  // G2 Ext2-DMA Time Out
-  HOLLY_INTC_G2TO2INT = HOLLY_INTC_ERR | 0x2000000,
-  // G2 Dev-DMA Time Out
-  HOLLY_INTC_G2TODINT = HOLLY_INTC_ERR | 0x4000000,
-  // G2 Time Out in CPU Accessing
-  HOLLY_INTC_G2TOCINT = HOLLY_INTC_ERR | 0x8000000,
-  // Sort-DMA Command Error
-  HOLLY_INTC_DTCESINT = HOLLY_INTC_ERR | 0x10000000,
-  HOLLY_INTC_RESERVED1 = HOLLY_INTC_ERR | 0x20000000,
-  HOLLY_INTC_RESERVED2 = HOLLY_INTC_ERR | 0x40000000,
-  // SH4 Accessing to Inhibited Area
-  HOLLY_INTC_CIHINT = HOLLY_INTC_ERR | 0x80000000
-};
+// using a typedef and defines here as msvc (as of visual studio 2015) doesn't
+// support 64-bit enums
+typedef uint64_t holly_interrupt_t;
+
+//
+// HOLLY_INTC_NRM
+//
+// Video End of Render
+#define HOLLY_INTC_PCEOVINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x1)
+// ISP End of Render
+#define HOLLY_INTC_PCEOIINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x2)
+// TSP End of Render
+#define HOLLY_INTC_PCEOTINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x4)
+// VBlank In
+#define HOLLY_INTC_PCVIINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x8)
+// VBlank Out
+#define HOLLY_INTC_PCVOINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x10)
+// HBlank In
+#define HOLLY_INTC_PCHIINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x20)
+// End Of YUV Data Storage
+#define HOLLY_INTC_TAYUVINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x40)
+// End Of Opaque List Storage
+#define HOLLY_INTC_TAEOINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x80)
+// End Of Opaque Modifier Volume List Storage
+#define HOLLY_INTC_TAEOMINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x100)
+// End Of Translucent List Storage
+#define HOLLY_INTC_TAETINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x200)
+// End Of Translucent Modifier Volume List Storage
+#define HOLLY_INTC_TAETMINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x400)
+// PVR End of DMA
+#define HOLLY_INTC_PIDEINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x800)
+// MAPLE End of DMA
+#define HOLLY_INTC_MDEINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x1000)
+// MAPLE VBlank Over
+#define HOLLY_INTC_MVOINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x2000)
+// G1 End of DMA
+#define HOLLY_INTC_G1DEINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x4000)
+// G2 End of AICA-DMA
+#define HOLLY_INTC_G2DEAINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x8000)
+// G2 End of Ext-DMA1
+#define HOLLY_INTC_G2DE1INT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x10000)
+// G2 End of Ext-DMA2
+#define HOLLY_INTC_G2DE2INT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x20000)
+// G2 End of Dev-DMA
+#define HOLLY_INTC_G2DEDINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x40000)
+// End of ch2-DMA
+#define HOLLY_INTC_DTDE2INT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x80000)
+// End of Sort-DMA
+#define HOLLY_INTC_DTDESINT HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x100000)
+// End Of Punch Through List Storage
+#define HOLLY_INTC_TAEPTIN HOLLY_INTERRUPT(HOLLY_INTC_NRM, 0x200000)
+//
+// HOLLY_INTC_EXT
+//
+// From GD-ROM Drive
+#define HOLLY_INTC_G1GDINT HOLLY_INTERRUPT(HOLLY_INTC_EXT, 0x1)
+// From AICA
+#define HOLLY_INTC_G2AICINT HOLLY_INTERRUPT(HOLLY_INTC_EXT, 0x2)
+// From Modem
+#define HOLLY_INTC_G2MDMINT HOLLY_INTERRUPT(HOLLY_INTC_EXT, 0x4)
+// From External Device
+#define HOLLY_INTC_G2EXTINT HOLLY_INTERRUPT(HOLLY_INTC_EXT, 0x8)
+//
+// HOLLY_INTC_ERR
+//
+// ISP Out of Cache
+#define HOLLY_INTC_PCIOCINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x1)
+// Hazard Processing of Strip Buffer
+#define HOLLY_INTC_PCHZDINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x2)
+// ISP/TSP Parameter Limit Address
+#define HOLLY_INTC_TAPOFINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x4)
+// Object List Limit Address
+#define HOLLY_INTC_TALOFINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x8)
+// Illegal Parameter Input
+#define HOLLY_INTC_TAIPINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x10)
+// TA FIFO Over Flow
+#define HOLLY_INTC_TAFOFINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x20)
+// PVR Illegal Address Set
+#define HOLLY_INTC_PIIAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x40)
+// PVR DMA Over Run
+#define HOLLY_INTC_PIORINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x80)
+// MAPLE Illegal Address Set
+#define HOLLY_INTC_MIAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x100)
+// MAPLE DMA Over Run
+#define HOLLY_INTC_MORINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x200)
+// MAPLE Write FIFO Overf Flow
+#define HOLLY_INTC_MFOFINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x400)
+// MAPLE Illegal Command
+#define HOLLY_INTC_MICINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x800)
+// G1 Illegal Address Set
+#define HOLLY_INTC_G1IAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x1000)
+// G1 DMA Over Run
+#define HOLLY_INTC_G1ORINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x2000)
+// G1 Access at DMA
+#define HOLLY_INTC_G1ATINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x4000)
+// G2 AICA-DMA Illegal Address Set
+#define HOLLY_INTC_G2IAAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x8000)
+// G2 Ext1-DMA Illegal Address Set
+#define HOLLY_INTC_G2IA1INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x10000)
+// G2 Ext2-DMA Illegal Address Set
+#define HOLLY_INTC_G2IA2INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x20000)
+// G2 Dev-DMA Illegal Address Set
+#define HOLLY_INTC_G2IADINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x40000)
+// G2 AICA-DMA Over Run
+#define HOLLY_INTC_G2ORAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x80000)
+// G2 Ext1-DMA Over Run
+#define HOLLY_INTC_G2OR1INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x100000)
+// G2 Ext2-DMA Over Run
+#define HOLLY_INTC_G2OR2INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x200000)
+// G2 Dev-DMA Over Run
+#define HOLLY_INTC_G2ORDINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x400000)
+// G2 AICA-DMA Time Out
+#define HOLLY_INTC_G2TOAINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x800000)
+// G2 Ext1-DMA Time Out
+#define HOLLY_INTC_G2TO1INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x1000000)
+// G2 Ext2-DMA Time Out
+#define HOLLY_INTC_G2TO2INT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x2000000)
+// G2 Dev-DMA Time Out
+#define HOLLY_INTC_G2TODINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x4000000)
+// G2 Time Out in CPU Accessing
+#define HOLLY_INTC_G2TOCINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x8000000)
+// Sort-DMA Command Error
+#define HOLLY_INTC_DTCESINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x10000000)
+#define HOLLY_INTC_RESERVED1 HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x20000000)
+#define HOLLY_INTC_RESERVED2 HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x40000000)
+// SH4 Accessing to Inhibited Area
+#define HOLLY_INTC_CIHINT HOLLY_INTERRUPT(HOLLY_INTC_ERR, 0x80000000)
 
 #endif
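Note: MSVC (as of Visual Studio 2015, per the commit's own comment) constrains C enumerators to int, so values like 0x100000000 cannot live in an enum; packing the interrupt type into the upper 32 bits of a uint64_t keeps the old single-value ergonomics. A small round-trip demo of the encoding (standalone names are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define INTERRUPT(type, irq) (((uint64_t)(type) << 32) | (irq))

    int main(void) {
      uint64_t intr = INTERRUPT(0x1 /* NRM */, 0x10 /* VBlank Out bit */);
      uint32_t type = (uint32_t)(intr >> 32); /* recovers 0x1 */
      uint32_t irq = (uint32_t)intr;          /* recovers 0x10 */
      printf("type=%x irq=%x\n", type, irq);
      return 0;
    }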
@@ -82,7 +82,7 @@ int g_param_sizes[0x100 * TA_NUM_PARAMS * TA_NUM_VERT_TYPES];
 int g_poly_types[0x100 * TA_NUM_PARAMS * TA_NUM_LISTS];
 int g_vertex_types[0x100 * TA_NUM_PARAMS * TA_NUM_LISTS];
 
-static enum holly_interrupt list_interrupts[] = {
+static holly_interrupt_t list_interrupts[] = {
     HOLLY_INTC_TAEOINT,   // TA_LIST_OPAQUE
     HOLLY_INTC_TAEOMINT,  // TA_LIST_OPAQUE_MODVOL
     HOLLY_INTC_TAETINT,   // TA_LIST_TRANSLUCENT
@@ -96,15 +96,15 @@ static int ta_entry_cmp(const struct rb_node *rb_lhs,
       rb_entry(rb_lhs, const struct ta_texture_entry, live_it);
   const struct ta_texture_entry *rhs =
       rb_entry(rb_rhs, const struct ta_texture_entry, live_it);
-  return tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
-         tr_texture_key(rhs->base.tsp, rhs->base.tcw);
+  return (int)(tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
+               tr_texture_key(rhs->base.tsp, rhs->base.tcw));
 }
 
 static int ta_context_cmp(const struct rb_node *rb_lhs,
                           const struct rb_node *rb_rhs) {
   const struct tile_ctx *lhs = rb_entry(rb_lhs, const struct tile_ctx, live_it);
   const struct tile_ctx *rhs = rb_entry(rb_rhs, const struct tile_ctx, live_it);
-  return lhs->addr - rhs->addr;
+  return (int)(lhs->addr - rhs->addr);
 }
 
 static struct rb_callbacks ta_entry_cb = {&ta_entry_cmp, NULL, NULL};
@@ -264,7 +264,8 @@ static struct ta_texture_entry *ta_find_texture(struct ta *ta, union tsp tsp,
   search.base.tsp = tsp;
   search.base.tcw = tcw;
 
-  return rb_find_entry(&ta->live_entries, &search, live_it, &ta_entry_cb);
+  return rb_find_entry(&ta->live_entries, &search, struct ta_texture_entry,
+                       live_it, &ta_entry_cb);
 }
 
 static struct texture_entry *ta_texture_provider_find_texture(void *data,
@@ -312,7 +313,8 @@ static struct tile_ctx *ta_get_context(struct ta *ta, uint32_t addr) {
   struct tile_ctx search;
   search.addr = addr;
 
-  return rb_find_entry(&ta->live_contexts, &search, live_it, &ta_context_cb);
+  return rb_find_entry(&ta->live_contexts, &search, struct tile_ctx, live_it,
+                       &ta_context_cb);
 }
 
 static struct tile_ctx *ta_alloc_context(struct ta *ta, uint32_t addr) {
@@ -25,12 +25,12 @@ void get_next_trace_filename(char *filename, size_t size) {
 static bool trace_patch_pointers(void *begin, int size) {
   struct trace_cmd *prev_cmd = NULL;
   struct trace_cmd *curr_cmd = NULL;
-  void *ptr = begin;
-  void *end = ptr + size;
+  uint8_t *ptr = begin;
+  uint8_t *end = ptr + size;
 
   while (ptr < end) {
     prev_cmd = curr_cmd;
-    curr_cmd = ptr;
+    curr_cmd = (struct trace_cmd *)ptr;
 
     // set prev / next pointers
     if (prev_cmd) {
@@ -151,7 +151,7 @@ void trace_writer_insert_texture(struct trace_writer *writer, union tsp tsp,
                                  union tcw tcw, const uint8_t *palette,
                                  int palette_size, const uint8_t *texture,
                                  int texture_size) {
-  struct trace_cmd cmd = {};
+  struct trace_cmd cmd = {0};
   cmd.type = TRACE_CMD_TEXTURE;
   cmd.texture.tsp = tsp;
   cmd.texture.tcw = tcw;
@@ -171,7 +171,7 @@ void trace_writer_insert_texture(struct trace_writer *writer, union tsp tsp,
 
 void trace_writer_render_context(struct trace_writer *writer,
                                  struct tile_ctx *ctx) {
-  struct trace_cmd cmd = {};
+  struct trace_cmd cmd = {0};
   cmd.type = TRACE_CMD_CONTEXT;
   cmd.context.autosort = ctx->autosort;
   cmd.context.stride = ctx->stride;
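Note: pointer arithmetic on void* (the old "ptr + size") is a GNU extension that treats the pointee as one byte; standard C requires a complete object type, so the walker switches to uint8_t* for byte math and casts when it reinterprets the bytes as a command header. The pattern in isolation (the cmd layout here is hypothetical):

    #include <stdint.h>

    struct cmd { int type; int size; }; /* hypothetical record header */

    static void walk(void *begin, int size) {
      uint8_t *ptr = begin; /* byte-wise arithmetic is well-defined */
      uint8_t *end = ptr + size;

      while (ptr < end) {
        struct cmd *c = (struct cmd *)ptr; /* explicit uint8_t* cast */
        ptr += c->size; /* advance by the record's stored byte length */
      }
    }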
@@ -202,7 +202,7 @@ bool memory_init(struct memory *memory) {
   list_for_each_entry(dev, &memory->dc->devices, struct device, it) {
     if (dev->memory) {
       // create the actual address map
-      struct address_map map = {};
+      struct address_map map = {0};
       dev->memory->mapper(dev, memory->dc, &map);
 
       // apply the map to create the address space
@@ -383,7 +383,7 @@ static void as_merge_map(struct address_space *space,
     const struct address_map_entry *entry = &map->entries[i];
 
     // iterate each mirror of the entry
-    struct mirror_iterator it = {};
+    struct mirror_iterator it = {0};
 
     mirror_iterator_init(&it, offset + entry->addr, entry->addr_mask);
 
@@ -424,7 +424,7 @@ static void as_merge_map(struct address_space *space,
       } break;
 
       case MAP_ENTRY_DEVICE: {
-        struct address_map device_map = {};
+        struct address_map device_map = {0};
         entry->device.mapper(entry->device.device, space->dc, &device_map);
         as_merge_map(space, &device_map, addr);
       } break;
@@ -480,7 +480,7 @@ static bool as_map_pages(struct address_space *space, uint8_t *base) {
       continue;
     }
 
-    // batch map djacent pages, mmap is fairly slow
+    // batch map adjacent pages, mmap is fairly slow
     int num_pages = as_num_adj_pages(space, page_index);
     uint32_t size = get_total_page_size(num_pages);
 
@@ -146,8 +146,8 @@ static code_pointer_t sh4_cache_compile_code_inner(struct sh4_cache *cache,
   struct sh4_block search;
   search.guest_addr = guest_addr;
 
-  struct sh4_block *unlinked =
-      rb_find_entry(&cache->blocks, &search, it, &block_map_cb);
+  struct sh4_block *unlinked = rb_find_entry(
+      &cache->blocks, &search, struct sh4_block, it, &block_map_cb);
 
   if (unlinked) {
     flags |= unlinked->flags;
@@ -156,7 +156,7 @@ static code_pointer_t sh4_cache_compile_code_inner(struct sh4_cache *cache,
   }
 
   // translate the SH4 into IR
-  struct ir ir = {};
+  struct ir ir = {0};
   ir.buffer = cache->ir_buffer;
   ir.capacity = sizeof(cache->ir_buffer);
 
@@ -222,7 +222,8 @@ struct sh4_block *sh4_cache_get_block(struct sh4_cache *cache,
   struct sh4_block search;
   search.guest_addr = guest_addr;
 
-  return rb_find_entry(&cache->blocks, &search, it, &block_map_cb);
+  return rb_find_entry(&cache->blocks, &search, struct sh4_block, it,
+                       &block_map_cb);
 }
 
 void sh4_cache_remove_blocks(struct sh4_cache *cache, uint32_t guest_addr) {
@@ -406,7 +406,7 @@ const uint8_t *x64_backend_emit(struct x64_backend *backend, struct ir *ir,
   x64_backend_emit_body(backend, ir);
   x64_backend_emit_epilog(backend, ir, stack_size);
 
-  *size = backend->codegen->getCurr() - fn;
+  *size = (int)(backend->codegen->getCurr() - fn);
 
   return fn;
 }
@@ -1678,7 +1678,7 @@ struct jit_backend *x64_backend_create(struct jit_memory_interface *memory_if) {
   CHECK_EQ(res, CS_ERR_OK);
 
   // make the code buffer executable
-  int page_size = get_page_size();
+  int page_size = (int)get_page_size();
   void *aligned_code = (void *)align_down((intptr_t)x64_code, page_size);
   int aligned_code_size = align_up(x64_code_size, page_size);
   bool success =
@@ -6,7 +6,7 @@ void sh4_analyze_block(uint32_t guest_addr, uint8_t *guest_ptr, int flags,
   *size = 0;
 
   while (true) {
-    struct sh4_instr instr = {};
+    struct sh4_instr instr = {0};
     instr.addr = guest_addr;
     instr.opcode = *(uint16_t *)guest_ptr;
 
@@ -31,7 +31,7 @@ static void sh4_frontend_dump_code(struct jit_frontend *base,
   int i = 0;
 
   while (i < size) {
-    struct sh4_instr instr = {};
+    struct sh4_instr instr = {0};
     instr.addr = guest_addr + i;
     instr.opcode = *(uint16_t *)(guest_ptr + i);
     sh4_disasm(&instr);
@@ -42,7 +42,7 @@ static void sh4_frontend_dump_code(struct jit_frontend *base,
     i += 2;
 
     if (instr.flags & SH4_FLAG_DELAYED) {
-      struct sh4_instr delay = {};
+      struct sh4_instr delay = {0};
       delay.addr = guest_addr + i;
       delay.opcode = *(uint16_t *)(guest_ptr + i);
       sh4_disasm(&delay);
@@ -59,11 +59,8 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
 // swizzle 32-bit fp loads, see notes in sh4_context.h
 #define swizzle_fpr(n, type) (ir_type_size(type) == 4 ? ((n) ^ 1) : (n))
 
-#define load_fpr(n, type)                                         \
-  ({                                                              \
-    int tmp = swizzle_fpr(n, type);                               \
-    ir_load_context(ir, offsetof(struct sh4_ctx, fr[tmp]), type); \
-  })
+#define load_fpr(n, type) \
+  ir_load_context(ir, offsetof(struct sh4_ctx, fr[swizzle_fpr(n, type)]), type)
 
 #define store_fpr(n, v) \
   do {                  \
@@ -71,11 +68,8 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
     ir_store_context(ir, offsetof(struct sh4_ctx, fr[tmp]), v); \
   } while (0)
 
-#define load_xfr(n, type)                                         \
-  ({                                                              \
-    int tmp = swizzle_fpr(n, type);                               \
-    ir_load_context(ir, offsetof(struct sh4_ctx, xf[tmp]), type); \
-  })
+#define load_xfr(n, type) \
+  ir_load_context(ir, offsetof(struct sh4_ctx, xf[swizzle_fpr(n, type)]), type)
 
 #define store_xfr(n, v) \
   do {                  \
@@ -83,7 +77,7 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
     ir_store_context(ir, offsetof(struct sh4_ctx, xf[tmp]), v); \
   } while (0)
 
-#define load_sr() (ir_load_context(ir, offsetof(struct sh4_ctx, sr), VALUE_I32))
+#define load_sr() ir_load_context(ir, offsetof(struct sh4_ctx, sr), VALUE_I32)
 
 #define store_sr(v) \
   do {              \
@@ -112,12 +106,9 @@ static emit_cb emit_callbacks[NUM_SH4_OPS] = {
     ir_store_context(ir, offsetof(struct sh4_ctx, gbr), v); \
   } while (0)
 
-#define load_fpscr()                                                     \
-  ({                                                                     \
-    struct ir_value *v =                                                 \
-        ir_load_context(ir, offsetof(struct sh4_ctx, fpscr), VALUE_I32); \
-    ir_and(ir, v, ir_alloc_i32(ir, 0x003fffff));                         \
-  })
+#define load_fpscr()                                                          \
+  ir_and(ir, ir_load_context(ir, offsetof(struct sh4_ctx, fpscr), VALUE_I32), \
+         ir_alloc_i32(ir, 0x003fffff))
 
 #define store_fpscr(v) \
   do {                 \
@@ -2194,7 +2185,7 @@ void sh4_translate(uint32_t guest_addr, uint8_t *guest_ptr, int size, int flags,
   int guest_cycles = 0;
 
   while (i < size) {
-    struct sh4_instr instr = {};
+    struct sh4_instr instr = {0};
     instr.addr = guest_addr + i;
     instr.opcode = *(uint16_t *)(guest_ptr + i);
 
@@ -2234,7 +2225,7 @@ void sh4_translate(uint32_t guest_addr, uint8_t *guest_ptr, int size, int flags,
   }
 
   // emit block epilog
-  ir->current_instr = list_prev_entry(tail_instr, it);
+  ir->current_instr = list_prev_entry(tail_instr, struct ir_instr, it);
 
   // update remaining cycles
   struct ir_value *num_cycles =
@@ -245,13 +245,13 @@ void ir_store_slow(struct ir *ir, struct ir_value *addr, struct ir_value *v) {
 struct ir_value *ir_load_context(struct ir *ir, size_t offset,
                                  enum ir_type type) {
   struct ir_instr *instr = ir_append_instr(ir, OP_LOAD_CONTEXT, type);
-  ir_set_arg0(ir, instr, ir_alloc_i32(ir, offset));
+  ir_set_arg0(ir, instr, ir_alloc_i32(ir, (int32_t)offset));
   return instr->result;
 }
 
 void ir_store_context(struct ir *ir, size_t offset, struct ir_value *v) {
   struct ir_instr *instr = ir_append_instr(ir, OP_STORE_CONTEXT, VALUE_V);
-  ir_set_arg0(ir, instr, ir_alloc_i32(ir, offset));
+  ir_set_arg0(ir, instr, ir_alloc_i32(ir, (int32_t)offset));
   ir_set_arg1(ir, instr, v);
 }
 
@@ -212,7 +212,7 @@ int ir_parse_value(struct ir_parser *p, struct ir *ir,
     if (instr->tag == slot) {
       break;
     }
-    instr = list_next_entry(instr, it);
+    instr = list_next_entry(instr, struct ir_instr, it);
   }
   CHECK_NOTNULL(instr);
 
@@ -276,7 +276,7 @@ int ir_parse_operator(struct ir_parser *p, struct ir *ir) {
 int ir_parse_instr(struct ir_parser *p, struct ir *ir) {
   int slot = -1;
   enum ir_type type = VALUE_V;
-  struct ir_value *arg[3] = {};
+  struct ir_value *arg[3] = {0};
 
   // parse result type and slot number
   if (p->tok == TOK_TYPE) {
@@ -329,7 +329,7 @@ int ir_parse_instr(struct ir_parser *p, struct ir *ir) {
 }
 
 int ir_read(FILE *input, struct ir *ir) {
-  struct ir_parser p = {};
+  struct ir_parser p = {0};
   p.input = input;
 
   while (1) {
@@ -4,14 +4,14 @@
 #include "core/constructor.h"
 #include "core/list.h"
 
-#define DEFINE_STAT(name, desc)                                            \
-  static int STAT_##name;                                                  \
-  static struct pass_stat STAT_T_##name = {#name, desc, &STAT_##name, {}}; \
-  CONSTRUCTOR(STAT_REGISTER_##name) {                                      \
-    pass_stat_register(&STAT_T_##name);                                    \
-  }                                                                        \
-  DESTRUCTOR(STAT_UNREGISTER_##name) {                                     \
-    pass_stat_unregister(&STAT_T_##name);                                  \
+#define DEFINE_STAT(name, desc)                                             \
+  static int STAT_##name;                                                   \
+  static struct pass_stat STAT_T_##name = {#name, desc, &STAT_##name, {0}}; \
+  CONSTRUCTOR(STAT_REGISTER_##name) {                                       \
+    pass_stat_register(&STAT_T_##name);                                     \
+  }                                                                         \
+  DESTRUCTOR(STAT_UNREGISTER_##name) {                                      \
+    pass_stat_unregister(&STAT_T_##name);                                   \
   }
 
 struct pass_stat {
@@ -132,7 +132,7 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
   // the interval's value needs to be filled back from from the stack before
   // its next use
   struct ir_use *next_use = interval->next;
-  struct ir_use *prev_use = list_prev_entry(next_use, it);
+  struct ir_use *prev_use = list_prev_entry(next_use, struct ir_use, it);
   CHECK(next_use,
         "Register being spilled has no next use, why wasn't it expired?");
 
@@ -140,13 +140,15 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
   struct ir_local *local = ir_alloc_local(ir, interval->instr->result->type);
 
   // insert load before next use
-  ir->current_instr = list_prev_entry(next_use->instr, it);
+  ir->current_instr = list_prev_entry(next_use->instr, struct ir_instr, it);
   struct ir_value *load_value = ir_load_local(ir, local);
   struct ir_instr *load_instr = load_value->def;
 
   // assign the load a valid ordinal
-  int load_ordinal = ra_get_ordinal(list_prev_entry(load_instr, it)) + 1;
-  CHECK_LT(load_ordinal, ra_get_ordinal(list_next_entry(load_instr, it)));
+  int load_ordinal =
+      ra_get_ordinal(list_prev_entry(load_instr, struct ir_instr, it)) + 1;
+  CHECK_LT(load_ordinal,
+           ra_get_ordinal(list_next_entry(load_instr, struct ir_instr, it)));
   ra_set_ordinal(load_instr, load_ordinal);
 
   // update uses of interval->instr after the next use to use the new value
@@ -155,7 +157,7 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
   while (next_use) {
     // cache off next next since calling set_value will modify the linked list
     // pointers
-    struct ir_use *next_next_use = list_next_entry(next_use, it);
+    struct ir_use *next_next_use = list_next_entry(next_use, struct ir_use, it);
     ir_replace_use(next_use, load_instr->result);
     next_use = next_next_use;
   }
@@ -169,7 +171,7 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
 
   if (prev_use) {
     // there is a previous useerence, insert store after it
-    CHECK(list_next_entry(prev_use, it) == NULL,
+    CHECK(list_next_entry(prev_use, struct ir_use, it) == NULL,
           "All future uses should have been replaced");
     after = prev_use->instr;
   } else {
@@ -248,7 +250,7 @@ static int ra_reuse_arg_register(struct ra *ra, struct ir *ir,
   // if the argument's register is used after this instruction, it's not
   // trivial to reuse
   struct interval *interval = &ra->intervals[prefered];
-  if (list_next_entry(interval->next, it)) {
+  if (list_next_entry(interval->next, struct ir_use, it)) {
     return NO_REGISTER;
   }
 
@@ -283,8 +285,8 @@ static void ra_expire_set(struct ra *ra, struct register_set *set,
 
     // if there are more uses, advance the next use and reinsert the interval
    // into the correct position
-    if (interval->next && list_next_entry(interval->next, it)) {
-      interval->next = list_next_entry(interval->next, it);
+    if (interval->next && list_next_entry(interval->next, struct ir_use, it)) {
+      interval->next = list_next_entry(interval->next, struct ir_use, it);
       ra_insert_interval(set, interval);
     }
     // if there are no more uses, but the register has been reused by
@@ -356,7 +358,7 @@ static void ra_init_sets(struct ra *ra, const struct jit_register *registers,
 
 void ra_run(struct ir *ir, const struct jit_register *registers,
             int num_registers) {
-  struct ra ra = {};
+  struct ra ra = {0};
 
   ra_init_sets(&ra, registers, num_registers);
 
@@ -16,7 +16,7 @@ int main(int argc, char **argv) {
   }
 
   // load base options from config
-  char config[PATH_MAX] = {};
+  char config[PATH_MAX] = {0};
   snprintf(config, sizeof(config), "%s" PATH_SEPARATOR "config", appdir);
   options_read(config);
 
@@ -268,7 +268,7 @@ static void rb_destroy_program(struct shader_program *program) {
 static bool rb_compile_program(struct shader_program *program,
                                const char *header, const char *vertex_source,
                                const char *fragment_source) {
-  char buffer[16384] = {};
+  char buffer[16384] = {0};
 
   memset(program, 0, sizeof(*program));
   program->program = glCreateProgram();
@@ -701,7 +701,7 @@ struct rb *rb_create(struct window *window) {
   struct rb *rb = (struct rb *)calloc(1, sizeof(struct rb));
   rb->window = window;
   rb->listener = (struct window_listener){
-      rb, NULL, &rb_paint_debug_menu, NULL, NULL, NULL, NULL, {}};
+      rb, NULL, &rb_paint_debug_menu, NULL, NULL, NULL, NULL, {0}};
 
   win_add_listener(rb->window, &rb->listener);
 
@@ -319,7 +319,7 @@ static struct key keys[] = {{K_UNKNOWN, "unknown"},
                             {K_AXIS15, "axis15"}};
 
 enum keycode get_key_by_name(const char *keyname) {
-  char buffer[256] = {};
+  char buffer[256] = {0};
   int len = 0;
 
   while (*keyname) {
@@ -263,8 +263,7 @@ struct microprofile *mp_create(struct window *window) {
       calloc(1, sizeof(struct microprofile)));
 
   mp->window = window;
-  mp->listener = (struct window_listener){
-      mp, NULL, NULL, &mp_keydown, NULL, &mp_mousemove, NULL, {}};
+  mp->listener = {mp, NULL, NULL, &mp_keydown, NULL, &mp_mousemove, NULL, {}};
 
   win_add_listener(mp->window, &mp->listener);
 
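Note: this translation unit compiles as C++, where the (struct window_listener){ ... } compound literal is a compiler extension MSVC rejects; assigning a plain braced list is standard C++11 aggregate initialization, which is also why the trailing {} can stay here. A hedged C++ sketch of the difference (names are hypothetical stand-ins):

    struct listener { // stand-in for window_listener
      void *data;
      void (*on_key)(void *);
    };

    void attach(listener &l, void *data, void (*cb)(void *)) {
      // l = (listener){data, cb}; // C99 compound literal; not valid C++
      l = {data, cb}; // C++11 copy-list-initialization, fine under MSVC
    }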
@@ -5,7 +5,14 @@
 #include "ui/window.h"
 
 #define NK_IMPLEMENTATION
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4116)
+#endif
 #include <nuklear.h>
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
 static void nk_keydown(void *data, enum keycode code, int16_t value) {
   struct nuklear *nk = data;
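Note: wrapping a vendored header in #pragma warning(push/disable/pop) is the usual MSVC idiom for muting third-party warnings (here C4116, "unnamed type definition in parentheses") without lowering the warning level for the project's own sources. Generic form of the same pattern:

    #if defined(_MSC_VER)
    #pragma warning(push)
    #pragma warning(disable : 4116) /* mute just this diagnostic */
    #endif
    /* #include <noisy_third_party_header.h> -- placeholder include */
    #if defined(_MSC_VER)
    #pragma warning(pop) /* restore the previous warning state */
    #endif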
@@ -75,7 +82,7 @@ void nk_end_frame(struct nuklear *nk) {
   nk_buffer_init_fixed(&vbuf, nk->vertices, sizeof(nk->vertices));
   nk_buffer_init_fixed(&ebuf, nk->elements, sizeof(nk->elements));
 
-  struct nk_convert_config config = {};
+  struct nk_convert_config config = {0};
   config.global_alpha = 1.0f;
   config.shape_AA = NK_ANTI_ALIASING_ON;
   config.line_AA = NK_ANTI_ALIASING_ON;
@@ -95,7 +102,7 @@ void nk_end_frame(struct nuklear *nk) {
   const struct nk_draw_command *cmd = NULL;
   int offset = 0;
 
-  struct surface2d surf = {};
+  struct surface2d surf = {0};
   surf.prim_type = PRIM_TRIANGLES;
   surf.src_blend = BLEND_SRC_ALPHA;
   surf.dst_blend = BLEND_ONE_MINUS_SRC_ALPHA;
@@ -132,7 +139,7 @@ struct nuklear *nk_create(struct window *window) {
   struct nuklear *nk = calloc(1, sizeof(struct nuklear));
   nk->window = window;
   nk->listener = (struct window_listener){
-      nk, NULL, NULL, &nk_keydown, &nk_textinput, &nk_mousemove, NULL, {}};
+      nk, NULL, NULL, &nk_keydown, &nk_textinput, &nk_mousemove, NULL, {0}};
 
   win_add_listener(nk->window, &nk->listener);
 
@@ -33,7 +33,7 @@ static int get_num_instrs(const struct ir *ir) {
 }
 
 static void process_file(const char *filename, bool disable_ir_dump) {
-  struct ir ir = {};
+  struct ir ir = {0};
   ir.buffer = ir_buffer;
   ir.capacity = sizeof(ir_buffer);
 