mirror of https://github.com/inolen/redream.git

commit bd352c3cc4
parent 0a13de9244

    multithreaded rendering
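Overview of the change: the emulator loop is split across two threads. A new core thread steps the Dreamcast machine at a fixed rate while the main thread keeps pumping window events and now owns all rendering. The TA hands completed tile contexts to the graphics thread through a mutex-guarded pending context, estimates render time with a scheduler timer, and skips a frame when the graphics thread is still busy. The texture cache is reworked around a shared struct texture_entry with frame/dirty bookkeeping so textures are uploaded lazily on the render thread, and the old register_texture_cb / get_texture_cb callbacks are replaced by a small struct texture_interface. The JIT types mem_interface and register_def are renamed to jit_memory_interface and jit_register along the way.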
CMakeLists.txt

@@ -2,6 +2,7 @@ cmake_minimum_required(VERSION 2.8.12)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

include(CheckCSourceCompiles)
include(CheckIncludeFiles)
include(CheckFunctionExists)
include(ExternalProject)

@@ -28,6 +29,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# config file
#--------------------------------------------------

check_include_files(stdatomic.h HAVE_STDATOMIC_H)
check_include_files(strings.h HAVE_STRINGS_H)
check_function_exists(strcasecmp HAVE_STRCASECMP)
check_function_exists(strnlen HAVE_STRNLEN)

@@ -190,18 +192,21 @@ if(WIN32)
  list(APPEND REDREAM_SOURCES src/sys/exception_handler_win.c)
  list(APPEND REDREAM_SOURCES src/sys/filesystem_win.c)
  list(APPEND REDREAM_SOURCES src/sys/memory_win.c)
  list(APPEND REDREAM_SOURCES src/sys/thread_win.c)
  list(APPEND REDREAM_SOURCES src/sys/time_win.c)
elseif(APPLE)
  list(APPEND REDREAM_DEFS PLATFORM_DARWIN=1)
  list(APPEND REDREAM_SOURCES src/sys/exception_handler_mac.c)
  list(APPEND REDREAM_SOURCES src/sys/filesystem_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/memory_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/thread_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/time_mac.c)
else()
  list(APPEND REDREAM_DEFS PLATFORM_LINUX=1)
  list(APPEND REDREAM_SOURCES src/sys/exception_handler_linux.c)
  list(APPEND REDREAM_SOURCES src/sys/filesystem_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/memory_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/thread_posix.c)
  list(APPEND REDREAM_SOURCES src/sys/time_linux.c)
endif()

@@ -210,7 +215,7 @@ endif()
source_group_by_dir(REDREAM_SOURCES)

if("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
  set(REDREAM_COMPILE_FLAGS $<$<COMPILE_LANGUAGE:CXX>:-std=c++11 -fno-operator-names> $<$<COMPILE_LANGUAGE:C>:-std=c11> -Wall -Wextra -Werror -Wno-unused-function -Wno-unused-parameter -Wno-unused-variable -Wno-strict-aliasing -D_GNU_SOURCE)
  set(REDREAM_COMPILE_FLAGS $<$<COMPILE_LANGUAGE:CXX>:-std=c++11 -fno-operator-names> $<$<COMPILE_LANGUAGE:C>:-std=c11> -Wall -Wextra -Werror -Wno-unused-function -Wno-unused-parameter -Wno-unused-variable -Wno-strict-aliasing -D_GNU_SOURCE)

  # some flavors of GCC require this to be defined for the PR* macros in inttypes.h
  if("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
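The new check_include_files(stdatomic.h ...) probe presumably feeds the generated config header, since the threading code added below depends on C11 atomics. A minimal sketch of how such a guard is typically consumed — the config.h.in name and the #error fallback are assumptions for illustration, not shown in this diff:

/* config.h.in (hypothetical) -- configure_file() turns each #cmakedefine
 * into a #define when the corresponding check succeeded. */
#cmakedefine HAVE_STDATOMIC_H

/* consumer (hypothetical) -- fail loudly when C11 atomics are missing,
 * since the multithreaded renderer requires them. */
#ifdef HAVE_STDATOMIC_H
#include <stdatomic.h>
#else
#error "a C11 <stdatomic.h> is required for the multithreaded renderer"
#endif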
src/core/rb_tree.c

@@ -423,9 +423,9 @@ void rb_link(struct rb_tree *t, struct rb_node *n, struct rb_callbacks *cb) {
    cb->propagate(t, n);
  }

// #ifdef VERIFY_TREE
#ifdef VERIFY_TREE
  rb_verify(t->root);
// #endif
#endif
}

void rb_unlink(struct rb_tree *t, struct rb_node *n, struct rb_callbacks *cb) {

@@ -457,9 +457,9 @@ void rb_unlink(struct rb_tree *t, struct rb_node *n, struct rb_callbacks *cb) {
    cb->propagate(t, n->parent);
  }

// #ifdef VERIFY_TREE
#ifdef VERIFY_TREE
  rb_verify(t->root);
// #endif
#endif
}

void rb_insert(struct rb_tree *t, struct rb_node *n, struct rb_callbacks *cb) {
src/emu/emulator.c

@@ -1,3 +1,4 @@
#include <stdatomic.h>
#include "emu/emulator.h"
#include "core/option.h"
#include "hw/dreamcast.h"

@@ -5,6 +6,7 @@
#include "hw/memory.h"
#include "hw/scheduler.h"
#include "hw/sh4/sh4.h"
#include "sys/thread.h"
#include "sys/time.h"
#include "ui/nuklear.h"
#include "ui/window.h"

@@ -16,7 +18,8 @@ struct emu {
  struct window *window;
  struct window_listener *listener;
  struct dreamcast *dc;
  bool running;
  atomic_int running;
  int throttled;
};

static bool emu_load_bios(struct emu *emu, const char *path) {

@@ -131,6 +134,11 @@ static void emu_paint(void *data) {
static void emu_paint_debug_menu(void *data, struct nk_context *ctx) {
  struct emu *emu = data;

  if (nk_tree_push(ctx, NK_TREE_TAB, "emu", NK_MINIMIZED)) {
    nk_checkbox_label(ctx, "throttled", &emu->throttled);
    nk_tree_pop(ctx);
  }

  dc_paint_debug_menu(emu->dc, ctx);
}

@@ -150,7 +158,30 @@ static void emu_keydown(void *data, enum keycode code, int16_t value) {
static void emu_close(void *data) {
  struct emu *emu = data;

  emu->running = false;
  atomic_store(&emu->running, 0);
}

static void *emu_core_thread(void *data) {
  struct emu *emu = data;

  static const int64_t MACHINE_STEP = HZ_TO_NANO(1000);
  int64_t current_time = time_nanoseconds();
  int64_t next_time = current_time;

  while (atomic_load_explicit(&emu->running, memory_order_relaxed)) {
    current_time = time_nanoseconds();

    int64_t delta_time = current_time - next_time;

    if (emu->throttled && delta_time < 0) {
      continue;
    }

    dc_tick(emu->dc, MACHINE_STEP);
    next_time = current_time + MACHINE_STEP;
  }

  return 0;
}

void emu_run(struct emu *emu, const char *path) {

@@ -178,33 +209,19 @@ void emu_run(struct emu *emu, const char *path) {
    }
  }

  // start running
  static const int64_t MACHINE_STEP = HZ_TO_NANO(1000);
  static const int64_t FRAME_STEP = HZ_TO_NANO(60);
  // start core emulator thread
  thread_t core_thread;
  atomic_store(&emu->running, 1);
  core_thread = thread_create(&emu_core_thread, NULL, emu);

  int64_t current_time = time_nanoseconds();
  int64_t next_machine_time = current_time;
  int64_t next_frame_time = current_time;

  emu->running = true;

  while (emu->running) {
    current_time = time_nanoseconds();

    // run dreamcast machine
    if (current_time > next_machine_time) {
      dc_tick(emu->dc, MACHINE_STEP);

      next_machine_time = current_time + MACHINE_STEP;
    }

    // run local frame
    if (current_time > next_frame_time) {
      win_pump_events(emu->window);

      next_frame_time = current_time + FRAME_STEP;
    }
  // run the renderer / ui in the main thread
  while (atomic_load_explicit(&emu->running, memory_order_relaxed)) {
    win_pump_events(emu->window);
  }

  // wait for the core emulation thread to exit
  void *result;
  thread_join(core_thread, &result);
}

struct emu *emu_create(struct window *window) {

@@ -215,6 +232,7 @@ struct emu *emu_create(struct window *window) {

  emu->window = window;
  emu->listener = win_add_listener(emu->window, &callbacks, emu);
  emu->running = ATOMIC_VAR_INIT(0);

  return emu;
}
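The pattern above — a dedicated machine-stepping thread plus a main thread that keeps servicing the window, both watching one atomic flag — is easy to isolate. A minimal self-contained sketch of the same shutdown handshake, using plain C11 atomics and pthreads rather than redream's thread_t wrappers (the wrappers, dc_tick and HZ_TO_NANO are redream-specific; everything below is illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running;

static void *core_thread(void *data) {
  // stand-in for dc_tick(): step the machine while the flag is set.
  // relaxed ordering is enough here; the flag only gates loop continuation.
  while (atomic_load_explicit(&running, memory_order_relaxed)) {
    /* step emulation ... */
  }
  return NULL;
}

int main(void) {
  pthread_t core;
  atomic_store(&running, 1);
  pthread_create(&core, NULL, &core_thread, NULL);

  // main thread: pump ui / window events until something clears the flag
  // (emu_close() performs the equivalent atomic_store(&running, 0)).
  for (int frame = 0; frame < 3; frame++) {
    /* pump events ... */
  }
  atomic_store(&running, 0);

  pthread_join(core, NULL);  // same join-before-exit ordering as emu_run()
  printf("clean shutdown\n");
  return 0;
}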
src/emu/tracer.c (157 changed lines)

@@ -58,20 +58,8 @@ static const char *s_shademode_names[] = {
    "DECAL", "MODULATE", "DECAL_ALPHA", "MODULATE_ALPHA",
};

struct texture_entry {
  union tsp tsp;
  union tcw tcw;
  const uint8_t *palette;
  const uint8_t *texture;
  texture_handle_t handle;
  enum pxl_format format;
  enum filter_mode filter;
  enum wrap_mode wrap_u;
  enum wrap_mode wrap_v;
  bool mipmaps;
  int width;
  int height;

struct tracer_texture_entry {
  struct texture_entry base;
  struct rb_node live_it;
  struct list_node free_it;
};

@@ -102,92 +90,68 @@ struct tracer {
  int sorted_surfs[TA_MAX_SURFS];
  struct param_state states[TA_MAX_PARAMS];

  struct texture_entry textures[1024];
  struct tracer_texture_entry textures[1024];
  struct rb_tree live_textures;
  struct list free_textures;
};

static int tracer_texture_cmp(const struct rb_node *rb_lhs,
                              const struct rb_node *rb_rhs) {
  const struct texture_entry *lhs =
      rb_entry(rb_lhs, const struct texture_entry, live_it);
  const struct texture_entry *rhs =
      rb_entry(rb_rhs, const struct texture_entry, live_it);
  return tr_get_texture_key(lhs->tsp, lhs->tcw) -
         tr_get_texture_key(rhs->tsp, rhs->tcw);
  const struct tracer_texture_entry *lhs =
      rb_entry(rb_lhs, const struct tracer_texture_entry, live_it);
  const struct tracer_texture_entry *rhs =
      rb_entry(rb_rhs, const struct tracer_texture_entry, live_it);
  return tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
         tr_texture_key(rhs->base.tsp, rhs->base.tcw);
}

static struct rb_callbacks tracer_texture_cb = {&tracer_texture_cmp, NULL,
                                                NULL};

static struct tracer_texture_entry *tracer_find_texture(struct tracer *tracer,
                                                        union tsp tsp,
                                                        union tcw tcw) {
  struct tracer_texture_entry search;
  search.base.tsp = tsp;
  search.base.tcw = tcw;

  return rb_find_entry(&tracer->live_textures, &search, live_it,
                       &tracer_texture_cb);
}

static void tracer_add_texture(struct tracer *tracer, union tsp tsp,
                               union tcw tcw, const uint8_t *palette,
                               const uint8_t *texture) {
  struct texture_entry *entry =
      list_first_entry(&tracer->free_textures, struct texture_entry, free_it);
  CHECK_NOTNULL(entry);
  list_remove(&tracer->free_textures, &entry->free_it);
  struct tracer_texture_entry *entry = tracer_find_texture(tracer, tsp, tcw);
  int new_entry = 0;

  entry->tsp = tsp;
  entry->tcw = tcw;
  entry->palette = palette;
  entry->texture = texture;
  entry->handle = 0;
  if (!entry) {
    entry = list_first_entry(&tracer->free_textures,
                             struct tracer_texture_entry, free_it);
    CHECK_NOTNULL(entry);
    list_remove(&tracer->free_textures, &entry->free_it);

  rb_insert(&tracer->live_textures, &entry->live_it, &tracer_texture_cb);
    entry->base.tsp = tsp;
    entry->base.tcw = tcw;

    rb_insert(&tracer->live_textures, &entry->live_it, &tracer_texture_cb);

    new_entry = 1;
  }

  entry->base.dirty = new_entry ? 0 : 1;
  entry->base.palette = palette;
  entry->base.texture = texture;
}

static void tracer_remove_texture(struct tracer *tracer, union tsp tsp,
                                  union tcw tcw) {
  struct texture_entry search;
  search.tsp = tsp;
  search.tcw = tcw;

  struct texture_entry *entry = rb_find_entry(&tracer->live_textures, &search,
                                              live_it, &tracer_texture_cb);
  CHECK_NOTNULL(entry);
  rb_unlink(&tracer->live_textures, &entry->live_it, &tracer_texture_cb);

  list_add(&tracer->free_textures, &entry->free_it);
}

static texture_handle_t tracer_get_texture(void *data,
                                           const struct tile_ctx *ctx,
                                           union tsp tsp, union tcw tcw,
                                           void *register_data,
                                           register_texture_cb register_cb) {
static struct texture_entry *tracer_texture_interface_find_texture(
    void *data, union tsp tsp, union tcw tcw) {
  struct tracer *tracer = data;

  struct texture_entry search;
  search.tsp = tsp;
  search.tcw = tcw;

  struct texture_entry *entry = rb_find_entry(&tracer->live_textures, &search,
                                              live_it, &tracer_texture_cb);
  struct tracer_texture_entry *entry = tracer_find_texture(tracer, tsp, tcw);
  CHECK_NOTNULL(entry, "Texture wasn't available in cache");

  // TODO fixme, pass correct struct tile_ctx to tracer_add_texture so this
  // isn't deferred
  if (!entry->handle) {
    struct texture_reg reg = {};
    reg.ctx = ctx;
    reg.tsp = tsp;
    reg.tcw = tcw;
    reg.palette = entry->palette;
    reg.texture = entry->texture;
    register_cb(register_data, &reg);

    entry->handle = reg.handle;
    entry->format = reg.format;
    entry->filter = reg.filter;
    entry->wrap_u = reg.wrap_u;
    entry->wrap_v = reg.wrap_v;
    entry->mipmaps = reg.mipmaps;
    entry->width = reg.width;
    entry->height = reg.height;
  }

  return entry->handle;
  return &entry->base;
}

static void tracer_copy_command(const struct trace_cmd *cmd,

@@ -285,9 +249,8 @@ static void tracer_prev_context(struct tracer *tracer) {

  while (curr != prev) {
    if (curr->type == TRACE_CMD_TEXTURE) {
      tracer_remove_texture(tracer, curr->texture.tsp, curr->texture.tcw);

      struct trace_cmd *override = curr->override;

      if (override) {
        CHECK_EQ(override->type, TRACE_CMD_TEXTURE);

@@ -735,11 +698,11 @@ static void tracer_render_side_menu(struct tracer *tracer) {
  if (nk_tree_push(ctx, NK_TREE_TAB, "textures", 0)) {
    nk_layout_row_static(ctx, 40.0f, 40.0f, 4);

    rb_for_each_entry(tex, &tracer->live_textures, struct texture_entry,
                      live_it) {
    rb_for_each_entry(entry, &tracer->live_textures,
                      struct tracer_texture_entry, live_it) {
      struct nk_rect bounds = nk_widget_bounds(ctx);

      nk_image(ctx, nk_image_id((int)tex->handle));
      nk_image(ctx, nk_image_id((int)entry->base.handle));

      if (nk_input_is_mouse_hovering_rect(&ctx->input, bounds)) {
        struct nk_panel tooltip;

@@ -755,7 +718,7 @@ static void tracer_render_side_menu(struct tracer *tracer) {
        if (nk_group_begin(ctx, &tab, "texture preview",
                           NK_WINDOW_NO_SCROLLBAR)) {
          nk_layout_row_static(ctx, 184.0f, 184.0f, 1);
          nk_image(ctx, nk_image_id((int)tex->handle));
          nk_image(ctx, nk_image_id((int)entry->base.handle));
          nk_group_end(ctx);
        }

@@ -763,18 +726,19 @@ static void tracer_render_side_menu(struct tracer *tracer) {
                           NK_WINDOW_NO_SCROLLBAR)) {
          nk_layout_row_static(ctx, ctx->style.font.height, 184.0f, 1);
          nk_labelf(ctx, NK_TEXT_LEFT, "addr: 0x%08x",
                    tex->tcw.texture_addr << 3);
                    entry->base.tcw.texture_addr << 3);
          nk_labelf(ctx, NK_TEXT_LEFT, "format: %s",
                    s_pixel_format_names[tex->format]);
                    s_pixel_format_names[entry->base.format]);
          nk_labelf(ctx, NK_TEXT_LEFT, "filter: %s",
                    s_filter_mode_names[tex->filter]);
                    s_filter_mode_names[entry->base.filter]);
          nk_labelf(ctx, NK_TEXT_LEFT, "wrap_u: %s",
                    s_wrap_mode_names[tex->wrap_u]);
                    s_wrap_mode_names[entry->base.wrap_u]);
          nk_labelf(ctx, NK_TEXT_LEFT, "wrap_v: %s",
                    s_wrap_mode_names[tex->wrap_v]);
          nk_labelf(ctx, NK_TEXT_LEFT, "mipmaps: %d", tex->mipmaps);
          nk_labelf(ctx, NK_TEXT_LEFT, "width: %d", tex->width);
          nk_labelf(ctx, NK_TEXT_LEFT, "height: %d", tex->height);
                    s_wrap_mode_names[entry->base.wrap_v]);
          nk_labelf(ctx, NK_TEXT_LEFT, "mipmaps: %d",
                    entry->base.mipmaps);
          nk_labelf(ctx, NK_TEXT_LEFT, "width: %d", entry->base.width);
          nk_labelf(ctx, NK_TEXT_LEFT, "height: %d", entry->base.height);
          nk_group_end(ctx);
        }

@@ -797,7 +761,7 @@ static void tracer_render_side_menu(struct tracer *tracer) {
static void tracer_paint(void *data) {
  struct tracer *tracer = data;

  tr_parse_context(tracer->tr, &tracer->ctx, &tracer->rctx);
  tr_parse_context(tracer->tr, &tracer->ctx, 0, &tracer->rctx);

  // render ui
  tracer_render_side_menu(tracer);

@@ -897,7 +861,10 @@ struct tracer *tracer_create(struct window *window) {
  tracer->window = window;
  tracer->listener = win_add_listener(window, &callbacks, tracer);
  tracer->rb = window->rb;
  tracer->tr = tr_create(tracer->rb, tracer, &tracer_get_texture);

  struct texture_interface texture_if = {
      tracer, &tracer_texture_interface_find_texture};
  tracer->tr = tr_create(tracer->rb, &texture_if);

  // setup tile context buffers
  tracer->ctx.params = tracer->params;

@@ -914,7 +881,7 @@ struct tracer *tracer_create(struct window *window) {

  // add all textures to free list
  for (int i = 0, n = array_size(tracer->textures); i < n; i++) {
    struct texture_entry *entry = &tracer->textures[i];
    struct tracer_texture_entry *entry = &tracer->textures[i];
    list_add(&tracer->free_textures, &entry->free_it);
  }
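tracer_add_texture now follows a find-or-allocate pattern: re-adding a texture that already has a live entry just marks it dirty, so the renderer re-uploads it on next use instead of the tracer touching the backend directly. A condensed sketch of that idiom — the types here are reduced to essentials and are not the literal redream structs:

#include <stddef.h>

struct entry {
  unsigned long long key;  // packed tsp/tcw, as in tr_texture_key()
  int dirty;               // set when the cached GPU handle is stale
  struct entry *next;      // stand-in for the rb-tree / free-list links
};

// find-or-allocate: reuse the live entry when the key is already cached,
// otherwise pull one from the free pool and link it in.
static struct entry *cache_put(struct entry **live, struct entry **free_pool,
                               unsigned long long key) {
  for (struct entry *e = *live; e; e = e->next) {
    if (e->key == key) {
      e->dirty = 1;  // existing entry: source data changed, re-upload lazily
      return e;
    }
  }
  struct entry *e = *free_pool;
  if (!e) {
    return NULL;
  }
  *free_pool = e->next;
  e->key = key;
  e->dirty = 0;  // fresh entry: nothing uploaded yet, nothing stale
  e->next = *live;
  *live = e;
  return e;
}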
src/hw/holly/holly.h

@@ -28,7 +28,7 @@ enum holly_interrupt {
  HOLLY_INTC_PCEOVINT = HOLLY_INTC_NRM | 0x1,
  // ISP End of Render
  HOLLY_INTC_PCEOIINT = HOLLY_INTC_NRM | 0x2,
  // union tsp End of Render
  // TSP End of Render
  HOLLY_INTC_PCEOTINT = HOLLY_INTC_NRM | 0x4,
  // VBlank In
  HOLLY_INTC_PCVIINT = HOLLY_INTC_NRM | 0x8,

@@ -86,7 +86,7 @@ enum holly_interrupt {
  HOLLY_INTC_PCIOCINT = HOLLY_INTC_ERR | 0x1,
  // Hazard Processing of Strip Buffer
  HOLLY_INTC_PCHZDINT = HOLLY_INTC_ERR | 0x2,
  // ISP/union tsp Parameter Limit Address
  // ISP/TSP Parameter Limit Address
  HOLLY_INTC_TAPOFINT = HOLLY_INTC_ERR | 0x4,
  // Object List Limit Address
  HOLLY_INTC_TALOFINT = HOLLY_INTC_ERR | 0x8,
src/hw/holly/ta.c

@@ -12,23 +12,22 @@
#include "renderer/backend.h"
#include "sys/exception_handler.h"
#include "sys/filesystem.h"
#include "sys/thread.h"
#include "ui/nuklear.h"

#define TA_MAX_CONTEXTS 4
#define TA_MAX_CONTEXTS 32

struct texture_entry {
  struct ta *ta;
  texture_key_t key;
  texture_handle_t handle;
struct ta_texture_entry {
  struct texture_entry base;
  struct memory_watch *texture_watch;
  struct memory_watch *palette_watch;
  struct list_node free_it;
  struct rb_node live_it;
  struct list_node invalid_it;
};

struct ta {
  struct device base;
  struct scheduler *scheduler;
  struct holly *holly;
  struct pvr *pvr;
  struct address_space *space;

@@ -39,31 +38,42 @@ struct ta {
  // texture cache entry pool. free entries are in a linked list, live entries
  // are in a tree ordered by texture key, textures queued for invalidation are
  // in the invalid_entries linked list
  struct texture_entry entries[1024];
  struct ta_texture_entry entries[1024];
  struct list free_entries;
  struct rb_tree live_entries;
  struct list invalid_entries;
  int num_invalidated;

  // tile context pool. free contexts are in a linked list, live contexts
  // are in a tree ordered by the context's guest address, and a pointer to the
  // next context up for rendering is stored in pending_context
  // are in a tree ordered by the context's guest address
  struct tile_ctx contexts[TA_MAX_CONTEXTS];
  struct list free_contexts;
  struct rb_tree live_contexts;

  // the pending context is the last context requested to be rendered by the
  // emulation thread. a mutex is used to synchronize access with the graphics
  // thread
  mutex_t pending_mutex;
  struct tile_ctx *pending_context;

  // last parsed pending context
  struct render_ctx render_context;

  // buffers used by the tile contexts. allocating here instead of inside each
  // tile_ctx to avoid blowing the stack when a tile_ctx is needed temporarily
  // on the stack for searching
  uint8_t params[TA_MAX_CONTEXTS * TA_MAX_PARAMS];

  // buffers used by render contexts
  // buffers used by render context
  struct surface surfs[TA_MAX_SURFS];
  struct vertex verts[TA_MAX_VERTS];
  int sorted_surfs[TA_MAX_SURFS];

  struct trace_writer *trace_writer;

  // debug info
  int frame;
  int frames_skipped;
  int num_textures;
};

int g_param_sizes[0x100 * TA_NUM_PARAMS * TA_NUM_VERT_TYPES];

@@ -80,11 +90,12 @@ static enum holly_interrupt list_interrupts[] = {

static int ta_entry_cmp(const struct rb_node *rb_lhs,
                        const struct rb_node *rb_rhs) {
  const struct texture_entry *lhs =
      rb_entry(rb_lhs, const struct texture_entry, live_it);
  const struct texture_entry *rhs =
      rb_entry(rb_rhs, const struct texture_entry, live_it);
  return lhs->key - rhs->key;
  const struct ta_texture_entry *lhs =
      rb_entry(rb_lhs, const struct ta_texture_entry, live_it);
  const struct ta_texture_entry *rhs =
      rb_entry(rb_rhs, const struct ta_texture_entry, live_it);
  return tr_texture_key(lhs->base.tsp, lhs->base.tcw) -
         tr_texture_key(rhs->base.tsp, rhs->base.tcw);
}

static int ta_context_cmp(const struct rb_node *rb_lhs,

@@ -224,18 +235,82 @@ static void ta_soft_reset(struct ta *ta) {
  // FIXME what are we supposed to do here?
}

static struct ta_texture_entry *ta_alloc_texture(struct ta *ta, union tsp tsp,
                                                 union tcw tcw) {
  // remove from free list
  struct ta_texture_entry *entry =
      list_first_entry(&ta->free_entries, struct ta_texture_entry, free_it);
  CHECK_NOTNULL(entry);
  list_remove(&ta->free_entries, &entry->free_it);

  // reset entry
  memset(entry, 0, sizeof(*entry));
  entry->base.tsp = tsp;
  entry->base.tcw = tcw;

  // add to live tree
  rb_insert(&ta->live_entries, &entry->live_it, &ta_entry_cb);

  ta->num_textures++;

  return entry;
}

static struct ta_texture_entry *ta_find_texture(struct ta *ta, union tsp tsp,
                                                union tcw tcw) {
  struct ta_texture_entry search;
  search.base.tsp = tsp;
  search.base.tcw = tcw;

  return rb_find_entry(&ta->live_entries, &search, live_it, &ta_entry_cb);
}

static struct texture_entry *ta_texture_interface_find_texture(void *data,
                                                               union tsp tsp,
                                                               union tcw tcw) {
  struct ta_texture_entry *entry = ta_find_texture(data, tsp, tcw);

  if (!entry) {
    return NULL;
  }

  return &entry->base;
}

static void ta_clear_textures(struct ta *ta) {
  LOG_INFO("Texture cache cleared");

  struct rb_node *it = rb_first(&ta->live_entries);

  while (it) {
    struct rb_node *next = rb_next(it);

    struct ta_texture_entry *entry =
        rb_entry(it, struct ta_texture_entry, live_it);

    entry->base.dirty = 1;

    it = next;
  }
}

static void ta_texture_invalidated(const struct exception *ex, void *data) {
  struct ta_texture_entry *entry = data;
  entry->texture_watch = NULL;
  entry->base.dirty = 1;
}

static void ta_palette_invalidated(const struct exception *ex, void *data) {
  struct ta_texture_entry *entry = data;
  entry->palette_watch = NULL;
  entry->base.dirty = 1;
}

static struct tile_ctx *ta_get_context(struct ta *ta, uint32_t addr) {
  struct tile_ctx search;
  search.addr = addr;

  struct tile_ctx *ctx =
      rb_find_entry(&ta->live_contexts, &search, live_it, &ta_context_cb);

  if (!ctx) {
    return NULL;
  }

  return ctx;
  return rb_find_entry(&ta->live_contexts, &search, live_it, &ta_context_cb);
}

static struct tile_ctx *ta_alloc_context(struct ta *ta, uint32_t addr) {

@@ -258,15 +333,10 @@ static struct tile_ctx *ta_alloc_context(struct ta *ta, uint32_t addr) {
}

static void ta_unlink_context(struct ta *ta, struct tile_ctx *ctx) {
  // remove from live tree
  rb_unlink(&ta->live_contexts, &ctx->live_it, &ta_context_cb);
}

static void ta_free_context(struct ta *ta, struct tile_ctx *ctx) {
  // remove from live tree
  ta_unlink_context(ta, ctx);

  // add to free list
  list_add(&ta->free_contexts, &ctx->free_it);
}

@@ -294,8 +364,7 @@ static void ta_write_context(struct ta *ta, uint32_t addr, uint32_t value) {
  *(uint32_t *)&ctx->params[ctx->size] = value;
  ctx->size += 4;

  // each TA command is either 32 or 64 bytes, with the union pcw being in the
  // first
  // each TA command is either 32 or 64 bytes, with the pcw being in the first
  // 32 bytes always. check every 32 bytes to see if the command has been
  // completely received or not
  if (ctx->size % 32 == 0) {

@@ -335,7 +404,117 @@ static void ta_write_context(struct ta *ta, uint32_t addr, uint32_t value) {
  }
}

static void ta_save_state(struct ta *ta, struct tile_ctx *ctx) {
static void ta_register_texture(struct ta *ta, union tsp tsp, union tcw tcw) {
  struct ta_texture_entry *entry = ta_find_texture(ta, tsp, tcw);
  int new_entry = 0;

  if (!entry) {
    entry = ta_alloc_texture(ta, tsp, tcw);
    new_entry = 1;
  }

  // mark texture source valid for the current frame
  entry->base.frame = ta->frame;

  // set texture address
  if (!entry->base.texture) {
    uint8_t *video_ram = as_translate(ta->space, 0x04000000);
    uint32_t texture_addr = tcw.texture_addr << 3;
    int width = 8 << tsp.texture_u_size;
    int height = 8 << tsp.texture_v_size;
    int element_size_bits = tcw.pixel_format == TA_PIXEL_8BPP
                                ? 8
                                : tcw.pixel_format == TA_PIXEL_4BPP ? 4 : 16;
    entry->base.texture = &video_ram[texture_addr];
    entry->base.texture_size = (width * height * element_size_bits) >> 3;
  }

  // set palette address
  if (!entry->base.palette) {
    if (tcw.pixel_format == TA_PIXEL_4BPP ||
        tcw.pixel_format == TA_PIXEL_8BPP) {
      uint8_t *palette_ram = as_translate(ta->space, 0x005f9000);
      uint32_t palette_addr = 0;
      int palette_size = 0;

      // palette ram is 4096 bytes, with each palette entry being 4 bytes,
      // resulting in 1 << 10 indexes
      if (tcw.pixel_format == TA_PIXEL_4BPP) {
        // in 4bpp mode, the palette selector represents the upper 6 bits of
        // the palette index, with the remaining 4 bits being filled in by the
        // texture
        palette_addr = (tcw.p.palette_selector << 4) * 4;
        palette_size = (1 << 4) * 4;
      } else if (tcw.pixel_format == TA_PIXEL_8BPP) {
        // in 8bpp mode, the palette selector represents the upper 2 bits of
        // the palette index, with the remaining 8 bits being filled in by the
        // texture
        palette_addr = ((tcw.p.palette_selector & 0x30) << 4) * 4;
        palette_size = (1 << 8) * 4;
      }

      entry->base.palette = &palette_ram[palette_addr];
      entry->base.palette_size = palette_size;
    }
  }

  // add write callback in order to invalidate on future writes. the callback
  // address will be page aligned, therefore it will be triggered falsely in
  // some cases. over invalidate in these cases
  if (!entry->texture_watch) {
    entry->texture_watch =
        add_single_write_watch(entry->base.texture, entry->base.texture_size,
                               &ta_texture_invalidated, entry);
  }

  if (entry->base.palette && !entry->palette_watch) {
    entry->palette_watch =
        add_single_write_watch(entry->base.palette, entry->base.palette_size,
                               &ta_palette_invalidated, entry);
  }

  // add new entries to the trace
  if (ta->trace_writer && new_entry) {
    trace_writer_insert_texture(ta->trace_writer, tsp, tcw, entry->base.palette,
                                entry->base.palette_size, entry->base.texture,
                                entry->base.texture_size);
  }
}

static void ta_register_textures(struct ta *ta, struct tile_ctx *ctx,
                                 int *num_polys) {
  const uint8_t *data = ctx->params;
  const uint8_t *end = ctx->params + ctx->size;
  int vertex_type = 0;

  *num_polys = 0;

  while (data < end) {
    union pcw pcw = *(union pcw *)data;

    switch (pcw.para_type) {
      case TA_PARAM_POLY_OR_VOL:
      case TA_PARAM_SPRITE: {
        const union poly_param *param = (const union poly_param *)data;

        vertex_type = ta_get_vert_type(param->type0.pcw);

        if (param->type0.pcw.texture) {
          ta_register_texture(ta, param->type0.tsp, param->type0.tcw);
        }

        (*num_polys)++;
      } break;

      default:
        break;
    }

    data += ta_get_param_size(pcw, vertex_type);
  }
}

static void ta_save_register_state(struct ta *ta, struct tile_ctx *ctx) {
  struct pvr *pvr = ta->pvr;

  // autosort

@@ -409,222 +588,79 @@ static void ta_save_state(struct ta *ta, struct tile_ctx *ctx) {
  }
}

static void ta_finish_context(struct ta *ta, uint32_t addr) {
  struct tile_ctx *ctx = ta_get_context(ta, addr);
  CHECK_NOTNULL(ctx);

  // save required register state being that the actual rendering of this
  // context will be deferred
  ta_save_state(ta, ctx);

  // tell holly that rendering is complete
static void ta_end_render(struct ta *ta) {
  // let the game know rendering is complete
  holly_raise_interrupt(ta->holly, HOLLY_INTC_PCEOVINT);
  holly_raise_interrupt(ta->holly, HOLLY_INTC_PCEOIINT);
  holly_raise_interrupt(ta->holly, HOLLY_INTC_PCEOTINT);
}

  // free the last pending context
static void ta_render_timer(void *data) {
  struct ta *ta = data;

  // ideally, the graphics thread has parsed the pending context, uploaded its
  // textures, etc. during the estimated render time. however, if it hasn't
  // finished, the emulation thread must be paused to avoid altering
  // the yet-to-be-uploaded texture memory
  mutex_lock(ta->pending_mutex);
  mutex_unlock(ta->pending_mutex);

  ta_end_render(ta);
}

static void ta_start_render(struct ta *ta, uint32_t addr) {
  struct tile_ctx *ctx = ta_get_context(ta, addr);
  CHECK_NOTNULL(ctx);

  // save off required register state that may be modified by the time the
  // context is rendered
  ta_save_register_state(ta, ctx);

  // if the graphics thread is still parsing the previous context, skip this one
  if (!mutex_trylock(ta->pending_mutex)) {
    ta_unlink_context(ta, ctx);
    ta_free_context(ta, ctx);
    ta_end_render(ta);
    ta->frames_skipped++;
    return;
  }

  // free the previous pending context if it wasn't rendered
  if (ta->pending_context) {
    ta_free_context(ta, ta->pending_context);
    ta->pending_context = NULL;
  }

  // set this context to pending
  // set the new pending context
  ta_unlink_context(ta, ctx);

  ta->pending_context = ctx;
}

static struct texture_entry *ta_alloc_texture(struct ta *ta,
                                              texture_key_t key) {
  // remove from free list
  struct texture_entry *entry =
      list_first_entry(&ta->free_entries, struct texture_entry, free_it);
  CHECK_NOTNULL(entry);
  list_remove(&ta->free_entries, &entry->free_it);
  // increment internal frame number. this frame number is assigned to each
  // texture source registered by this context
  ta->frame++;

  // reset entry
  memset(entry, 0, sizeof(*entry));
  entry->ta = ta;
  entry->key = key;
  // register the source of each texture referenced by the context with the
  // tile renderer. note, the process of actually uploading the texture to the
  // render backend happens lazily while rendering the context (keeping all
  // backend operations on the same thread). this registration just lets the
  // backend know where the texture's source data is
  int num_polys = 0;
  ta_register_textures(ta, ta->pending_context, &num_polys);

  // add to live tree
  rb_insert(&ta->live_entries, &entry->live_it, &ta_entry_cb);

  return entry;
}

static void ta_free_texture(struct ta *ta, struct texture_entry *entry) {
  // remove from live list
  rb_unlink(&ta->live_entries, &entry->live_it, &ta_entry_cb);

  // add back to free list
  list_add(&ta->free_entries, &entry->free_it);
}

static void ta_invalidate_texture(struct ta *ta, struct texture_entry *entry) {
  rb_free_texture(ta->rb, entry->handle);

  if (entry->texture_watch) {
    remove_memory_watch(entry->texture_watch);
  }

  if (entry->palette_watch) {
    remove_memory_watch(entry->palette_watch);
  }

  list_remove(&ta->invalid_entries, &entry->invalid_it);

  ta_free_texture(ta, entry);
}

static void ta_clear_textures(struct ta *ta) {
  LOG_INFO("Texture cache cleared");

  struct rb_node *it = rb_first(&ta->live_entries);

  while (it) {
    struct rb_node *next = rb_next(it);

    struct texture_entry *entry = rb_entry(it, struct texture_entry, live_it);
    ta_invalidate_texture(ta, entry);

    it = next;
  }

  CHECK(!rb_first(&ta->live_entries));
}

static void ta_clear_pending_textures(struct ta *ta) {
  list_for_each_entry_safe(it, &ta->invalid_entries, struct texture_entry,
                           invalid_it) {
    ta_invalidate_texture(ta, it);
    ta->num_invalidated++;
  }

  CHECK(list_empty(&ta->invalid_entries));

  prof_count("Num invalidated textures", ta->num_invalidated);
}

static void ta_texture_invalidated(const struct exception *ex, void *data) {
  struct texture_entry *entry = data;
  struct ta *ta = entry->ta;

  // don't double remove the watch during invalidation
  entry->texture_watch = NULL;

  // add to pending invalidation list (can't remove inside of signal
  // handler)
  if (!entry->invalid_it.next) {
    list_add(&ta->invalid_entries, &entry->invalid_it);
  }
}

static void ta_palette_invalidated(const struct exception *ex, void *data) {
  struct texture_entry *entry = data;
  struct ta *ta = entry->ta;

  // don't double remove the watch during invalidation
  entry->palette_watch = NULL;

  // add to pending invalidation list (can't remove inside of signal
  // handler)
  if (!entry->invalid_it.next) {
    list_add(&ta->invalid_entries, &entry->invalid_it);
  }
}

static texture_handle_t ta_get_texture(void *data, const struct tile_ctx *ctx,
                                       union tsp tsp, union tcw tcw,
                                       void *register_data,
                                       register_texture_cb register_cb) {
  struct ta *ta = data;

  // clear any pending texture invalidations at this time
  ta_clear_pending_textures(ta);

  // TODO struct tile_ctx isn't considered for caching here (stride and
  // pal_pxl_format are used by TileRenderer), this feels bad
  texture_key_t texture_key = tr_get_texture_key(tsp, tcw);

  // see if an entry already exists
  struct texture_entry search;
  search.key = texture_key;

  struct texture_entry *existing =
      rb_find_entry(&ta->live_entries, &search, live_it, &ta_entry_cb);

  if (existing) {
    return existing->handle;
  }

  // union tcw texture_addr field is in 64-bit units
  uint32_t texture_addr = tcw.texture_addr << 3;

  // get the texture data
  uint8_t *video_ram = as_translate(ta->space, 0x04000000);
  uint8_t *texture = &video_ram[texture_addr];
  int width = 8 << tsp.texture_u_size;
  int height = 8 << tsp.texture_v_size;
  int element_size_bits = tcw.pixel_format == TA_PIXEL_8BPP
                              ? 8
                              : tcw.pixel_format == TA_PIXEL_4BPP ? 4 : 16;
  int texture_size = (width * height * element_size_bits) >> 3;

  // get the palette data
  uint8_t *palette_ram = as_translate(ta->space, 0x005f9000);
  uint8_t *palette = NULL;
  uint32_t palette_addr = 0;
  int palette_size = 0;

  if (tcw.pixel_format == TA_PIXEL_4BPP || tcw.pixel_format == TA_PIXEL_8BPP) {
    // palette ram is 4096 bytes, with each palette entry being 4 bytes,
    // resulting in 1 << 10 indexes
    if (tcw.pixel_format == TA_PIXEL_4BPP) {
      // in 4bpp mode, the palette selector represents the upper 6 bits of the
      // palette index, with the remaining 4 bits being filled in by the texture
      palette_addr = (tcw.p.palette_selector << 4) * 4;
      palette_size = (1 << 4) * 4;
    } else if (tcw.pixel_format == TA_PIXEL_8BPP) {
      // in 8bpp mode, the palette selector represents the upper 2 bits of the
      // palette index, with the remaining 8 bits being filled in by the texture
      palette_addr = ((tcw.p.palette_selector & 0x30) << 4) * 4;
      palette_size = (1 << 8) * 4;
    }

    palette = &palette_ram[palette_addr];
  }

  // register the texture with the render backend
  struct texture_reg reg = {};
  reg.ctx = ctx;
  reg.tsp = tsp;
  reg.tcw = tcw;
  reg.palette = palette;
  reg.texture = texture;
  register_cb(register_data, &reg);

  // insert into the cache
  struct texture_entry *entry = ta_alloc_texture(ta, texture_key);
  entry->handle = reg.handle;

  // add write callback in order to invalidate on future writes. the callback
  // address will be page aligned, therefore it will be triggered falsely in
  // some cases. over invalidate in these cases
  entry->texture_watch = add_single_write_watch(texture, texture_size,
                                                &ta_texture_invalidated, entry);

  if (palette) {
    entry->palette_watch = add_single_write_watch(
        palette, palette_size, &ta_palette_invalidated, entry);
  }
  // supposedly, the dreamcast can push around ~3 million polygons per second
  // through the TA / PVR. with that in mind, a very poor estimate can be made
  // for how long the TA would take to render a frame based on the number of
  // polys pushed: 1,000,000,000 / 3,000,000 = 333 nanoseconds per polygon
  int64_t ns = num_polys * INT64_C(333);
  scheduler_start_timer(ta->scheduler, &ta_render_timer, ta, ns);

  if (ta->trace_writer) {
    trace_writer_insert_texture(ta->trace_writer, tsp, tcw, palette,
                                palette_size, texture, texture_size);
    trace_writer_render_context(ta->trace_writer, ta->pending_context);
  }

  return reg.handle;
  // unlock the mutex, enabling the graphics thread to start parsing the
  // pending context
  mutex_unlock(ta->pending_mutex);
}

static void ta_write_poly_fifo(struct ta *ta, uint32_t addr, uint32_t value) {

@@ -666,20 +702,21 @@ REG_W32(struct ta *ta, STARTRENDER) {
    return;
  }

  ta_finish_context(ta, ta->pvr->PARAM_BASE->base_address);
  ta_start_render(ta, ta->pvr->PARAM_BASE->base_address);
}

static bool ta_init(struct device *dev) {
  struct ta *ta = container_of(dev, struct ta, base);
  struct dreamcast *dc = ta->base.dc;

  ta->scheduler = dc->scheduler;
  ta->holly = dc->holly;
  ta->pvr = dc->pvr;
  ta->space = dc->sh4->base.memory->space;
  ta->video_ram = as_translate(ta->space, 0x04000000);

  for (int i = 0; i < array_size(ta->entries); i++) {
    struct texture_entry *entry = &ta->entries[i];
    struct ta_texture_entry *entry = &ta->entries[i];
    list_add(&ta->free_entries, &entry->free_it);
  }

@@ -735,34 +772,36 @@ static void ta_toggle_tracing(struct ta *ta) {

static void ta_paint(struct device *dev) {
  struct ta *ta = container_of(dev, struct ta, base);
  struct render_ctx *rctx = &ta->render_context;

  mutex_lock(ta->pending_mutex);

  if (ta->pending_context) {
    struct render_ctx rctx = {};
    rctx.surfs = ta->surfs;
    rctx.surfs_size = array_size(ta->surfs);
    rctx.verts = ta->verts;
    rctx.verts_size = array_size(ta->verts);
    rctx.sorted_surfs = ta->sorted_surfs;
    rctx.sorted_surfs_size = array_size(ta->sorted_surfs);
    rctx->surfs = ta->surfs;
    rctx->surfs_size = array_size(ta->surfs);
    rctx->verts = ta->verts;
    rctx->verts_size = array_size(ta->verts);
    rctx->sorted_surfs = ta->sorted_surfs;
    rctx->sorted_surfs_size = array_size(ta->sorted_surfs);

    tr_parse_context(ta->tr, ta->pending_context, &rctx);
    tr_parse_context(ta->tr, ta->pending_context, ta->frame, rctx);

    tr_render_context(ta->tr, &rctx);

    // write render command after actually rendering the context so texture
    // insert commands will be written out first
    if (ta->trace_writer && !ta->pending_context->wrote) {
      trace_writer_render_context(ta->trace_writer, ta->pending_context);
      ta->pending_context->wrote = true;
    }
    ta_free_context(ta, ta->pending_context);
    ta->pending_context = NULL;
  }

  mutex_unlock(ta->pending_mutex);

  tr_render_context(ta->tr, rctx);
}

static void ta_paint_debug_menu(struct device *dev, struct nk_context *ctx) {
  struct ta *ta = container_of(dev, struct ta, base);

  if (nk_tree_push(ctx, NK_TREE_TAB, "ta", NK_MINIMIZED)) {
    // nk_layout_row_static(ctx, 40.0f, 40.0f, 4);
    nk_value_int(ctx, "frames skipped", ta->frames_skipped);
    nk_value_int(ctx, "num textures", ta->num_textures);

    if (!ta->trace_writer &&
        nk_button_label(ctx, "start trace", NK_BUTTON_DEFAULT)) {
      ta_toggle_tracing(ta);

@@ -824,16 +863,20 @@ struct ta *ta_create(struct dreamcast *dc, struct rb *rb) {
      window_interface_create(&ta_paint, &ta_paint_debug_menu, NULL);

  ta->rb = rb;
  ta->tr = tr_create(ta->rb, ta, &ta_get_texture);

  struct texture_interface texture_if = {ta,
                                         &ta_texture_interface_find_texture};
  ta->tr = tr_create(ta->rb, &texture_if);

  ta->pending_mutex = mutex_create();

  return ta;
}

void ta_destroy(struct ta *ta) {
  mutex_destroy(ta->pending_mutex);
  tr_destroy(ta->tr);

  window_interface_destroy(ta->base.window);

  dc_destroy_device(&ta->base);
}
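Two details of the hand-off above are worth spelling out. First, the render-time estimate: at the quoted ~3 million polygons per second, a context with, say, 9,000 polys arms a timer for 9,000 × 333 ns ≈ 3 ms, after which ta_render_timer blocks on the mutex until the graphics thread is done before raising the end-of-render interrupts. Second, the trylock frame-skip: the emulation thread never waits to publish a context. A minimal sketch of that non-blocking hand-off, using pthreads instead of redream's mutex_t wrappers and an int as a stand-in for struct tile_ctx * (all names below are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pending_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pending_context;  // stand-in for struct tile_ctx *

// producer (emulation thread): publish a finished context, or skip the
// frame entirely if the consumer still holds the previous one.
static void start_render(int ctx) {
  if (pthread_mutex_trylock(&pending_mutex) != 0) {
    printf("frame skipped\n");  // ta->frames_skipped++ in the real code
    return;
  }
  pending_context = ctx;
  pthread_mutex_unlock(&pending_mutex);  // graphics thread may now parse it
}

// consumer (graphics thread): hold the lock for the whole parse so the
// producer can't recycle the context or its texture memory mid-read.
static void paint(void) {
  pthread_mutex_lock(&pending_mutex);
  if (pending_context) {
    /* parse + upload textures ... */
    pending_context = 0;
  }
  pthread_mutex_unlock(&pending_mutex);
}

int main(void) {
  start_render(1);
  paint();
  return 0;
}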
src/hw/holly/ta_types.h

@@ -441,7 +441,7 @@ union vert_param {
struct tile_ctx {
  uint32_t addr;

  // pvr state
  // pvr / ta state
  bool autosort;
  int stride;
  int pal_pxl_format;

@@ -464,9 +464,6 @@ struct tile_ctx {
  int list_type;
  int vertex_type;

  // debug traces
  bool wrote;

  struct list_node free_it;
  struct rb_node live_it;
};
src/hw/holly/tr.c

@@ -9,9 +9,7 @@

struct tr {
  struct rb *rb;

  void *get_texture_data;
  get_texture_cb get_texture;
  struct texture_interface texture_if;

  // current global state
  const union poly_param *last_poly;

@@ -118,15 +116,36 @@ static inline uint32_t float_to_rgba(float r, float g, float b, float a) {
         (float_to_u8(g) << 8) | float_to_u8(r);
}

static void tr_register_texture(void *data, struct texture_reg *reg) {
  static uint8_t converted[1024 * 1024 * 4];
static texture_handle_t tr_demand_texture(struct tr *tr,
                                          const struct tile_ctx *ctx, int frame,
                                          union tsp tsp, union tcw tcw) {
  // TODO it's bad that textures are only cached based off tsp / tcw yet
  // the TEXT_CONTROL registers and PAL_RAM_CTRL registers are used here
  // to control texture generation

  struct tr *tr = data;
  const struct tile_ctx *ctx = reg->ctx;
  union tsp tsp = reg->tsp;
  union tcw tcw = reg->tcw;
  const uint8_t *palette = reg->palette;
  const uint8_t *texture = reg->texture;
  struct texture_entry *entry =
      tr->texture_if.find_texture(tr->texture_if.data, tsp, tcw);
  CHECK_NOTNULL(entry);

  // if there's a non-dirty handle, go ahead and return it
  if (entry->handle && !entry->dirty) {
    return entry->handle;
  }

  // if there's a dirty handle, destroy it before creating the new one
  if (entry->handle && entry->dirty) {
    rb_destroy_texture(tr->rb, entry->handle);
    entry->handle = 0;
  }

  // sanity check that the texture source is valid for the current frame. video
  // ram will be modified between frames; if these values don't match, something
  // is broken in the ta's thread synchronization
  CHECK_EQ(frame, entry->frame);

  static uint8_t converted[1024 * 1024 * 4];
  const uint8_t *palette = entry->palette;
  const uint8_t *texture = entry->texture;
  const uint8_t *input = texture;
  const uint8_t *output = texture;

@@ -276,19 +295,18 @@ static void tr_register_texture(void *data, struct texture_reg *reg) {
      tsp.clamp_v ? WRAP_CLAMP_TO_EDGE
                  : (tsp.flip_v ? WRAP_MIRRORED_REPEAT : WRAP_REPEAT);

  texture_handle_t handle =
      rb_register_texture(tr->rb, pixel_fmt, filter, wrap_u, wrap_v, mip_mapped,
                          width, height, output);
  entry->handle = rb_create_texture(tr->rb, pixel_fmt, filter, wrap_u, wrap_v,
                                    mip_mapped, width, height, output);
  entry->format = pixel_fmt;
  entry->filter = filter;
  entry->wrap_u = wrap_u;
  entry->wrap_v = wrap_v;
  entry->mipmaps = mip_mapped;
  entry->width = width;
  entry->height = height;
  entry->dirty = 0;

  reg->handle = handle;
  reg->format = pixel_fmt;
  reg->filter = filter;
  reg->wrap_u = wrap_u;
  reg->wrap_v = wrap_v;
  reg->mipmaps = mip_mapped;
  reg->width = width;
  reg->height = height;
  return entry->handle;
}

static struct surface *tr_alloc_surf(struct tr *tr, struct render_ctx *rctx,

@@ -478,7 +496,8 @@ static void tr_parse_bg(struct tr *tr, const struct tile_ctx *ctx,
// NOTE this offset color implementation is not correct at all, see the
// Texture/Shading Instruction in the union tsp instruction word
static void tr_parse_poly_param(struct tr *tr, const struct tile_ctx *ctx,
                                struct render_ctx *rctx, const uint8_t *data) {
                                int frame, struct render_ctx *rctx,
                                const uint8_t *data) {
  tr_discard_incomplete_surf(tr, rctx);

  const union poly_param *param = (const union poly_param *)data;

@@ -561,8 +580,8 @@ static void tr_parse_poly_param(struct tr *tr, const struct tile_ctx *ctx,
  }

  if (param->type0.pcw.texture) {
    surf->texture = tr->get_texture(tr->get_texture_data, ctx, param->type0.tsp,
                                    param->type0.tcw, tr, &tr_register_texture);
    surf->texture =
        tr_demand_texture(tr, ctx, frame, param->type0.tsp, param->type0.tcw);
  } else {
    surf->texture = 0;
  }

@@ -755,7 +774,7 @@ static void tr_parse_vert_param(struct tr *tr, const struct tile_ctx *ctx,
    } break;

    case 17: {
      LOG_WARNING("Unhandled modvol triangle");
      // LOG_WARNING("Unhandled modvol triangle");
    } break;

    default:

@@ -921,12 +940,8 @@ static void tr_reset(struct tr *tr, struct render_ctx *rctx) {
  tr->last_sorted_surf = 0;
}

texture_key_t tr_get_texture_key(union tsp tsp, union tcw tcw) {
  return ((uint64_t)tsp.full << 32) | tcw.full;
}

static void tr_parse_context_inner(struct tr *tr, const struct tile_ctx *ctx,
                                   struct render_ctx *rctx) {
                                   int frame, struct render_ctx *rctx) {
  const uint8_t *data = ctx->params;
  const uint8_t *end = ctx->params + ctx->size;

@@ -958,11 +973,11 @@ static void tr_parse_context_inner(struct tr *tr, const struct tile_ctx *ctx,

      // global params
      case TA_PARAM_POLY_OR_VOL:
        tr_parse_poly_param(tr, ctx, rctx, data);
        tr_parse_poly_param(tr, ctx, frame, rctx, data);
        break;

      case TA_PARAM_SPRITE:
        tr_parse_poly_param(tr, ctx, rctx, data);
        tr_parse_poly_param(tr, ctx, frame, rctx, data);
        break;

      // vertex params

@@ -992,11 +1007,11 @@ static void tr_parse_context_inner(struct tr *tr, const struct tile_ctx *ctx,
  tr_proj_mat(tr, ctx, rctx);
}

void tr_parse_context(struct tr *tr, const struct tile_ctx *ctx,
void tr_parse_context(struct tr *tr, const struct tile_ctx *ctx, int frame,
                      struct render_ctx *rctx) {
  PROF_ENTER("tr_parse_context");

  tr_parse_context_inner(tr, ctx, rctx);
  tr_parse_context_inner(tr, ctx, frame, rctx);

  PROF_LEAVE();
}

@@ -1023,13 +1038,11 @@ void tr_render_context(struct tr *tr, const struct render_ctx *ctx) {
  PROF_LEAVE();
}

struct tr *tr_create(struct rb *rb, void *get_texture_data,
                     get_texture_cb get_texture) {
  struct tr *tr = malloc(sizeof(struct tr));
  memset(tr, 0, sizeof(*tr));
struct tr *tr_create(struct rb *rb, struct texture_interface *texture_if) {
  struct tr *tr = calloc(1, sizeof(struct tr));

  tr->rb = rb;
  tr->get_texture_data = get_texture_data;
  tr->get_texture = get_texture;
  tr->texture_if = *texture_if;

  return tr;
}
src/hw/holly/tr.h

@@ -1,28 +1,27 @@
#ifndef TR_H
#define TR_H

#include "core/rb_tree.h"
#include "hw/holly/ta_types.h"
#include "renderer/backend.h"

struct tr;

// register_texture_cb / get_texture_cb provide an abstraction around
// providing textures to the renderer. when emulating the actual TA,
// textures will be provided from guest memory, but when playing
// back traces the textures will come from the trace itself
typedef uint64_t texture_key_t;

struct texture_reg {
  // texture registration input
  const struct tile_ctx *ctx;
struct texture_entry {
  union tsp tsp;
  union tcw tcw;
  const uint8_t *palette;
  const uint8_t *texture;

  // texture registration output. normally, the handle is the only information
  // needed, but the rest is used by the tracer for debugging purposes
  texture_handle_t handle;
  // source info
  int frame;
  int dirty;
  const uint8_t *texture;
  int texture_size;
  const uint8_t *palette;
  int palette_size;

  // backend info
  enum pxl_format format;
  enum filter_mode filter;
  enum wrap_mode wrap_u;

@@ -30,13 +29,16 @@ struct texture_reg {
  bool mipmaps;
  int width;
  int height;
  texture_handle_t handle;
};

typedef void (*register_texture_cb)(void *, struct texture_reg *reg);

typedef texture_handle_t (*get_texture_cb)(void *, const struct tile_ctx *,
                                           union tsp, union tcw, void *,
                                           register_texture_cb);
// provides abstraction around providing texture data to the renderer. when
// emulating the actual ta, textures will be provided from guest memory, but
// when playing back traces the textures will come from the trace itself
struct texture_interface {
  void *data;
  struct texture_entry *(*find_texture)(void *, union tsp, union tcw);
};

// represents the parse state after each ta parameter. used to visually scrub
// through the scene parameter by parameter in the tracer

@@ -67,13 +69,15 @@ struct render_ctx {
  int num_states;
};

texture_key_t tr_get_texture_key(union tsp tsp, union tcw tcw);
static inline texture_key_t tr_texture_key(union tsp tsp, union tcw tcw) {
  return ((uint64_t)tsp.full << 32) | tcw.full;
}

void tr_parse_context(struct tr *tr, const struct tile_ctx *ctx,
void tr_parse_context(struct tr *tr, const struct tile_ctx *ctx, int frame,
                      struct render_ctx *rctx);
void tr_render_context(struct tr *tr, const struct render_ctx *rctx);

struct tr *tr_create(struct rb *rb, void *get_tex_data, get_texture_cb get_tex);
struct tr *tr_create(struct rb *rb, struct texture_interface *texture_if);
void tr_destroy(struct tr *tr);

#endif
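The new struct texture_interface inverts the old callback flow: instead of the renderer invoking a get_texture_cb that registers textures back into it, each host simply exposes where a texture's source bytes live and the renderer uploads on demand. Note also that tr_texture_key just packs the two 32-bit control words into one 64-bit key (tsp in the high half, tcw in the low half). A sketch of wiring a provider against this header — the host struct and its linear lookup are hypothetical, standing in for the rb-tree lookups the TA and tracer actually use:

// hypothetical host that owns a texture cache keyed by tsp/tcw
struct my_host {
  struct texture_entry entries[16];
  int num_entries;
};

static struct texture_entry *my_find_texture(void *data, union tsp tsp,
                                             union tcw tcw) {
  struct my_host *host = data;
  texture_key_t key = tr_texture_key(tsp, tcw);

  for (int i = 0; i < host->num_entries; i++) {
    struct texture_entry *entry = &host->entries[i];
    if (tr_texture_key(entry->tsp, entry->tcw) == key) {
      return entry;  // tr_demand_texture() creates / reuses entry->handle
    }
  }
  return NULL;
}

// hand the provider to the renderer, mirroring ta_create() / tracer_create():
//   struct texture_interface texture_if = {host, &my_find_texture};
//   struct tr *tr = tr_create(rb, &texture_if);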
trace.c

@@ -72,7 +72,7 @@ static bool trace_patch_overrides(struct trace_cmd *cmd) {
  while (cmd) {
    if (cmd->type == TRACE_CMD_TEXTURE) {
      texture_key_t texture_key =
          tr_get_texture_key(cmd->texture.tsp, cmd->texture.tcw);
          tr_texture_key(cmd->texture.tsp, cmd->texture.tcw);

      // walk backwards and see if this texture overrode a previous command
      // TODO could cache this information in a map

@@ -81,7 +81,7 @@ static bool trace_patch_overrides(struct trace_cmd *cmd) {
      while (prev) {
        if (prev->type == TRACE_CMD_TEXTURE) {
          texture_key_t prev_texture_key =
              tr_get_texture_key(prev->texture.tsp, prev->texture.tcw);
              tr_texture_key(prev->texture.tsp, prev->texture.tcw);

          if (prev_texture_key == texture_key) {
            cmd->override = prev;
@ -703,18 +703,19 @@ static bool sh4_init(struct device *dev) {
|
|||
sh4->scheduler = dc->scheduler;
|
||||
sh4->space = sh4->base.memory->space;
|
||||
|
||||
struct mem_interface memif = {&sh4->ctx,
|
||||
sh4->base.memory->space->protected_base,
|
||||
sh4->base.memory->space,
|
||||
&as_read8,
|
||||
&as_read16,
|
||||
&as_read32,
|
||||
&as_read64,
|
||||
&as_write8,
|
||||
&as_write16,
|
||||
&as_write32,
|
||||
&as_write64};
|
||||
sh4->code_cache = sh4_cache_create(&memif, &sh4_compile_pc);
|
||||
struct jit_memory_interface memory_if = {
|
||||
&sh4->ctx,
|
||||
sh4->base.memory->space->protected_base,
|
||||
sh4->base.memory->space,
|
||||
&as_read8,
|
||||
&as_read16,
|
||||
&as_read32,
|
||||
&as_read64,
|
||||
&as_write8,
|
||||
&as_write16,
|
||||
&as_write32,
|
||||
&as_write64};
|
||||
sh4->code_cache = sh4_cache_create(&memory_if, &sh4_compile_pc);
|
||||
|
||||
// initialize context
|
||||
sh4->ctx.sh4 = sh4;
|
||||
|
@ -775,11 +776,8 @@ static void sh4_paint_debug_menu(struct device *dev, struct nk_context *ctx) {
|
|||
struct sh4 *sh4 = container_of(dev, struct sh4, base);
|
||||
struct sh4_perf *perf = &sh4->perf;
|
||||
|
||||
struct nk_panel tab;
|
||||
|
||||
if (nk_tree_push(ctx, NK_TREE_TAB, "sh4", NK_MINIMIZED)) {
|
||||
float latest_mips = perf->mips[(perf->num_mips - 1) % MAX_MIPS_SAMPLES];
|
||||
nk_value_float(ctx, "mips", latest_mips);
|
||||
nk_value_int(ctx, "mips", perf->mips);
|
||||
nk_tree_pop(ctx);
|
||||
}
|
||||
|
||||
|
@ -827,20 +825,19 @@ static void sh4_run_inner(struct device *dev, int64_t ns) {

  // track mips
  int64_t now = time_nanoseconds();
  int64_t next_time = sh4->perf.last_sample_time + NS_PER_SEC;
  int64_t next_time = sh4->perf.last_mips_time + NS_PER_SEC;

  if (now > next_time) {
    // convert total number of instructions / nanoseconds delta into millions
    // of instructions per second
    float num_instrs_millions = sh4->ctx.num_instrs / 1000000.0f;
    int64_t delta_ns = now - sh4->perf.last_sample_time;
    int64_t delta_ns = now - sh4->perf.last_mips_time;
    float delta_s = delta_ns / 1000000000.0f;
    sh4->perf.mips[sh4->perf.num_mips] = num_instrs_millions / delta_s;
    sh4->perf.num_mips = (sh4->perf.num_mips + 1) % MAX_MIPS_SAMPLES;
    sh4->perf.mips = (int)(num_instrs_millions / delta_s);

    // reset state
    sh4->perf.last_mips_time = now;
    sh4->ctx.num_instrs = 0;
    sh4->perf.last_sample_time = now;
  }
}
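The rolling float sample buffer is gone; the counter now collapses into a single integer once per second. A small standalone check of the arithmetic, with illustrative sample values:

#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC INT64_C(1000000000)

int main() {
  /* illustrative: 200 million instructions retired over one second */
  int64_t num_instrs = 200000000;
  int64_t delta_ns = NS_PER_SEC;

  /* same computation as sh4_run_inner above */
  float num_instrs_millions = num_instrs / 1000000.0f;
  float delta_s = delta_ns / 1000000000.0f;
  int mips = (int)(num_instrs_millions / delta_s);

  printf("%d mips\n", mips); /* prints 200 */
  return 0;
}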
@ -28,9 +28,8 @@ struct sh4_dtr {

struct sh4_perf {
  bool show;
  int64_t last_sample_time;
  float mips[MAX_MIPS_SAMPLES];
  int num_mips;
  int64_t last_mips_time;
  int mips;
};

struct sh4 {
@ -281,7 +281,7 @@ code_pointer_t sh4_cache_compile_code(struct sh4_cache *cache,
  return code;
}

struct sh4_cache *sh4_cache_create(const struct mem_interface *memif,
struct sh4_cache *sh4_cache_create(const struct jit_memory_interface *memory_if,
                                   code_pointer_t default_code) {
  struct sh4_cache *cache = calloc(1, sizeof(struct sh4_cache));

@ -292,7 +292,7 @@ struct sh4_cache *sh4_cache_create(const struct mem_interface *memif,

  // setup parser and emitter
  cache->frontend = sh4_frontend_create();
  cache->backend = x64_backend_create(memif);
  cache->backend = x64_backend_create(memory_if);

  // initialize all entries in block cache to reference the default block
  cache->default_code = default_code;
@ -13,7 +13,7 @@
struct exception_handler;
struct jit_backend;
struct jit_frontend;
struct mem_interface;
struct jit_memory_interface;

typedef uint32_t (*code_pointer_t)();

@ -57,7 +57,7 @@ code_pointer_t sh4_cache_compile_code(struct sh4_cache *cache,
                                      uint32_t guest_addr, uint8_t *guest_ptr,
                                      int flags);

struct sh4_cache *sh4_cache_create(const struct mem_interface *memif,
struct sh4_cache *sh4_cache_create(const struct jit_memory_interface *memory_if,
                                   code_pointer_t default_code);
void sh4_cache_destroy(struct sh4_cache *cache);
@ -8,13 +8,13 @@ struct address_space;
struct ir;
struct exception;

struct register_def {
struct jit_register {
  const char *name;
  int value_types;
  const void *data;
};

struct mem_interface {
struct jit_memory_interface {
  void *ctx_base;
  void *mem_base;
  struct address_space *mem_self;

@ -31,7 +31,7 @@ struct mem_interface {
struct jit_backend;

struct jit_backend {
  const struct register_def *registers;
  const struct jit_register *registers;
  int num_registers;

  void (*reset)(struct jit_backend *base);
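Field order in jit_memory_interface matters because sh4_init fills it with a positional initializer. Purely as a sketch, a designated-initializer version makes the field-to-accessor mapping explicit. The struct mirror and callback signatures below are trimmed, illustrative assumptions (only the 8-bit pair is shown; the real struct carries r8 through r64 and w8 through w64, and the real accessor signatures are not spelled out in this diff):

#include <stdint.h>
#include <stdio.h>

/* trimmed, illustrative mirror of jit_memory_interface */
struct address_space {
  uint8_t ram[16];
};

static uint8_t as_read8(struct address_space *space, uint32_t addr) {
  return space->ram[addr & 0xf];
}

static void as_write8(struct address_space *space, uint32_t addr, uint8_t v) {
  space->ram[addr & 0xf] = v;
}

struct jit_memory_interface_sketch {
  void *ctx_base;
  void *mem_base;
  struct address_space *mem_self;
  uint8_t (*r8)(struct address_space *, uint32_t);
  void (*w8)(struct address_space *, uint32_t, uint8_t);
};

int main() {
  struct address_space space = {{0}};

  /* designated initializers make the field -> accessor mapping explicit,
     unlike the positional initializer used in sh4_init */
  struct jit_memory_interface_sketch memory_if = {
      .ctx_base = NULL, /* guest cpu context would go here */
      .mem_base = NULL, /* base of the protected memory mirror */
      .mem_self = &space,
      .r8 = &as_read8,
      .w8 = &as_write8,
  };

  memory_if.w8(memory_if.mem_self, 0x3, 0xab);
  printf("0x%02x\n", (unsigned)memory_if.r8(memory_if.mem_self, 0x3));
  return 0;
}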
@ -74,7 +74,7 @@ const Xbyak::Reg64 arg2(x64_arg2_idx);
const Xbyak::Reg64 tmp0(x64_tmp0_idx);
const Xbyak::Reg64 tmp1(x64_tmp1_idx);

const struct register_def x64_registers[] = {
const struct jit_register x64_registers[] = {
    {"rbx", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::rbx)},
    {"rbp", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::rbp)},
    {"r12", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::r12)},

@ -105,7 +105,7 @@ const struct register_def x64_registers[] = {
    reinterpret_cast<const void *>(&Xbyak::util::xmm15)}};

const int x64_num_registers =
    sizeof(x64_registers) / sizeof(struct register_def);
    sizeof(x64_registers) / sizeof(struct jit_register);

//
// x64 code buffer. this will break down if running two instances of the x64

@ -151,7 +151,7 @@ enum xmm_constant {

struct x64_backend {
  struct jit_backend base;
  struct mem_interface memif;
  struct jit_memory_interface memory_if;

  Xbyak::CodeGenerator *codegen;
  csh capstone_handle;

@ -351,8 +351,8 @@ static void x64_backend_emit_prolog(struct x64_backend *backend, struct ir *ir,
  e.sub(e.rsp, stack_size);

  // copy guest context and memory base to argument registers
  e.mov(e.r14, reinterpret_cast<uint64_t>(backend->memif.ctx_base));
  e.mov(e.r15, reinterpret_cast<uint64_t>(backend->memif.mem_base));
  e.mov(e.r14, reinterpret_cast<uint64_t>(backend->memory_if.ctx_base));
  e.mov(e.r15, reinterpret_cast<uint64_t>(backend->memory_if.mem_base));

  *out_stack_size = stack_size;
}

@ -518,7 +518,7 @@ static bool x64_backend_handle_exception(struct jit_backend *base,
  // figure out the guest address that was being accessed
  const uint8_t *fault_addr = reinterpret_cast<const uint8_t *>(ex->fault_addr);
  const uint8_t *protected_start =
      reinterpret_cast<const uint8_t *>(backend->memif.mem_base);
      reinterpret_cast<const uint8_t *>(backend->memory_if.mem_base);
  uint32_t guest_addr = static_cast<uint32_t>(fault_addr - protected_start);

  // instead of handling the dynamic callback from inside of the exception

@ -537,22 +537,26 @@ static bool x64_backend_handle_exception(struct jit_backend *base,
  if (mov.is_load) {
    // prep argument registers (memory object, guest_addr) for read function
    ex->thread_state.r[x64_arg0_idx] =
        reinterpret_cast<uint64_t>(backend->memif.mem_self);
        reinterpret_cast<uint64_t>(backend->memory_if.mem_self);
    ex->thread_state.r[x64_arg1_idx] = static_cast<uint64_t>(guest_addr);

    // prep function call address for thunk
    switch (mov.operand_size) {
      case 1:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.r8);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.r8);
        break;
      case 2:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.r16);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.r16);
        break;
      case 4:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.r32);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.r32);
        break;
      case 8:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.r64);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.r64);
        break;
    }

@ -563,23 +567,27 @@ static bool x64_backend_handle_exception(struct jit_backend *base,
    // prep argument registers (memory object, guest_addr, value) for write
    // function
    ex->thread_state.r[x64_arg0_idx] =
        reinterpret_cast<uint64_t>(backend->memif.mem_self);
        reinterpret_cast<uint64_t>(backend->memory_if.mem_self);
    ex->thread_state.r[x64_arg1_idx] = static_cast<uint64_t>(guest_addr);
    ex->thread_state.r[x64_arg2_idx] = ex->thread_state.r[mov.reg];

    // prep function call address for thunk
    switch (mov.operand_size) {
      case 1:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.w8);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.w8);
        break;
      case 2:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.w16);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.w16);
        break;
      case 4:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.w32);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.w32);
        break;
      case 8:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(backend->memif.w64);
        ex->thread_state.rax =
            reinterpret_cast<uint64_t>(backend->memory_if.w64);
        break;
    }

@ -723,23 +731,23 @@ EMITTER(LOAD_SLOW) {
  void *fn = nullptr;
  switch (instr->result->type) {
    case VALUE_I8:
      fn = reinterpret_cast<void *>(backend->memif.r8);
      fn = reinterpret_cast<void *>(backend->memory_if.r8);
      break;
    case VALUE_I16:
      fn = reinterpret_cast<void *>(backend->memif.r16);
      fn = reinterpret_cast<void *>(backend->memory_if.r16);
      break;
    case VALUE_I32:
      fn = reinterpret_cast<void *>(backend->memif.r32);
      fn = reinterpret_cast<void *>(backend->memory_if.r32);
      break;
    case VALUE_I64:
      fn = reinterpret_cast<void *>(backend->memif.r64);
      fn = reinterpret_cast<void *>(backend->memory_if.r64);
      break;
    default:
      LOG_FATAL("Unexpected load result type");
      break;
  }

  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memif.mem_self));
  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memory_if.mem_self));
  e.mov(arg1, a);
  e.call(reinterpret_cast<void *>(fn));
  e.mov(result, e.rax);

@ -752,23 +760,23 @@ EMITTER(STORE_SLOW) {
  void *fn = nullptr;
  switch (instr->arg[1]->type) {
    case VALUE_I8:
      fn = reinterpret_cast<void *>(backend->memif.w8);
      fn = reinterpret_cast<void *>(backend->memory_if.w8);
      break;
    case VALUE_I16:
      fn = reinterpret_cast<void *>(backend->memif.w16);
      fn = reinterpret_cast<void *>(backend->memory_if.w16);
      break;
    case VALUE_I32:
      fn = reinterpret_cast<void *>(backend->memif.w32);
      fn = reinterpret_cast<void *>(backend->memory_if.w32);
      break;
    case VALUE_I64:
      fn = reinterpret_cast<void *>(backend->memif.w64);
      fn = reinterpret_cast<void *>(backend->memory_if.w64);
      break;
    default:
      LOG_FATAL("Unexpected store value type");
      break;
  }

  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memif.mem_self));
  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memory_if.mem_self));
  e.mov(arg1, a);
  e.mov(arg2, b);
  e.call(reinterpret_cast<void *>(fn));

@ -1640,7 +1648,7 @@ EMITTER(BRANCH_COND) {
EMITTER(CALL_EXTERNAL) {
  const Xbyak::Reg addr = x64_backend_register(backend, instr->arg[0]);

  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memif.ctx_base));
  e.mov(arg0, reinterpret_cast<uint64_t>(backend->memory_if.ctx_base));
  if (instr->arg[1]) {
    const Xbyak::Reg arg = x64_backend_register(backend, instr->arg[1]);
    e.mov(arg1, arg);

@ -1649,7 +1657,8 @@
  e.call(e.rax);
}

struct jit_backend *x64_backend_create(const struct mem_interface *memif) {
struct jit_backend *x64_backend_create(
    const struct jit_memory_interface *memory_if) {
  struct x64_backend *backend = reinterpret_cast<struct x64_backend *>(
      calloc(1, sizeof(struct x64_backend)));

@ -1660,7 +1669,7 @@ struct jit_backend *x64_backend_create(const struct mem_interface *memif) {
  backend->base.dump_code = &x64_backend_dump_code;
  backend->base.handle_exception = &x64_backend_handle_exception;

  backend->memif = *memif;
  backend->memory_if = *memory_if;

  backend->codegen = new Xbyak::CodeGenerator(x64_code_size, x64_code);
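Note that the exception handler above never calls the slow path directly: it parks the callback address in rax and resumes execution at a thunk that performs the call. A hedged, standalone mirror of just the operand-size dispatch, with stub callbacks in place of the backend's real accessors (stub signatures are simplified; the real callbacks also take mem_self):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*slow_read_fn)(uint32_t guest_addr);

/* stubs standing in for memory_if.r8 / r16 / r32 */
static uint64_t read8(uint32_t addr) { (void)addr; return 0x11; }
static uint64_t read16(uint32_t addr) { (void)addr; return 0x2222; }
static uint64_t read32(uint32_t addr) { (void)addr; return 0x33333333; }

/* mirrors the handler's operand-size switch: pick the slow-path read whose
   address gets parked in rax for the thunk to call */
static slow_read_fn select_read(int operand_size) {
  switch (operand_size) {
    case 1:
      return &read8;
    case 2:
      return &read16;
    case 4:
      return &read32;
    default:
      return NULL;
  }
}

int main() {
  slow_read_fn fn = select_read(2);
  printf("0x%x\n", (unsigned)fn(0x8c000000)); /* prints 0x2222 */
  return 0;
}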
@ -3,10 +3,11 @@

#include "jit/backend/backend.h"

extern const struct register_def x64_registers[];
extern const struct jit_register x64_registers[];
extern const int x64_num_registers;

struct jit_backend *x64_backend_create(const struct mem_interface *memif);
struct jit_backend *x64_backend_create(
    const struct jit_memory_interface *memory_if);
void x64_backend_destroy(struct jit_backend *b);

#endif
@ -27,7 +27,7 @@ struct register_set {

struct ra {
  // canonical backend register information
  const struct register_def *registers;
  const struct jit_register *registers;
  int num_registers;

  // allocation state

@ -240,7 +240,7 @@ static int ra_reuse_arg_register(struct ra *ra, struct ir *ir,
  }

  // make sure the register can hold the result type
  const struct register_def *r = &ra->registers[prefered];
  const struct jit_register *r = &ra->registers[prefered];
  if (!(r->value_types & (1 << instr->result->type))) {
    return NO_REGISTER;
  }

@ -334,13 +334,13 @@ static void ra_assign_ordinals(struct ir *ir) {
  }
}

static void ra_init_sets(struct ra *ra, const struct register_def *registers,
static void ra_init_sets(struct ra *ra, const struct jit_register *registers,
                         int num_registers) {
  ra->registers = registers;
  ra->num_registers = num_registers;

  for (int i = 0; i < ra->num_registers; i++) {
    const struct register_def *r = &ra->registers[i];
    const struct jit_register *r = &ra->registers[i];

    if (r->value_types == VALUE_INT_MASK) {
      ra_push_register(&ra->int_registers, i);

@ -354,7 +354,7 @@ static void ra_init_sets(struct ra *ra, const struct register_def *registers,
  }
}

void ra_run(struct ir *ir, const struct register_def *registers,
void ra_run(struct ir *ir, const struct jit_register *registers,
            int num_registers) {
  struct ra ra = {};
@ -2,9 +2,9 @@
#define REGISTER_ALLOCATION_PASS_H

struct ir;
struct register_def;
struct jit_register;

void ra_run(struct ir *ir, const struct register_def *registers,
void ra_run(struct ir *ir, const struct jit_register *registers,
            int num_registers);

#endif
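ra_init_sets above buckets each jit_register by its value_types mask: integer-only registers feed one allocation set, the remainder (the xmm-style entries in the x64 table) another. A standalone sketch of that partitioning; the mask values and register names here are illustrative stand-ins, while the struct layout matches the jit_register definition above:

#include <stdio.h>

/* illustrative masks; the real VALUE_* masks live in the ir headers */
#define VALUE_INT_MASK 0x0f
#define VALUE_FLOAT_MASK 0x30

struct jit_register {
  const char *name;
  int value_types;
  const void *data;
};

static const struct jit_register registers[] = {
    {"rbx", VALUE_INT_MASK, NULL},
    {"r12", VALUE_INT_MASK, NULL},
    {"xmm6", VALUE_FLOAT_MASK, NULL},
};

int main() {
  int num_registers = (int)(sizeof(registers) / sizeof(registers[0]));

  /* mirror of the ra_init_sets loop: integer-only registers go to the int
     set, everything else to the float set */
  for (int i = 0; i < num_registers; i++) {
    const struct jit_register *r = &registers[i];
    printf("%s -> %s\n", r->name,
           r->value_types == VALUE_INT_MASK ? "int set" : "float set");
  }
  return 0;
}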
@ -133,13 +133,12 @@ void rb_end_ortho(struct rb *rb);
void rb_begin_frame(struct rb *rb);
void rb_end_frame(struct rb *rb);

texture_handle_t rb_register_texture(struct rb *rb, enum pxl_format format,
                                     enum filter_mode filter,
                                     enum wrap_mode wrap_u,
                                     enum wrap_mode wrap_v, bool mipmaps,
                                     int width, int height,
                                     const uint8_t *buffer);
void rb_free_texture(struct rb *rb, texture_handle_t handle);
texture_handle_t rb_create_texture(struct rb *rb, enum pxl_format format,
                                   enum filter_mode filter,
                                   enum wrap_mode wrap_u, enum wrap_mode wrap_v,
                                   bool mipmaps, int width, int height,
                                   const uint8_t *buffer);
void rb_destroy_texture(struct rb *rb, texture_handle_t handle);

struct rb *rb_create(struct window *window);
void rb_destroy(struct rb *rb);
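The register/free pair becomes create/destroy, matching the rb_create/rb_destroy naming directly below it. A hedged usage sketch of the renamed API; the include path and pixel data are illustrative, while the function names and enum values are the ones declared above:

#include <stdbool.h>
#include <stdint.h>

#include "renderer/backend.h" /* path assumed; declares the rb_* api above */

/* upload a tiny 2x2 RGBA texture, then release it */
static void texture_roundtrip(struct rb *rb) {
  static const uint8_t pixels[2 * 2 * 4] = {
      0xff, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
      0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  };

  texture_handle_t tex =
      rb_create_texture(rb, PXL_RGBA, FILTER_NEAREST, WRAP_CLAMP_TO_EDGE,
                        WRAP_CLAMP_TO_EDGE, false, 2, 2, pixels);

  /* ... draw with tex ... */

  rb_destroy_texture(rb, tex);
}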
@ -629,12 +629,11 @@ void rb_end_frame(struct rb *rb) {
  SDL_GL_SwapWindow(rb->window->handle);
}

texture_handle_t rb_register_texture(struct rb *rb, enum pxl_format format,
                                     enum filter_mode filter,
                                     enum wrap_mode wrap_u,
                                     enum wrap_mode wrap_v, bool mipmaps,
                                     int width, int height,
                                     const uint8_t *buffer) {
texture_handle_t rb_create_texture(struct rb *rb, enum pxl_format format,
                                   enum filter_mode filter,
                                   enum wrap_mode wrap_u, enum wrap_mode wrap_v,
                                   bool mipmaps, int width, int height,
                                   const uint8_t *buffer) {
  // FIXME worth speeding up?
  texture_handle_t handle;
  for (handle = 1; handle < MAX_TEXTURES; handle++) {

@ -692,7 +691,7 @@ texture_handle_t rb_register_texture(struct rb *rb, enum pxl_format format,
  return handle;
}

void rb_free_texture(struct rb *rb, texture_handle_t handle) {
void rb_destroy_texture(struct rb *rb, texture_handle_t handle) {
  GLuint *gltex = &rb->textures[handle];
  glDeleteTextures(1, gltex);
  *gltex = 0;
@ -83,7 +83,7 @@ static bool watcher_handle_exception(void *ctx, struct exception *ex) {
  return handled;
}

struct memory_watch *add_single_write_watch(void *ptr, size_t size,
struct memory_watch *add_single_write_watch(const void *ptr, size_t size,
                                            memory_watch_cb cb, void *data) {
  if (!s_watcher) {
    watcher_create();

@ -46,7 +46,7 @@ enum memory_watch_type {

typedef void (*memory_watch_cb)(const struct exception *, void *);

struct memory_watch *add_single_write_watch(void *ptr, size_t size,
struct memory_watch *add_single_write_watch(const void *ptr, size_t size,
                                            memory_watch_cb cb, void *data);
void remove_memory_watch(struct memory_watch *watch);
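add_single_write_watch now takes a const pointer, which fits its role: the watcher only guards the range, it never mutates it. A hedged usage sketch; the header path is an assumption, the callback signature matches the typedef above, and the self-removing behavior hinted at by "single" is a guess noted in the comment:

#include <stdint.h>
#include <stdio.h>

#include "sys/memory_watch.h" /* header path assumed */

static uint8_t guarded[4096];

/* fires on the first write into the watched range; a single-write watch
   presumably removes itself after firing */
static void on_first_write(const struct exception *ex, void *data) {
  (void)ex;
  printf("write hit: %s\n", (const char *)data);
}

static void watch_example(void) {
  struct memory_watch *watch = add_single_write_watch(
      guarded, sizeof(guarded), &on_first_write, "guarded range");

  /* if the write never happens, drop the watch explicitly */
  remove_memory_watch(watch);
}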
@ -0,0 +1,26 @@
#ifndef THREADS_H
#define THREADS_H

typedef void *thread_t;
typedef void *(*thread_fn)(void *);

thread_t thread_create(thread_fn fn, const char *name, void *data);
void thread_detach(thread_t thread);
void thread_join(thread_t thread, void **result);

typedef void *mutex_t;

mutex_t mutex_create(void);
int mutex_trylock(mutex_t mutex);
void mutex_lock(mutex_t mutex);
void mutex_unlock(mutex_t mutex);
void mutex_destroy(mutex_t mutex);

typedef void *cond_t;

cond_t cond_create(void);
void cond_wait(cond_t cond, mutex_t mutex);
void cond_signal(cond_t cond);
void cond_destroy(cond_t cond);

#endif
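The new portable threading wrapper keeps every primitive opaque behind a void pointer. A minimal usage sketch of the thread half of the API, assuming only the declarations above:

#include <stdio.h>

#include "sys/thread.h"

/* worker body: thread_fn takes and returns void pointers */
static void *worker(void *data) {
  printf("hello from %s\n", (const char *)data);
  return data;
}

int main() {
  thread_t thread = thread_create(&worker, "worker", "render thread");
  if (!thread) {
    return 1;
  }

  void *result = NULL;
  thread_join(thread, &result);
  printf("worker returned %s\n", (const char *)result);
  return 0;
}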
@ -0,0 +1,95 @@
#include <pthread.h>
#include <stdlib.h>
#include "sys/thread.h"

static void thread_destroy(thread_t thread) {
  pthread_t *pthread = (pthread_t *)thread;

  free(pthread);
}

thread_t thread_create(thread_fn fn, const char *name, void *data) {
  // note, name is currently unused on posix
  pthread_t *pthread = calloc(1, sizeof(pthread_t));

  if (pthread_create(pthread, NULL, fn, data)) {
    thread_destroy(pthread);
    return NULL;
  }

  return (thread_t)pthread;
}

void thread_detach(thread_t thread) {
  pthread_t *pthread = (pthread_t *)thread;

  pthread_detach(*pthread);

  // the opaque handle is dead once the thread has been detached
  thread_destroy(thread);
}

void thread_join(thread_t thread, void **result) {
  pthread_t *pthread = (pthread_t *)thread;

  pthread_join(*pthread, result);

  // the opaque handle is dead once the thread has been joined
  thread_destroy(thread);
}

mutex_t mutex_create(void) {
  pthread_mutex_t *pmutex = calloc(1, sizeof(pthread_mutex_t));

  pthread_mutex_init(pmutex, NULL);

  return (mutex_t)pmutex;
}

int mutex_trylock(mutex_t mutex) {
  pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

  return pthread_mutex_trylock(pmutex) == 0;
}

void mutex_lock(mutex_t mutex) {
  pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

  pthread_mutex_lock(pmutex);
}

void mutex_unlock(mutex_t mutex) {
  pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

  pthread_mutex_unlock(pmutex);
}

void mutex_destroy(mutex_t mutex) {
  pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

  pthread_mutex_destroy(pmutex);

  free(pmutex);
}

cond_t cond_create(void) {
  pthread_cond_t *pcond = calloc(1, sizeof(pthread_cond_t));

  pthread_cond_init(pcond, NULL);

  return (cond_t)pcond;
}

void cond_wait(cond_t cond, mutex_t mutex) {
  pthread_cond_t *pcond = (pthread_cond_t *)cond;
  pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

  pthread_cond_wait(pcond, pmutex);
}

void cond_signal(cond_t cond) {
  pthread_cond_t *pcond = (pthread_cond_t *)cond;

  pthread_cond_signal(pcond);
}

void cond_destroy(cond_t cond) {
  pthread_cond_t *pcond = (pthread_cond_t *)cond;

  pthread_cond_destroy(pcond);

  free(pcond);
}
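Note two small fixes folded in above: cond_create originally allocated sizeof(pthread_mutex_t) for a pthread_cond_t, and neither thread_detach nor thread_join freed the heap-allocated pthread_t wrapper. With those in place, the classic condition-variable handshake works end to end using only this API:

#include <stdbool.h>
#include <stdio.h>

#include "sys/thread.h"

static mutex_t mutex;
static cond_t cond;
static bool ready;

static void *producer(void *data) {
  (void)data;

  mutex_lock(mutex);
  ready = true;
  cond_signal(cond);
  mutex_unlock(mutex);

  return NULL;
}

int main() {
  mutex = mutex_create();
  cond = cond_create();

  thread_t thread = thread_create(&producer, "producer", NULL);

  mutex_lock(mutex);
  while (!ready) {
    /* cond_wait atomically releases the mutex and reacquires it on wakeup,
       mirroring pthread_cond_wait */
    cond_wait(cond, mutex);
  }
  mutex_unlock(mutex);

  thread_join(thread, NULL);

  cond_destroy(cond);
  mutex_destroy(mutex);

  printf("handshake complete\n");
  return 0;
}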
@ -27,7 +27,7 @@ static const int MAX_2D_SURFACES = 256;
struct microprofile {
  struct window *window;
  struct window_listener *listener;
  texture_handle_t font_tex;
  texture_handle_t font_texture;
  struct surface2d surfs[MAX_2D_SURFACES];
  int num_surfs;
  struct vertex2d verts[MAX_2D_VERTICES];

@ -103,7 +103,7 @@ static void mp_draw_text(struct microprofile *mp, int x, int y, uint32_t color,
  int text_len = static_cast<int>(strlen(text));

  struct vertex2d *vertex = mp_alloc_verts(mp, {PRIM_TRIANGLES,
                                                mp->font_tex,
                                                mp->font_texture,
                                                BLEND_SRC_ALPHA,
                                                BLEND_ONE_MINUS_SRC_ALPHA,
                                                false,

@ -283,15 +283,17 @@ struct microprofile *mp_create(struct window *window) {
  g_MicroProfile.nBars |= MP_DRAW_TIMERS | MP_DRAW_AVERAGE | MP_DRAW_CALL_COUNT;

  // register the font texture
  mp->font_tex =
      rb_register_texture(rb, PXL_RGBA, FILTER_NEAREST, WRAP_CLAMP_TO_EDGE,
                          WRAP_CLAMP_TO_EDGE, false, FONT_WIDTH, FONT_HEIGHT,
                          reinterpret_cast<const uint8_t *>(s_font_data));
  mp->font_texture =
      rb_create_texture(rb, PXL_RGBA, FILTER_NEAREST, WRAP_CLAMP_TO_EDGE,
                        WRAP_CLAMP_TO_EDGE, false, FONT_WIDTH, FONT_HEIGHT,
                        reinterpret_cast<const uint8_t *>(s_font_data));

  return mp;
}

void mp_destroy(struct microprofile *mp) {
  rb_destroy_texture(mp->window->rb, mp->font_texture);

  win_remove_listener(mp->window, mp->listener);

  free(mp);
@ -143,10 +143,10 @@ struct nuklear *nk_create(struct window *window) {
  int font_width, font_height;
  const void *font_data = nk_font_atlas_bake(
      &nk->atlas, &font_width, &font_height, NK_FONT_ATLAS_RGBA32);
  texture_handle_t handle = rb_register_texture(
      nk->window->rb, PXL_RGBA, FILTER_BILINEAR, WRAP_REPEAT, WRAP_REPEAT,
      false, font_width, font_height, font_data);
  nk_font_atlas_end(&nk->atlas, nk_handle_id((int)handle), &nk->null);
  nk->font_texture =
      rb_create_texture(nk->window->rb, PXL_RGBA, FILTER_BILINEAR, WRAP_REPEAT,
                        WRAP_REPEAT, false, font_width, font_height, font_data);
  nk_font_atlas_end(&nk->atlas, nk_handle_id((int)nk->font_texture), &nk->null);

  // initialize nuklear context
  nk_init_default(&nk->ctx, &font->handle);

@ -160,6 +160,8 @@ void nk_destroy(struct nuklear *nk) {
  nk_font_atlas_clear(&nk->atlas);
  nk_free(&nk->ctx);

  rb_destroy_texture(nk->window->rb, nk->font_texture);

  win_remove_listener(nk->window, nk->listener);

  free(nk);

@ -29,6 +29,8 @@ struct nuklear {
  struct nk_font_atlas atlas;
  struct nk_draw_null_texture null;

  texture_handle_t font_texture;

  // render buffers
  struct vertex2d vertices[NK_MAX_VERTICES];
  uint16_t elements[NK_MAX_ELEMENTS];