mirror of https://github.com/inolen/redream.git
remove remaining bool usage
parent 26e572e8e5
commit 9eb56d1ab2
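The change is mechanical: drop the `#include <stdbool.h>` lines, turn `bool` fields, parameters and return types into `int`, and turn `true`/`false` into `1`/`0`. Call sites keep working unchanged, since they only ever test the result for truth. A minimal sketch of the resulting device-init convention (hypothetical device and function names, not code from the repository):

```c
#include <stdio.h>

/* hypothetical device type, mirroring the callback shape in dreamcast.h */
struct device {
  const char *name;
  int (*init)(struct device *dev); /* was: bool (*init)(struct device *dev) */
};

/* returns 1 on success, 0 on failure -- the convention used throughout this diff */
static int null_device_init(struct device *dev) {
  if (!dev->name) {
    return 0;
  }
  printf("initialized %s\n", dev->name);
  return 1;
}

int main() {
  struct device dev = {"null", &null_device_init};

  /* the truthiness check reads the same whether init returns bool or int */
  if (!dev.init(&dev)) {
    fprintf(stderr, "device failed to initialize\n");
    return 1;
  }
  return 0;
}
```

Failure is reported with `return 0` and checked with `if (!dev.init(&dev))`, exactly the pattern `dc_init` follows in the hunks below.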
@@ -36,10 +36,10 @@ struct emu {
   int sorted_surfs[TA_MAX_SURFS];
 };
 
-static bool emu_launch_bin(struct emu *emu, const char *path) {
+static int emu_launch_bin(struct emu *emu, const char *path) {
   FILE *fp = fopen(path, "rb");
   if (!fp) {
-    return false;
+    return 0;
   }
 
   fseek(fp, 0, SEEK_END);
@@ -53,27 +53,27 @@ static bool emu_launch_bin(struct emu *emu, const char *path) {
 
   if (n != size) {
     LOG_WARNING("BIN read failed");
-    return false;
+    return 0;
   }
 
   sh4_reset(emu->dc->sh4, 0x0c010000);
   dc_resume(emu->dc);
 
-  return true;
+  return 1;
 }
 
-static bool emu_launch_gdi(struct emu *emu, const char *path) {
+static int emu_launch_gdi(struct emu *emu, const char *path) {
   struct disc *disc = disc_create_gdi(path);
 
   if (!disc) {
-    return false;
+    return 0;
   }
 
   gdrom_set_disc(emu->dc->gdrom, disc);
   sh4_reset(emu->dc->sh4, 0xa0000000);
   dc_resume(emu->dc);
 
-  return true;
+  return 1;
 }
 
 static void emu_paint(void *data) {

@@ -223,7 +223,7 @@ static uint32_t aica_rtc_reg_read(struct aica *aica, uint32_t addr,
     case 0x8:
       return 0;
     default:
-      CHECK(false);
+      LOG_FATAL("Unexpected rtc address");
       return 0;
   }
 }
@@ -246,7 +246,7 @@ static void aica_rtc_reg_write(struct aica *aica, uint32_t addr, uint32_t data,
       aica->rtc_write = data & 1;
       break;
     default:
-      CHECK(false);
+      LOG_FATAL("Unexpected rtc address");
       break;
   }
 }
@@ -670,7 +670,7 @@ static void aica_debug_menu(struct device *dev, struct nk_context *ctx) {
   }
 }
 
-static bool aica_init(struct device *dev) {
+static int aica_init(struct device *dev) {
   struct aica *aica = (struct aica *)dev;
 
   aica->wave_ram = memory_translate(aica->memory, "aica wave ram", 0x00000000);
@@ -690,7 +690,7 @@ static bool aica_init(struct device *dev) {
   scheduler_start_timer(aica->scheduler, &aica_next_sample, aica,
                         HZ_TO_NANO(AICA_SAMPLE_FREQ / AICA_SAMPLE_BATCH));
 
-  return true;
+  return 1;
 }
 
 void aica_destroy(struct aica *aica) {

@@ -1,4 +1,3 @@
-#include <stdbool.h>
 #include <stdio.h>
 #include "hw/arm7/arm7.h"
 #include "core/log.h"
@@ -208,7 +207,7 @@ static void arm7_run(struct device *dev, int64_t ns) {
   PROF_LEAVE();
 }
 
-static bool arm7_init(struct device *dev) {
+static int arm7_init(struct device *dev) {
   struct arm7 *arm = (struct arm7 *)dev;
   struct dreamcast *dc = arm->dc;
 
@@ -247,12 +246,12 @@ static bool arm7_init(struct device *dev) {
   arm->backend = backend;
 
   if (!jit_init(arm->jit, &arm->guest, arm->frontend, arm->backend)) {
-    return false;
+    return 0;
   }
 
   arm->wave_ram = memory_translate(dc->memory, "aica wave ram", 0x00000000);
 
-  return true;
+  return 1;
 }
 
 void arm7_destroy(struct arm7 *arm) {

@@ -56,7 +56,7 @@ static void debugger_gdb_server_read_reg(void *data, int n, intmax_t *value,
   *value = v;
 }
 
-bool debugger_init(struct debugger *dbg) {
+int debugger_init(struct debugger *dbg) {
   // use the first device found with a debug interface
   list_for_each_entry(dev, &dbg->dc->devices, struct device, it) {
     if (dev->debug_if) {
@@ -67,7 +67,7 @@ bool debugger_init(struct debugger *dbg) {
 
   // didn't find a debuggable device
   if (!dbg->dev) {
-    return false;
+    return 0;
   }
 
   // create the gdb server
@@ -87,10 +87,10 @@ bool debugger_init(struct debugger *dbg) {
   dbg->sv = gdb_server_create(&target, 24690);
   if (!dbg->sv) {
     LOG_WARNING("Failed to create GDB server");
-    return false;
+    return 0;
   }
 
-  return true;
+  return 1;
 }
 
 void debugger_trap(struct debugger *dbg) {

@@ -1,13 +1,12 @@
 #ifndef DEBUGGER_H
 #define DEBUGGER_H
 
-#include <stdbool.h>
 #include <stdint.h>
 
 struct dreamcast;
 struct debugger;
 
-bool debugger_init(struct debugger *dbg);
+int debugger_init(struct debugger *dbg);
 void debugger_trap(struct debugger *dbg);
 void debugger_tick(struct debugger *dbg);
 

@@ -68,15 +68,15 @@ void dc_suspend(struct dreamcast *dc) {
   dc->running = 0;
 }
 
-bool dc_init(struct dreamcast *dc) {
+int dc_init(struct dreamcast *dc) {
   if (dc->debugger && !debugger_init(dc->debugger)) {
     dc_destroy(dc);
-    return false;
+    return 0;
   }
 
   if (!memory_init(dc->memory)) {
     dc_destroy(dc);
-    return false;
+    return 0;
   }
 
   /* initialize each device */
@@ -99,11 +99,11 @@ bool dc_init(struct dreamcast *dc) {
     if (!dev->init(dev)) {
       LOG_INFO("Device \"%s\" failed to initialize", dev->name);
       dc_destroy(dc);
-      return false;
+      return 0;
     }
   }
 
-  return true;
+  return 1;
 }
 
 void dc_destroy_window_interface(struct window_interface *window) {
@@ -163,7 +163,7 @@ struct device *dc_get_device(struct dreamcast *dc, const char *name) {
 }
 
 void *dc_create_device(struct dreamcast *dc, size_t size, const char *name,
-                       bool (*init)(struct device *dev)) {
+                       int (*init)(struct device *dev)) {
   struct device *dev = calloc(1, size);
 
   dev->dc = dc;

@@ -1,7 +1,6 @@
 #ifndef DREAMCAST_H
 #define DREAMCAST_H
 
-#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 #include "core/list.h"
@@ -76,7 +75,7 @@ typedef void (*device_run_cb)(struct device *, int64_t);
 
 struct execute_interface {
   device_run_cb run;
-  bool running;
+  int running;
 };
 
 /* memory interface */
@@ -104,7 +103,7 @@ struct window_interface {
 struct device {
   struct dreamcast *dc;
   const char *name;
-  bool (*init)(struct device *dev);
+  int (*init)(struct device *dev);
 
   /* optional interfaces */
   struct debug_interface *debug_if;
@@ -155,7 +154,7 @@ struct dreamcast *dc_create();
 void dc_destroy(struct dreamcast *dc);
 
 void *dc_create_device(struct dreamcast *dc, size_t size, const char *name,
-                       bool (*init)(struct device *dev));
+                       int (*init)(struct device *dev));
 struct device *dc_get_device(struct dreamcast *dc, const char *name);
 void dc_destroy_device(struct device *dev);
 
@@ -172,7 +171,7 @@ struct window_interface *dc_create_window_interface(
     device_joy_add_cb joy_add, device_joy_remove_cb joy_remove);
 void dc_destroy_window_interface(struct window_interface *window);
 
-bool dc_init(struct dreamcast *dc);
+int dc_init(struct dreamcast *dc);
 void dc_suspend(struct dreamcast *dc);
 void dc_resume(struct dreamcast *dc);
 void dc_tick(struct dreamcast *dc, int64_t ns);

@@ -38,7 +38,7 @@ enum gd_state {
 };
 
 struct cdread {
-  bool dma;
+  int dma;
   enum gd_secfmt sector_fmt;
   enum gd_secmask sector_mask;
   int first_sector;
@@ -70,7 +70,7 @@ struct gdrom {
 static void gdrom_event(struct gdrom *gd, enum gd_event ev, intptr_t arg0,
                         intptr_t arg1);
 
-static int gdrom_get_fad(uint8_t a, uint8_t b, uint8_t c, bool msf) {
+static int gdrom_get_fad(uint8_t a, uint8_t b, uint8_t c, int msf) {
   if (msf) {
     /* MSF mode
        Byte 2 - Start time: minutes (binary 0 - 255)
@@ -273,7 +273,7 @@ static void gdrom_spi_cmd(struct gdrom *gd, uint8_t *data) {
     } break;
 
     case SPI_CD_READ: {
-      bool msf = (data[1] & 0x1);
+      int msf = (data[1] & 0x1);
 
       gd->req.dma = gd->features.dma;
       gd->req.sector_fmt = (enum gd_secfmt)((data[1] & 0xe) >> 1);
@@ -347,7 +347,7 @@ static int gdrom_read_sectors(struct gdrom *gd, int fad, enum gd_secfmt fmt,
       total += 2048;
       fad++;
     } else {
-      CHECK(false);
+      LOG_FATAL("Unsupported sector format");
     }
   }
 
@@ -521,12 +521,12 @@ static void gdrom_event(struct gdrom *gd, enum gd_event ev, intptr_t arg0,
             gd->state);
 }
 
-static bool gdrom_init(struct device *dev) {
+static int gdrom_init(struct device *dev) {
   struct gdrom *gd = (struct gdrom *)dev;
 
   gdrom_set_disc(gd, NULL);
 
-  return true;
+  return 1;
 }
 
 void gdrom_dma_end(struct gdrom *gd) {

@@ -302,9 +302,9 @@ static void holly_debug_menu(struct device *dev, struct nk_context *ctx) {
   }
 }
 
-static bool holly_init(struct device *dev) {
+static int holly_init(struct device *dev) {
   struct holly *hl = (struct holly *)dev;
-  return true;
+  return 1;
 }
 
 void holly_destroy(struct holly *hl) {

@@ -157,8 +157,8 @@ int maple_handle_command(struct maple *mp, struct maple_frame *frame,
   return 1;
 }
 
-static bool maple_init(struct device *dev) {
-  return true;
+static int maple_init(struct device *dev) {
+  return 1;
 }
 
 void maple_destroy(struct maple *mp) {

@@ -64,7 +64,7 @@ static void mirror_iterator_init(struct mirror_iterator *it, uint32_t addr,
   it->first = 1;
 }
 
-static bool mirror_iterator_next(struct mirror_iterator *it) {
+static int mirror_iterator_next(struct mirror_iterator *it) {
   /* first iteration just returns base */
   if (it->first) {
     it->first = 0;
@@ -92,7 +92,7 @@ static bool mirror_iterator_next(struct mirror_iterator *it) {
   return 1;
 }
 
-static bool reserve_address_space(uint8_t **base) {
+static int reserve_address_space(uint8_t **base) {
   /* find a contiguous (1 << 32) chunk of memory to map an address space to */
   int i = 64;
 

@@ -96,7 +96,7 @@ static inline void UYVY422_read(UYVY422_type px, uint8_t *r, uint8_t *g,
 
 static inline void UYVY422_write(UYVY422_type *dst, uint8_t r, uint8_t g,
                                  uint8_t b, uint8_t a) {
-  CHECK(false);
+  LOG_FATAL("UYVY422_write unsupported");
 }
 
 /* ARGB4444 */

@@ -211,7 +211,7 @@ static void pvr_vram_interleaved_write_string(struct pvr *pvr, uint32_t dst,
   }
 }
 
-static bool pvr_init(struct device *dev) {
+static int pvr_init(struct device *dev) {
   struct pvr *pvr = (struct pvr *)dev;
   struct dreamcast *dc = pvr->dc;
 
@@ -228,7 +228,7 @@ static bool pvr_init(struct device *dev) {
   /* configure initial vsync interval */
   pvr_reconfigure_spg(pvr);
 
-  return true;
+  return 1;
 }
 
 void pvr_destroy(struct pvr *pvr) {

@@ -839,7 +839,7 @@ static void ta_texture_fifo_write(struct ta *ta, uint32_t dst, void *ptr,
   PROF_LEAVE();
 }
 
-static bool ta_init(struct device *dev) {
+static int ta_init(struct device *dev) {
   struct ta *ta = (struct ta *)dev;
   struct dreamcast *dc = ta->dc;
 
@@ -855,7 +855,7 @@ static bool ta_init(struct device *dev) {
     list_add(&ta->free_contexts, &ctx->it);
   }
 
-  return true;
+  return 1;
 }
 
 static void ta_toggle_tracing(struct ta *ta) {
@@ -917,13 +917,13 @@ static void ta_debug_menu(struct device *dev, struct nk_context *ctx) {
 }
 
 void ta_build_tables() {
-  static bool initialized = false;
+  static int initialized = 0;
 
   if (initialized) {
     return;
   }
 
-  initialized = true;
+  initialized = 1;
 
   for (int i = 0; i < 0x100; i++) {
     union pcw pcw = *(union pcw *)&i;

@@ -1,7 +1,6 @@
 #ifndef TA_TYPES_H
 #define TA_TYPES_H
 
-#include <stdbool.h>
 #include "core/list.h"
 #include "core/rb_tree.h"
 
@@ -443,7 +442,7 @@ struct tile_ctx {
 
   /* pvr / ta state */
   int frame;
-  bool autosort;
+  int autosort;
   int stride;
   int pal_pxl_format;
   int video_width;

@@ -45,15 +45,15 @@ static int boot_load_rom(struct boot *boot, const char *path) {
   return 1;
 }
 
-static bool boot_init(struct device *dev) {
+static int boot_init(struct device *dev) {
   struct boot *boot = (struct boot *)dev;
 
   if (!boot_load_rom(boot, OPTION_bios)) {
     LOG_WARNING("Failed to load boot rom");
-    return false;
+    return 0;
   }
 
-  return true;
+  return 1;
 }
 
 void boot_destroy(struct boot *boot) {

@@ -196,15 +196,15 @@ static void flash_rom_write(struct flash *flash, uint32_t addr, uint32_t data,
   }
 }
 
-static bool flash_init(struct device *dev) {
+static int flash_init(struct device *dev) {
   struct flash *flash = (struct flash *)dev;
 
   if (!flash_init_bin()) {
     LOG_WARNING("Failed to load flash rom");
-    return false;
+    return 0;
   }
 
-  return true;
+  return 1;
 }
 
 void flash_destroy(struct flash *flash) {

@@ -283,7 +283,7 @@ static void sh4_run(struct device *dev, int64_t ns) {
   PROF_LEAVE();
 }
 
-static bool sh4_init(struct device *dev) {
+static int sh4_init(struct device *dev) {
   struct sh4 *sh4 = (struct sh4 *)dev;
   struct dreamcast *dc = sh4->dc;
 
@@ -323,10 +323,10 @@ static bool sh4_init(struct device *dev) {
   sh4->backend = backend;
 
   if (!jit_init(sh4->jit, &sh4->guest, sh4->frontend, sh4->backend)) {
-    return false;
+    return 0;
   }
 
-  return true;
+  return 1;
 }
 
 void sh4_destroy(struct sh4 *sh4) {

@@ -62,7 +62,7 @@ void sh4_intc_reprioritize(struct sh4 *sh4) {
       continue;
     }
 
-    bool was_requested = old & sh4->sort_id[j];
+    int was_requested = old & sh4->sort_id[j];
 
     sh4->sorted_interrupts[n] = j;
     sh4->sort_id[j] = (uint64_t)1 << n;

@@ -16,9 +16,9 @@ extern "C" {
 #include "sys/memory.h"
 }
 
-//
-// x64 stack layout
-//
+/*
+ * x64 stack layout
+ */
 
 #if PLATFORM_WINDOWS
 static const int STACK_SHADOW_SPACE = 32;
@@ -27,37 +27,37 @@ static const int STACK_SHADOW_SPACE = 0;
 #endif
 static const int STACK_OFFSET_LOCALS = STACK_SHADOW_SPACE + 8;
 
-//
-// x64 register layout
-//
+/*
+ * x64 register layout
+ */
 
-// %rax %eax %ax %al <-- both: temporary
-// %rcx %ecx %cx %cl <-- both: argument
-// %rdx %edx %dx %dl <-- both: argument
-// %rbx %ebx %bx %bl <-- both: available (callee saved)
-// %rsp %esp %sp %spl <-- both: reserved
-// %rbp %ebp %bp %bpl <-- both: available (callee saved)
-// %rsi %esi %si %sil <-- msvc: available (callee saved), amd64: argument
-// %rdi %edi %di %dil <-- msvc: available (callee saved), amd64: argument
-// %r8 %r8d %r8w %r8b <-- both: argument
-// %r9 %r9d %r9w %r9b <-- both: argument
-// %r10 %r10d %r10w %r10b <-- both: available (not callee saved)
-// %r11 %r11d %r11w %r11b <-- both: available (not callee saved)
-// %r12 %r12d %r12w %r12b <-- both: available (callee saved)
-// %r13 %r13d %r13w %r13b <-- both: available (callee saved)
-// %r14 %r14d %r14w %r14b <-- both: available (callee saved)
-// %r15 %r15d %r15w %r15b <-- both: available (callee saved)
+/* %rax %eax %ax %al <-- both: temporary
+   %rcx %ecx %cx %cl <-- both: argument
+   %rdx %edx %dx %dl <-- both: argument
+   %rbx %ebx %bx %bl <-- both: available (callee saved)
+   %rsp %esp %sp %spl <-- both: reserved
+   %rbp %ebp %bp %bpl <-- both: available (callee saved)
+   %rsi %esi %si %sil <-- msvc: available (callee saved), amd64: argument
+   %rdi %edi %di %dil <-- msvc: available (callee saved), amd64: argument
+   %r8 %r8d %r8w %r8b <-- both: argument
+   %r9 %r9d %r9w %r9b <-- both: argument
+   %r10 %r10d %r10w %r10b <-- both: available (not callee saved)
+   %r11 %r11d %r11w %r11b <-- both: available (not callee saved)
+   %r12 %r12d %r12w %r12b <-- both: available (callee saved)
+   %r13 %r13d %r13w %r13b <-- both: available (callee saved)
+   %r14 %r14d %r14w %r14b <-- both: available (callee saved)
+   %r15 %r15d %r15w %r15b <-- both: available (callee saved)
 
-// msvc calling convention uses rcx, rdx, r8, r9 for arguments
-// amd64 calling convention uses rdi, rsi, rdx, rcx, r8, r9 for arguments
-// both use the same xmm registers for floating point arguments
-// our largest function call uses only 3 arguments
-// msvc is left with rax, rdi, rsi, r9-r11,
-// amd64 is left with rax, rcx, r8-r11 available on amd64
+   msvc calling convention uses rcx, rdx, r8, r9 for arguments
+   amd64 calling convention uses rdi, rsi, rdx, rcx, r8, r9 for arguments
+   both use the same xmm registers for floating point arguments
+   our largest function call uses only 3 arguments
+   msvc is left with rax, rdi, rsi, r9-r11,
+   amd64 is left with rax, rcx, r8-r11 available on amd64
 
-// rax is used as a scratch register
-// r10, r11, xmm1 are used for constant not eliminated by const propagation
-// r14, r15 are reserved for the context and memory pointers
+   rax is used as a scratch register
+   r10, r11, xmm1 are used for constant not eliminated by const propagation
+   r14, r15 are reserved for the context and memory pointers */
 
 #if PLATFORM_WINDOWS
 const int x64_arg0_idx = Xbyak::Operand::RCX;
@@ -81,14 +81,12 @@ const Xbyak::Reg64 tmp0(x64_tmp0_idx);
 const Xbyak::Reg64 tmp1(x64_tmp1_idx);
 
 const struct jit_register x64_registers[] = {
-    {"rbx", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::rbx)},
-    {"rbp", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::rbp)},
-    {"r12", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::r12)},
-    {"r13", VALUE_INT_MASK, reinterpret_cast<const void *>(&Xbyak::util::r13)},
-    // {"r14", VALUE_INT_MASK,
-    // reinterpret_cast<const void *>(&Xbyak::util::r14)},
-    // {"r15", VALUE_INT_MASK,
-    // reinterpret_cast<const void *>(&Xbyak::util::r15)},
+    {"rbx", VALUE_INT_MASK, (const void *)&Xbyak::util::rbx},
+    {"rbp", VALUE_INT_MASK, (const void *)&Xbyak::util::rbp},
+    {"r12", VALUE_INT_MASK, (const void *)&Xbyak::util::r12},
+    {"r13", VALUE_INT_MASK, (const void *)&Xbyak::util::r13},
+    /* {"r14", VALUE_INT_MASK, (const void *)&Xbyak::util::r14},
+       {"r15", VALUE_INT_MASK, (const void *)&Xbyak::util::r15}, */
     {"xmm6", VALUE_FLOAT_MASK,
      reinterpret_cast<const void *>(&Xbyak::util::xmm6)},
     {"xmm7", VALUE_FLOAT_MASK,
@@ -113,9 +111,9 @@ const struct jit_register x64_registers[] = {
 const int x64_num_registers =
     sizeof(x64_registers) / sizeof(struct jit_register);
 
-//
-// x64 emitters for each ir op
-//
+/*
+ * x64 emitters for each ir op
+ */
 struct x64_backend;
 
 typedef void (*x64_emit_cb)(struct x64_backend *, Xbyak::CodeGenerator &,
@@ -134,11 +132,9 @@ static x64_emit_cb x64_backend_emitters[NUM_OPS];
   void x64_emit_##op(struct x64_backend *backend, Xbyak::CodeGenerator &e, \
                      const struct ir_instr *instr)
 
-//
-// xmm constants. SSE / AVX provides no support for loading a constant into an
-// xmm register, so instead frequently used constants are emitted to the code
-// buffer and used as memory operands
-//
+/* xmm constants. SSE / AVX provides no support for loading a constant into an
+   xmm register, so instead frequently used constants are emitted to the code
+   buffer and used as memory operand */
 enum xmm_constant {
   XMM_CONST_ABS_MASK_PS,
   XMM_CONST_ABS_MASK_PD,
@@ -169,8 +165,8 @@ static const Xbyak::Reg x64_backend_register(struct x64_backend *backend,
                                              const struct ir_value *v) {
   auto &e = *backend->codegen;
 
-  // if the value is a local or constant, copy it to a tempory register, else
-  // return the register allocated for it
+  /* if the value is a local or constant, copy it to a tempory register, else
+     return the register allocated for it */
   if (ir_is_constant(v)) {
     CHECK_LT(backend->num_temps, 2);
 
@@ -187,14 +183,14 @@ static const Xbyak::Reg x64_backend_register(struct x64_backend *backend,
         tmp = tmp.cvt32();
         break;
       case VALUE_I64:
-        // no conversion needed
+        /* no conversion needed */
        break;
      default:
        LOG_FATAL("Unexpected value type");
        break;
    }
 
-    // copy value to the temporary register
+    /* copy value to the temporary register */
    e.mov(tmp, ir_zext_constant(v));
 
    return tmp;
@@ -226,10 +222,10 @@ static const Xbyak::Xmm x64_backend_xmm_register(struct x64_backend *backend,
                                                  const struct ir_value *v) {
   auto &e = *backend->codegen;
 
-  // if the value isn't allocated a XMM register copy it to a temporary XMM,
-  // register, else return the XMM register allocated for it
+  /* if the value isn't allocated a XMM register copy it to a temporary XMM,
+     register, else return the XMM register allocated for it */
   if (ir_is_constant(v)) {
-    // copy value to the temporary register
+    /* copy value to the temporary register */
     if (v->type == VALUE_F32) {
       float val = v->f32;
       e.mov(e.eax, *(int32_t *)&val);
@@ -410,37 +406,37 @@ static int x64_backend_handle_exception(struct jit_backend *base,
 
   const uint8_t *data = reinterpret_cast<const uint8_t *>(ex->thread_state.rip);
 
-  // it's assumed a mov has triggered the exception
+  /* it's assumed a mov has triggered the exception */
   struct x64_mov mov;
   if (!x64_decode_mov(data, &mov)) {
     return 0;
   }
 
-  // figure out the guest address that was being accessed
+  /* figure out the guest address that was being accessed */
   const uint8_t *fault_addr = reinterpret_cast<const uint8_t *>(ex->fault_addr);
   const uint8_t *protected_start =
       reinterpret_cast<const uint8_t *>(ex->thread_state.r15);
   uint32_t guest_addr = static_cast<uint32_t>(fault_addr - protected_start);
 
-  // instead of handling the dynamic callback from inside of the exception
-  // handler, force rip to the beginning of a thunk which will invoke the
-  // callback once the exception handler has exited. this frees the callbacks
-  // from any restrictions imposed by an exception handler, and also prevents
-  // a possible recursive exceptions
+  /* instead of handling the dynamic callback from inside of the exception
+     handler, force rip to the beginning of a thunk which will invoke the
+     callback once the exception handler has exited. this frees the callbacks
+     from any restrictions imposed by an exception handler, and also prevents
+     a possible recursive exceptions
 
-  // push the return address (the next instruction after the current mov) to
-  // the stack. also, adjust the stack for the return address, with an extra
-  // 8 bytes to keep it aligned
+     push the return address (the next instruction after the current mov) to
+     the stack. also, adjust the stack for the return address, with an extra
+     8 bytes to keep it aligned */
   *(uintptr_t *)(ex->thread_state.rsp - 8) = ex->thread_state.rip + mov.length;
   ex->thread_state.rsp -= STACK_SHADOW_SPACE + 8 + 8;
   CHECK(ex->thread_state.rsp % 16 == 0);
 
   if (mov.is_load) {
-    // prep argument registers (memory object, guest_addr) for read function
+    /* prep argument registers (memory object, guest_addr) for read function */
     ex->thread_state.r[x64_arg0_idx] = reinterpret_cast<uint64_t>(guest->space);
     ex->thread_state.r[x64_arg1_idx] = static_cast<uint64_t>(guest_addr);
 
-    // prep function call address for thunk
+    /* prep function call address for thunk */
     switch (mov.operand_size) {
      case 1:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(guest->r8);
@@ -456,17 +452,17 @@ static int x64_backend_handle_exception(struct jit_backend *base,
        break;
    }
 
-    // resume execution in the thunk once the exception handler exits
+    /* resume execution in the thunk once the exception handler exits */
    ex->thread_state.rip =
        reinterpret_cast<uint64_t>(backend->load_thunk[mov.reg]);
  } else {
-    // prep argument registers (memory object, guest_addr, value) for write
-    // function
+    /* prep argument registers (memory object, guest_addr, value) for write
+       function */
    ex->thread_state.r[x64_arg0_idx] = reinterpret_cast<uint64_t>(guest->space);
    ex->thread_state.r[x64_arg1_idx] = static_cast<uint64_t>(guest_addr);
    ex->thread_state.r[x64_arg2_idx] = ex->thread_state.r[mov.reg];
 
-    // prep function call address for thunk
+    /* prep function call address for thunk */
    switch (mov.operand_size) {
      case 1:
        ex->thread_state.rax = reinterpret_cast<uint64_t>(guest->w8);
@@ -482,7 +478,7 @@ static int x64_backend_handle_exception(struct jit_backend *base,
        break;
    }
 
-    // resume execution in the thunk once the exception handler exits
+    /* resume execution in the thunk once the exception handler exits */
    ex->thread_state.rip = reinterpret_cast<uint64_t>(backend->store_thunk);
  }
 
@@ -512,8 +508,8 @@ static void *x64_backend_assemble_code(struct jit_backend *base, struct ir *ir,
 
   struct x64_backend *backend = container_of(base, struct x64_backend, base);
 
-  // try to generate the x64 code. if the code buffer overflows let the backend
-  // know so it can reset the cache and try again
+  /* try to generate the x64 code. if the code buffer overflows let the backend
+     know so it can reset the cache and try again */
   void *code = NULL;
 
   try {
@@ -995,7 +991,7 @@ EMITTER(SEXT) {
   const Xbyak::Reg a = x64_backend_register(backend, instr->arg[0]);
 
   if (a == result) {
-    // already the correct width
+    /* already the correct width */
     return;
   }
 
@@ -1011,12 +1007,12 @@ EMITTER(ZEXT) {
   const Xbyak::Reg a = x64_backend_register(backend, instr->arg[0]);
 
   if (a == result) {
-    // already the correct width
+    /* already the correct width */
     return;
   }
 
   if (result.isBit(64) && a.isBit(32)) {
-    // mov will automatically zero fill the upper 32-bits
+    /* mov will automatically zero fill the upper 32-bits */
     e.mov(result.cvt32(), a);
   } else {
     e.movzx(result, a);
@@ -1028,8 +1024,8 @@ EMITTER(TRUNC) {
   const Xbyak::Reg a = x64_backend_register(backend, instr->arg[0]);
 
   if (result.getIdx() == a.getIdx()) {
-    // noop if already the same register. note, this means the high order bits
-    // of the result won't be cleared, but I believe that is fine
+    /* noop if already the same register. note, this means the high order bits
+       of the result won't be cleared, but I believe that is fine */
     return;
   }
 
@@ -1049,7 +1045,7 @@ EMITTER(TRUNC) {
   }
 
   if (truncated.isBit(32)) {
-    // mov will automatically zero fill the upper 32-bits
+    /* mov will automatically zero fill the upper 32-bits */
     e.mov(result, truncated);
   } else {
     e.movzx(result.cvt32(), truncated);
@@ -1076,7 +1072,7 @@ EMITTER(SELECT) {
   const Xbyak::Reg b = x64_backend_register(backend, instr->arg[1]);
   const Xbyak::Reg cond = x64_backend_register(backend, instr->arg[2]);
 
-  // convert result to Reg32e to please xbyak
+  /* convert result to Reg32e to please xbyak */
   CHECK_GE(result.getBit(), 32);
   Xbyak::Reg32e result_32e(result.getIdx(), result.getBit());
 
@@ -1262,9 +1258,9 @@ EMITTER(NEG) {
 
 EMITTER(ABS) {
   LOG_FATAL("Unsupported");
-  // e.mov(e.rax, *result);
-  // e.neg(e.rax);
-  // e.cmovl(reinterpret_cast<const Xbyak::Reg *>(result)->cvt32(), e.rax);
+  /* e.mov(e.rax, *result);
+     e.neg(e.rax);
+     e.cmovl(reinterpret_cast<const Xbyak::Reg *>(result)->cvt32(), e.rax); */
 }
 
 EMITTER(FADD) {
@@ -1504,16 +1500,16 @@ EMITTER(ASHD) {
     e.mov(result, v);
   }
 
-  // check if we're shifting left or right
+  /* check if we're shifting left or right */
   e.test(n, 0x80000000);
   e.jnz(".shr");
 
-  // perform shift left
+  /* perform shift left */
   e.mov(e.cl, n);
   e.sal(result, e.cl);
   e.jmp(".end");
 
-  // perform right shift
+  /* perform right shift */
   e.L(".shr");
   e.test(n, 0x1f);
   e.jz(".shr_overflow");
@@ -1522,11 +1518,11 @@ EMITTER(ASHD) {
   e.sar(result, e.cl);
   e.jmp(".end");
 
-  // right shift overflowed
+  /* right shift overflowed */
   e.L(".shr_overflow");
   e.sar(result, 31);
 
-  // shift is done
+  /* shift is done */
   e.L(".end");
 
   e.outLocalLabel();
@@ -1543,16 +1539,16 @@ EMITTER(LSHD) {
     e.mov(result, v);
   }
 
-  // check if we're shifting left or right
+  /* check if we're shifting left or right */
   e.test(n, 0x80000000);
   e.jnz(".shr");
 
-  // perform shift left
+  /* perform shift left */
   e.mov(e.cl, n);
   e.shl(result, e.cl);
   e.jmp(".end");
 
-  // perform right shift
+  /* perform right shift */
   e.L(".shr");
   e.test(n, 0x1f);
   e.jz(".shr_overflow");
@@ -1561,11 +1557,11 @@ EMITTER(LSHD) {
   e.shr(result, e.cl);
   e.jmp(".end");
 
-  // right shift overflowed
+  /* right shift overflowed */
   e.L(".shr_overflow");
   e.mov(result, 0x0);
 
-  // shift is done
+  /* shift is done */
   e.L(".end");
 
   e.outLocalLabel();
@@ -1678,7 +1674,7 @@ struct jit_backend *x64_backend_create(struct jit *jit, void *code,
   int res = cs_open(CS_ARCH_X86, CS_MODE_64, &backend->capstone_handle);
   CHECK_EQ(res, CS_ERR_OK);
 
-  // do an initial reset to emit constants and thunks
+  /* do an initial reset to emit constants and thunks */
   x64_backend_reset((jit_backend *)backend);
 
   return (struct jit_backend *)backend;

@@ -1,13 +1,13 @@
 #include "jit/backend/x64/x64_disassembler.h"
 
-bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
+int x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   const uint8_t *start = data;
 
   // test for operand size prefix
-  bool has_opprefix = false;
+  int has_opprefix = 0;
 
   if (*data == 0x66) {
-    has_opprefix = true;
+    has_opprefix = 1;
     data++;
   }
 
@@ -30,8 +30,8 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
 
   // test for MOV opcode
   // http://x86.renejeschke.de/html/file_module_x86_id_176.html
-  bool is_load = false;
-  bool has_imm = false;
+  int is_load = 0;
+  int has_imm = 0;
   int operand_size = 0;
 
   // MOV r8,r/m8
@@ -39,8 +39,8 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   // MOV r32,r/m32
   // MOV r64,r/m64
   if (*data == 0x8a || *data == 0x8b) {
-    is_load = true;
-    has_imm = false;
+    is_load = 1;
+    has_imm = 0;
     operand_size = *data == 0x8a ? 1 : (has_opprefix ? 2 : (rex_w ? 8 : 4));
     data++;
   }
@@ -49,8 +49,8 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   // MOV r/m32,r32
   // MOV r/m64,r64
   else if (*data == 0x88 || *data == 0x89) {
-    is_load = false;
-    has_imm = false;
+    is_load = 0;
+    has_imm = 0;
     operand_size = *data == 0x88 ? 1 : (has_opprefix ? 2 : (rex_w ? 8 : 4));
     data++;
   }
@@ -58,8 +58,8 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   // MOV r16,imm16
   // MOV r32,imm32
   else if (*data == 0xb0 || *data == 0xb8) {
-    is_load = true;
-    has_imm = true;
+    is_load = 1;
+    has_imm = 1;
     operand_size = *data == 0xb0 ? 1 : (has_opprefix ? 2 : 4);
     data++;
   }
@@ -67,14 +67,14 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   // MOV r/m16,imm16
   // MOV r/m32,imm32
   else if (*data == 0xc6 || *data == 0xc7) {
-    is_load = false;
-    has_imm = true;
+    is_load = 0;
+    has_imm = 1;
     operand_size = *data == 0xc6 ? 1 : (has_opprefix ? 2 : 4);
     data++;
   }
   // not a supported MOV instruction
   else {
-    return false;
+    return 0;
   }
 
   // process ModR/M byte
@@ -87,8 +87,8 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   mov->is_load = is_load;
   mov->is_indirect = (modrm_mod != 0b11);
   mov->has_imm = has_imm;
-  mov->has_base = false;
-  mov->has_index = false;
+  mov->has_base = 0;
+  mov->has_index = 0;
   mov->operand_size = operand_size;
   mov->reg = modrm_reg + (rex_r ? 8 : 0);
   mov->base = 0;
@@ -111,7 +111,7 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
     mov->index = sib_index + (rex_x ? 8 : 0);
     mov->scale = sib_scale;
   } else {
-    mov->has_base = true;
+    mov->has_base = 1;
     mov->base = modrm_rm + (rex_b ? 8 : 0);
   }
 
@@ -164,5 +164,5 @@ bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
   // calculate total instruction length
   mov->length = (int)(data - start);
 
-  return true;
+  return 1;
 }

@@ -1,16 +1,15 @@
 #ifndef X64_DISASSEMBLER_H
 #define X64_DISASSEMBLER_H
 
-#include <stdbool.h>
 #include <stdint.h>
 
 struct x64_mov {
   int length;
-  bool is_load;
-  bool is_indirect;
-  bool has_imm;
-  bool has_base;
-  bool has_index;
+  int is_load;
+  int is_indirect;
+  int has_imm;
+  int has_base;
+  int has_index;
   int operand_size;
   int reg;
   int base;
@@ -20,6 +19,6 @@ struct x64_mov {
   uint64_t imm;
 };
 
-bool x64_decode_mov(const uint8_t *data, struct x64_mov *mov);
+int x64_decode_mov(const uint8_t *data, struct x64_mov *mov);
 
 #endif

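For call sites, the swap is transparent as long as the decoder keeps returning nonzero on success and zero on failure, because the result is only ever tested for truth (as in the `if (!x64_decode_mov(data, &mov))` check in the exception handler above). A standalone sketch of that caller pattern, with a stubbed-out decoder and a trimmed `x64_mov` so it compiles on its own (not code from the repository):

```c
#include <stdint.h>
#include <stdio.h>

/* trimmed stand-in for struct x64_mov; the real struct carries more fields */
struct x64_mov {
  int length;
  int is_load;
  int operand_size;
};

/* stub decoder following the diff's convention: 1 on success, 0 on failure.
   the real x64_decode_mov parses prefixes, the opcode and the ModR/M byte. */
static int x64_decode_mov(const uint8_t *data, struct x64_mov *mov) {
  if (*data != 0x8b) {
    return 0;
  }
  mov->length = 2;
  mov->is_load = 1;
  mov->operand_size = 4;
  return 1;
}

int main() {
  const uint8_t code[] = {0x8b, 0x03}; /* mov eax, [rbx] */
  struct x64_mov mov;

  /* the truthiness test works identically for bool and int return types */
  if (!x64_decode_mov(code, &mov)) {
    fprintf(stderr, "unsupported instruction\n");
    return 1;
  }

  printf("decoded %d byte mov, load=%d, operand size=%d\n", mov.length,
         mov.is_load, mov.operand_size);
  return 0;
}
```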
@@ -292,7 +292,7 @@ struct microprofile *mp_create(struct window *window) {
   /* register the font texture */
   mp->font_texture =
       rb_create_texture(rb, PXL_RGBA, FILTER_NEAREST, WRAP_CLAMP_TO_EDGE,
-                        WRAP_CLAMP_TO_EDGE, false, FONT_WIDTH, FONT_HEIGHT,
+                        WRAP_CLAMP_TO_EDGE, 0, FONT_WIDTH, FONT_HEIGHT,
                         reinterpret_cast<const uint8_t *>(s_font_data));
 
   return mp;

@@ -100,7 +100,7 @@ void nk_end_frame(struct nuklear *nk) {
   surf.prim_type = PRIM_TRIANGLES;
   surf.src_blend = BLEND_SRC_ALPHA;
   surf.dst_blend = BLEND_ONE_MINUS_SRC_ALPHA;
-  surf.scissor = true;
+  surf.scissor = 1;
 
   nk_draw_foreach(cmd, &nk->ctx, &nk->cmds) {
     if (!cmd->elem_count) {
@@ -175,7 +175,7 @@ struct nuklear *nk_create(struct window *window) {
       &nk->atlas, &font_width, &font_height, NK_FONT_ATLAS_RGBA32);
   nk->font_texture =
       rb_create_texture(nk->window->rb, PXL_RGBA, FILTER_BILINEAR, WRAP_REPEAT,
-                        WRAP_REPEAT, false, font_width, font_height, font_data);
+                        WRAP_REPEAT, 0, font_width, font_height, font_data);
   nk_font_atlas_end(&nk->atlas, nk_handle_id((int)nk->font_texture), &nk->null);
 
   /* initialize nuklear context */

@@ -1,8 +1,6 @@
 #ifndef NUKLEAR_H
 #define NUKLEAR_H
 
-#include <stdbool.h>
-
 #define NK_INCLUDE_FIXED_TYPES
 #define NK_INCLUDE_STANDARD_IO
 #define NK_INCLUDE_STANDARD_VARARGS
@@ -35,10 +33,10 @@ struct nuklear {
   /* input state */
   int mousex, mousey;
   int mouse_wheel;
-  bool mouse_down[3];
-  bool alt[2];
-  bool ctrl[2];
-  bool shift[2];
+  int mouse_down[3];
+  int alt[2];
+  int ctrl[2];
+  int shift[2];
 };
 
 struct nuklear *nk_create(struct window *window);

@@ -895,7 +895,7 @@ static void win_pump_sdl(struct window *win) {
   }
 }
 
-void win_enable_text_input(struct window *win, bool active) {
+void win_enable_text_input(struct window *win, int active) {
   win->text_input = active;
 
   if (win->text_input) {
@@ -905,7 +905,7 @@ void win_enable_text_input(struct window *win, bool active) {
   }
 }
 
-void win_enable_debug_menu(struct window *win, bool active) {
+void win_enable_debug_menu(struct window *win, int active) {
   win->debug_menu = active;
 }
 

@@ -1,7 +1,6 @@
 #ifndef SYSTEM_H
 #define SYSTEM_H
 
-#include <stdbool.h>
 #include <stdint.h>
 #include "core/list.h"
 #include "ui/keycode.h"
@@ -52,8 +51,8 @@ struct window {
   int width;
   int height;
   int fullscreen;
-  bool debug_menu;
-  bool text_input;
+  int debug_menu;
+  int text_input;
 
   /* private state */
   struct list listeners;
@@ -71,7 +70,7 @@ void win_remove_listener(struct window *win, struct window_listener *listener);
 void win_pump_events(struct window *win);
 
 void win_set_status(struct window *win, const char *status);
-void win_enable_debug_menu(struct window *win, bool active);
-void win_enable_text_input(struct window *win, bool active);
+void win_enable_debug_menu(struct window *win, int active);
+void win_enable_text_input(struct window *win, int active);
 
 #endif

@@ -46,11 +46,11 @@ struct render_backend {
   GLuint ui_vao;
   GLuint ui_vbo;
   GLuint ui_ibo;
-  bool ui_use_ibo;
+  int ui_use_ibo;
 
   /* current gl state */
-  bool scissor_test;
-  bool depth_mask;
+  int scissor_test;
+  int depth_mask;
   enum depth_func depth_func;
   enum cull_face cull_face;
   enum blend_func src_blend;
@@ -116,7 +116,7 @@ static GLenum prim_types[] = {
     GL_LINES, /* PRIM_LINES */
 };
 
-static void rb_set_scissor_test(struct render_backend *rb, bool enabled) {
+static void rb_set_scissor_test(struct render_backend *rb, int enabled) {
   if (rb->scissor_test == enabled) {
     return;
   }
@@ -135,7 +135,7 @@ static void rb_set_scissor_clip(struct render_backend *rb, int x, int y,
   glScissor(x, y, width, height);
 }
 
-static void rb_set_depth_mask(struct render_backend *rb, bool enabled) {
+static void rb_set_depth_mask(struct render_backend *rb, int enabled) {
   if (rb->depth_mask == enabled) {
     return;
   }
@@ -233,7 +233,7 @@ static void rb_print_shader_log(GLuint shader) {
   free(info_log);
 }
 
-static bool rb_compile_shader(const char *source, GLenum shader_type,
+static int rb_compile_shader(const char *source, GLenum shader_type,
                               GLuint *shader) {
   size_t sourceLength = strlen(source);
 
@@ -248,10 +248,10 @@ static bool rb_compile_shader(const char *source, GLenum shader_type,
   if (!compiled) {
     rb_print_shader_log(*shader);
     glDeleteShader(*shader);
-    return false;
+    return 0;
   }
 
-  return true;
+  return 1;
 }
 
 static void rb_destroy_program(struct shader_program *program) {
@@ -266,7 +266,7 @@ static void rb_destroy_program(struct shader_program *program) {
   glDeleteProgram(program->program);
 }
 
-static bool rb_compile_program(struct shader_program *program,
+static int rb_compile_program(struct shader_program *program,
                                const char *header, const char *vertex_source,
                                const char *fragment_source) {
   char buffer[16384] = {0};
@@ -281,7 +281,7 @@ static bool rb_compile_program(struct shader_program *program,
 
   if (!rb_compile_shader(buffer, GL_VERTEX_SHADER, &program->vertex_shader)) {
     rb_destroy_program(program);
-    return false;
+    return 0;
   }
 
   glAttachShader(program->program, program->vertex_shader);
@@ -295,7 +295,7 @@ static bool rb_compile_program(struct shader_program *program,
   if (!rb_compile_shader(buffer, GL_FRAGMENT_SHADER,
                          &program->fragment_shader)) {
     rb_destroy_program(program);
-    return false;
+    return 0;
   }
 
   glAttachShader(program->program, program->fragment_shader);
@@ -308,7 +308,7 @@ static bool rb_compile_program(struct shader_program *program,
 
   if (!linked) {
     rb_destroy_program(program);
-    return false;
+    return 0;
   }
 
   for (int i = 0; i < UNIFORM_NUM_UNIFORMS; i++) {
@@ -316,7 +316,7 @@ static bool rb_compile_program(struct shader_program *program,
         glGetUniformLocation(program->program, uniform_names[i]);
   }
 
-  return true;
+  return 1;
 }
 
 static void rb_destroy_context(struct render_backend *rb) {
@@ -328,7 +328,7 @@ static void rb_destroy_context(struct render_backend *rb) {
   rb->ctx = NULL;
 }
 
-static bool rb_init_context(struct render_backend *rb) {
+static int rb_init_context(struct render_backend *rb) {
   /* need at least a 3.3 core context for our shaders */
   SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
   SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
@@ -337,7 +337,7 @@ static bool rb_init_context(struct render_backend *rb) {
   rb->ctx = SDL_GL_CreateContext(rb->window->handle);
   if (!rb->ctx) {
     LOG_WARNING("OpenGL context creation failed: %s", SDL_GetError());
-    return false;
+    return 0;
   }
 
   /* link in gl functions at runtime */
@@ -345,13 +345,13 @@ static bool rb_init_context(struct render_backend *rb) {
   GLenum err = glewInit();
   if (err != GLEW_OK) {
     LOG_WARNING("GLEW initialization failed: %s", glewGetErrorString(err));
-    return false;
+    return 0;
   }
 
   /* enable vsync */
   SDL_GL_SetSwapInterval(1);
 
-  return true;
+  return 1;
 }
 
 static void rb_destroy_textures(struct render_backend *rb) {
@@ -482,7 +482,7 @@ static void rb_create_vertex_buffers(struct render_backend *rb) {
 }
 
 static void rb_set_initial_state(struct render_backend *rb) {
-  rb_set_depth_mask(rb, true);
+  rb_set_depth_mask(rb, 1);
   rb_set_depth_func(rb, DEPTH_NONE);
   rb_set_cull_face(rb, CULL_BACK);
   rb_set_blend_func(rb, BLEND_NONE, BLEND_NONE);
@@ -541,12 +541,12 @@ void rb_end_surfaces2d(struct render_backend *rb) {}
 void rb_draw_surface2d(struct render_backend *rb,
                        const struct surface2d *surf) {
   if (surf->scissor) {
-    rb_set_scissor_test(rb, true);
+    rb_set_scissor_test(rb, 1);
     rb_set_scissor_clip(rb, (int)surf->scissor_rect[0],
                         (int)surf->scissor_rect[1], (int)surf->scissor_rect[2],
                         (int)surf->scissor_rect[3]);
   } else {
-    rb_set_scissor_test(rb, false);
+    rb_set_scissor_test(rb, 0);
   }
 
   rb_set_blend_func(rb, surf->src_blend, surf->dst_blend);
@@ -574,15 +574,15 @@ void rb_begin_surfaces2d(struct render_backend *rb,
     glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rb->ui_ibo);
     glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(uint16_t) * num_indices,
                  indices, GL_DYNAMIC_DRAW);
-    rb->ui_use_ibo = true;
+    rb->ui_use_ibo = 1;
   } else {
     glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, -1);
-    rb->ui_use_ibo = false;
+    rb->ui_use_ibo = 0;
   }
 }
 
 void rb_end_ortho(struct render_backend *rb) {
-  rb_set_scissor_test(rb, false);
+  rb_set_scissor_test(rb, 0);
 }
 
 void rb_begin_ortho(struct render_backend *rb) {
@@ -608,7 +608,7 @@ void rb_begin_ortho(struct render_backend *rb) {
   ortho[11] = 0.0f;
   ortho[15] = 1.0f;
 
-  rb_set_depth_mask(rb, false);
+  rb_set_depth_mask(rb, 0);
   rb_set_depth_func(rb, DEPTH_NONE);
   rb_set_cull_face(rb, CULL_NONE);
 
@@ -624,7 +624,7 @@ void rb_end_frame(struct render_backend *rb) {
 }
 
 void rb_begin_frame(struct render_backend *rb) {
-  rb_set_depth_mask(rb, true);
+  rb_set_depth_mask(rb, 1);
 
   glViewport(0, 0, rb->window->width, rb->window->height);
 
@@ -642,7 +642,7 @@ texture_handle_t rb_create_texture(struct render_backend *rb,
                                    enum pxl_format format,
                                    enum filter_mode filter,
                                    enum wrap_mode wrap_u, enum wrap_mode wrap_v,
-                                   bool mipmaps, int width, int height,
+                                   int mipmaps, int width, int height,
                                    const uint8_t *buffer) {
   /* find next open texture handle */
   texture_handle_t handle;

@@ -1,7 +1,6 @@
 #ifndef RENDER_BACKEND_H
 #define RENDER_BACKEND_H
 
-#include <stdbool.h>
 #include <stdint.h>
 
 struct window;
@@ -87,13 +86,13 @@ struct vertex {
 
 struct surface {
   texture_handle_t texture;
-  bool depth_write;
+  int depth_write;
   enum depth_func depth_func;
   enum cull_face cull;
   enum blend_func src_blend;
   enum blend_func dst_blend;
   enum shade_mode shade;
-  bool ignore_tex_alpha;
+  int ignore_tex_alpha;
   int first_vert;
   int num_verts;
 };
@@ -109,7 +108,7 @@ struct surface2d {
   texture_handle_t texture;
   enum blend_func src_blend;
   enum blend_func dst_blend;
-  bool scissor;
+  int scissor;
   float scissor_rect[4];
   int first_vert;
   int num_verts;
@@ -124,7 +123,7 @@ texture_handle_t rb_create_texture(struct render_backend *rb,
                                    enum pxl_format format,
                                    enum filter_mode filter,
                                    enum wrap_mode wrap_u, enum wrap_mode wrap_v,
-                                   bool mipmaps, int width, int height,
+                                   int mipmaps, int width, int height,
                                    const uint8_t *buffer);
 void rb_destroy_texture(struct render_backend *rb, texture_handle_t handle);