mirror of https://github.com/xqemu/xqemu.git
tcg: Respect highwater in tcg_out_tb_finalize
Undo the workaround at b17a6d3390.
If there are lots of memory operations in a TB, the slow path code
can exceed the highwater reservation. Add a check within the loop.
Tested-by: Aurelien Jarno <aurelien@aurel32.net>
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
parent e4a096b1cd
commit 23dceda62a
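In outline: tcg_out_tb_finalize() now returns a bool, and the high-water mark is checked after each out-of-line slow-path fragment is emitted. The assumption (stated in the new comment in the diff below) is that any single fragment that starts below the mark fits within the slack reserved above it, so one cheap check per loop iteration suffices. A minimal, self-contained sketch of that pattern, with hypothetical names and sizes standing in for QEMU's real ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified context: a JIT output buffer with a high-water
   mark placed some slack below the true end of the buffer. */
typedef struct {
    uint8_t *code_ptr;            /* next byte to be written */
    uint8_t *code_gen_highwater;  /* buffer end minus reserved slack */
} Ctx;

/* Emit one (dummy) out-of-line fragment; stands in for a slow-path stub. */
static void emit_fragment(Ctx *s)
{
    *s->code_ptr++ = 0x90;  /* placeholder byte */
}

/* Finalize: emit one fragment per pending label, testing the high-water
   mark after each one.  Any single fragment beginning below the mark is
   assumed to fit in the reserved slack, so checking after emission is
   sufficient. */
static bool finalize(Ctx *s, int nb_labels)
{
    for (int i = 0; i < nb_labels; i++) {
        emit_fragment(s);
        if (s->code_ptr > s->code_gen_highwater) {
            return false;  /* (pending) overflow: caller must flush and retry */
        }
    }
    return true;
}

int main(void)
{
    uint8_t buf[64];
    Ctx s = { buf, buf + sizeof(buf) - 16 };  /* 16 bytes of slack */
    printf("finalize: %s\n", finalize(&s, 100) ? "ok" : "overflow");
    return 0;
}

With 100 pending labels and only 16 bytes of slack above the mark, the sketch reports overflow after crossing the mark instead of writing past the end of the buffer, which is exactly the failure mode the commit message describes for TBs with many memory operations.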
tcg/ia64/tcg-target.c

@@ -1572,7 +1572,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
     be->labels = l;
 }
 
-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     static const void * const helpers[8] = {
         helper_ret_stb_mmu,
@@ -1620,8 +1620,17 @@ static void tcg_out_tb_finalize(TCGContext *s)
         }
 
         reloc_pcrel21b_slot2(l->label_ptr, dest);
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }
 
 static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
 {
tcg/tcg-be-ldst.h

@@ -56,7 +56,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 
-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     TCGLabelQemuLdst *lb;
 
@@ -67,8 +67,17 @@ static void tcg_out_tb_finalize(TCGContext *s)
         } else {
             tcg_out_qemu_st_slow_path(s, lb);
         }
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }
 
 /*
  * Allocate a new TCGLabelQemuLdst entry.
tcg/tcg-be-null.h

@@ -38,6 +38,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
  * Generate TB finalization at the end of block
  */
 
-static inline void tcg_out_tb_finalize(TCGContext *s)
+static inline bool tcg_out_tb_finalize(TCGContext *s)
 {
+    return true;
 }
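Backends that do not use the out-of-line qemu_ld/st slow-path machinery include this null stub instead; there is nothing to emit at finalization, so it simply reports success under the new bool contract.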
tcg/tcg.c | 12

@@ -111,7 +111,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 static void tcg_out_tb_init(TCGContext *s);
-static void tcg_out_tb_finalize(TCGContext *s);
+static bool tcg_out_tb_finalize(TCGContext *s);
 
@@ -389,11 +389,7 @@ void tcg_prologue_init(TCGContext *s)
     /* Compute a high-water mark, at which we voluntarily flush the buffer
        and start over.  The size here is arbitrary, significantly larger
        than we expect the code generation for any one opcode to require.  */
-    /* ??? We currently have no good estimate for, or checks in,
-       tcg_out_tb_finalize.  If there are quite a lot of guest memory ops,
-       the number of out-of-line fragments could be quite high.  In the
-       short-term, increase the highwater buffer.  */
-    s->code_gen_highwater = s->code_gen_buffer + (total_size - 64*1024);
+    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
 
     tcg_register_jit(s->code_gen_buffer, total_size);
 
@@ -2456,7 +2452,9 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
 
     /* Generate TB finalization at the end of block */
-    tcg_out_tb_finalize(s);
+    if (!tcg_out_tb_finalize(s)) {
+        return -1;
+    }
 
     /* flush instruction cache */
     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
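The new -1 return from tcg_gen_code() signals (pending) buffer overflow to the caller, the expectation being that the caller discards the partial translation, flushes the code buffer, and retranslates the TB. A tiny sketch of that retry shape (gen_tb() and flush_buffer() are hypothetical stand-ins, not QEMU functions):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: gen_tb() plays the role of tcg_gen_code() and
   fails (returns -1) until the code buffer has been flushed once. */
static bool flushed;

static int gen_tb(void)
{
    return flushed ? 128 : -1;  /* code size on success, -1 on overflow */
}

static void flush_buffer(void)
{
    flushed = true;  /* reclaim the whole code buffer */
}

int main(void)
{
    int size;
    /* Retry loop: a -1 from code generation means the high-water mark was
       crossed; flush everything and translate again. */
    while ((size = gen_tb()) < 0) {
        flush_buffer();
    }
    printf("generated %d bytes\n", size);
    return 0;
}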