mirror of https://github.com/xqemu/xqemu.git
misc updates
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJXfS7+AAoJEK0ScMxN0CebMgIH/2jV4IMUdSf/JCasvjuRIlr5
W6rYyDpqMeRRCg9PnFrMiVQZfOiKCp/UmMxvJg3PshdwZUrsJI8iRvevaJEmMM6A
EyoovBV53WBglnbFwDwyfqNlS4grACZCOStTYsvYEja2g29fN1qrJnv7x8IlynB7
jyY2x5xFmL3Ic/lVCNqko+saD1Ms7WYvwNhnbs3ZTpouwC8bx/SJ/vn5Zlxj8zQn
lTzFpaUZYkMUG8rK0G3QWUuBJxewYGYP1J+11JudaKCatDtuwtaL14d7grEhE8AC
Gg5rpvjv7r650tMdFm6+NF/FuOmBLDm5RtKYJfCyEnHNiZbl9SkEBlTRDx2NSic=
=V6KL
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20160706' into staging

misc updates

# gpg: Signature made Wed 06 Jul 2016 17:17:02 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC 16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20160706:
  tcg: Improve the alignment check infrastructure
  tcg: Optimize spills of constants
  tcg: Fix name for high-half register
  build: Use $(CCAS) for compiling .S files

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit 91d3550990
@@ -368,7 +368,7 @@ else
 fi
 
 ar="${AR-${cross_prefix}ar}"
-as="${AS-${cross_prefix}as}"
+ccas="${CCAS-$cc}"
 cpp="${CPP-$cc -E}"
 objcopy="${OBJCOPY-${cross_prefix}objcopy}"
 ld="${LD-${cross_prefix}ld}"
@@ -4490,6 +4490,13 @@ if test "$fortify_source" != "no"; then
   fi
 fi
 
+#################################################
+# clang does not support the 16-bit assembly for roms
+
+if echo | $ccas -dM -E - | grep __clang__ > /dev/null 2>&1 ; then
+  ccas="$ccas -fno-integrated-as"
+fi
+
 ##########################################
 # check if struct fsxattr is available via linux/fs.h
 
@@ -5508,7 +5515,7 @@ echo "CXX=$cxx" >> $config_host_mak
 echo "OBJCC=$objcc" >> $config_host_mak
 echo "AR=$ar" >> $config_host_mak
 echo "ARFLAGS=$ARFLAGS" >> $config_host_mak
-echo "AS=$as" >> $config_host_mak
+echo "CCAS=$ccas" >> $config_host_mak
 echo "CPP=$cpp" >> $config_host_mak
 echo "OBJCOPY=$objcopy" >> $config_host_mak
 echo "LD=$ld" >> $config_host_mak
@@ -5981,7 +5988,7 @@ for rom in seabios vgabios ; do
 config_mak=roms/$rom/config.mak
 echo "# Automatically generated by configure - do not modify" > $config_mak
 echo "SRC_PATH=$source_path/roms/$rom" >> $config_mak
-echo "AS=$as" >> $config_mak
+echo "CCAS=$ccas" >> $config_mak
 echo "CC=$cc" >> $config_mak
 echo "BCC=bcc" >> $config_mak
 echo "CPP=$cpp" >> $config_mak
@@ -288,14 +288,22 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #if !defined(CONFIG_USER_ONLY)
 
 /* Flags stored in the low bits of the TLB virtual address.  These are
-   defined so that fast path ram access is all zeros.  */
+ * defined so that fast path ram access is all zeros.
+ * The flags all must be between TARGET_PAGE_BITS and
+ * maximum address alignment bit.
+ */
 /* Zero if TLB entry is valid.  */
-#define TLB_INVALID_MASK (1 << 3)
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
 /* Set if TLB entry references a clean RAM page.  The iotlb entry will
    contain the page physical address.  */
-#define TLB_NOTDIRTY (1 << 4)
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
 /* Set if TLB entry is an IO callback.  */
-#define TLB_MMIO (1 << 5)
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
+
+/* Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
 
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
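Why the flag bits move up: the TLB comparator stores a page-aligned address, so every bit below TARGET_PAGE_BITS is free for flags, and packing them directly under the page offset leaves the low-order bits available for alignment masks. A minimal standalone sketch of the resulting invariant (the 12-bit page size and the main() harness are assumptions of this sketch, not QEMU code):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the definitions above (12-bit pages assumed). */
#define TARGET_PAGE_BITS 12
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))   /* 0x800 */
#define TLB_NOTDIRTY     (1 << (TARGET_PAGE_BITS - 2))   /* 0x400 */
#define TLB_MMIO         (1 << (TARGET_PAGE_BITS - 3))   /* 0x200 */
#define TLB_FLAGS_MASK   (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)

int main(void)
{
    /* An alignment mask covering bits 0..8 must not overlap the flag
       bits 9..11, so a single AND+compare can test "same page, entry
       valid, access aligned" all at once. */
    int max_align_bits = TARGET_PAGE_BITS - 3;  /* first flag bit */
    assert((TLB_FLAGS_MASK & ((1 << max_align_bits) - 1)) == 0);
    printf("flags 0x%x leave alignment bits 0..%d free\n",
           TLB_FLAGS_MASK, max_align_bits - 1);
    return 0;
}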
@@ -68,11 +68,8 @@ LINK = $(call quiet-command, $(LINKPROG) $(QEMU_CFLAGS) $(CFLAGS) $(LDFLAGS) -o
        $(call process-archive-undefs, $1) \
        $(version-obj-y) $(call extract-libs,$1) $(LIBS)," LINK $(TARGET_DIR)$@")
 
-%.asm: %.S
-       $(call quiet-command,$(CPP) $(QEMU_INCLUDES) $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) -o $@ $<," CPP $(TARGET_DIR)$@")
-
-%.o: %.asm
-       $(call quiet-command,$(AS) $(ASFLAGS) -o $@ $<," AS $(TARGET_DIR)$@")
+%.o: %.S
+       $(call quiet-command,$(CCAS) $(QEMU_INCLUDES) $(QEMU_CFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) -c -o $@ $<," AS $(TARGET_DIR)$@")
 
 %.o: %.cc
        $(call quiet-command,$(CXX) $(QEMU_INCLUDES) $(QEMU_CXXFLAGS) $(QEMU_DGFLAGS) $(CFLAGS) $($@-cflags) -c -o $@ $<," CXX $(TARGET_DIR)$@")
@@ -171,20 +171,21 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
 
-    /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
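The rewritten fast path now tests alignment before the page comparison: with a_bits bits of required alignment, addr & ((1 << a_bits) - 1) is nonzero exactly when the address violates the requirement, and a_bits <= 0 (byte access or unaligned-permitted) skips the check entirely. A tiny standalone illustration of the predicate (names are local to this sketch):

#include <stdio.h>

/* Returns 1 if addr violates a 2^a_bits alignment requirement. */
static int misaligned(unsigned long addr, int a_bits)
{
    return a_bits > 0 && (addr & ((1UL << a_bits) - 1)) != 0;
}

int main(void)
{
    printf("%d\n", misaligned(0x1000, 2)); /* 4-byte aligned -> 0 */
    printf("%d\n", misaligned(0x1002, 2)); /* not 4-byte aligned -> 1 */
    printf("%d\n", misaligned(0x1002, 0)); /* byte access -> 0 */
    return 0;
}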
@@ -215,10 +216,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
-        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -232,13 +229,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         return res;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-    }
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
     res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
@@ -255,20 +245,21 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
 
-    /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -299,10 +290,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
-        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
@@ -316,13 +303,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         return res;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-    }
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
     return res;
@@ -376,19 +356,20 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
 
-    /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -416,10 +397,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache.  */
@@ -434,13 +411,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
     glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
@@ -456,19 +426,20 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    int a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
 
-    /* If the TLB entry is for a different page, reload and try again.  */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -496,10 +467,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
@@ -514,13 +481,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-    if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
@@ -716,6 +716,16 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                   arg, arg1, arg2);
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    if (val == 0) {
+        tcg_out_st(s, type, TCG_REG_XZR, base, ofs);
+        return true;
+    }
+    return false;
+}
+
 static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
 {
@@ -1071,19 +1081,20 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
     int tlb_offset = is_read ?
         offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int a_bits = get_alignment_bits(opc);
     TCGReg base = TCG_AREG0, x3;
     uint64_t tlb_mask;
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address.  For unaligned access, we check that we don't
        cross pages using the address of the last byte of the access.  */
-    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
-        tlb_mask = TARGET_PAGE_MASK | s_mask;
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
         x3 = addr_reg;
     } else {
         tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
-                     TCG_REG_X3, addr_reg, s_mask);
+                     TCG_REG_X3, addr_reg, (1 << (opc & MO_SIZE)) - 1);
        tlb_mask = TARGET_PAGE_MASK;
        x3 = TCG_REG_X3;
     }
 
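The point of folding the alignment bits into tlb_mask is that a single AND-plus-compare now rejects both a wrong page and a misaligned address, because the TLB comparator holds a page-aligned value whose low bits are all zero. A worked example under an assumed 12-bit page size (values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(uint64_t)((1 << TARGET_PAGE_BITS) - 1))

int main(void)
{
    int a_bits = 2;                                   /* 4-byte alignment */
    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);

    /* The comparator stores the page-aligned address (low bits zero), so
       any low bit surviving the AND makes the compare fail and routes the
       access to the slow path, which raises the alignment fault. */
    printf("0x%llx\n", (unsigned long long)(0x4008 & tlb_mask)); /* 0x4000: compare hits */
    printf("0x%llx\n", (unsigned long long)(0x4009 & tlb_mask)); /* 0x4001: compare misses */
    return 0;
}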
@@ -2046,6 +2046,12 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
         tcg_out_st32(s, COND_AL, arg, arg1, arg2);
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    return false;
+}
+
 static inline void tcg_out_mov(TCGContext *s, TCGType type,
                                TCGReg ret, TCGReg arg)
 {
@@ -710,12 +710,19 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
 }
 
-static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
-                               tcg_target_long ofs, tcg_target_long val)
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                        TCGReg base, intptr_t ofs)
 {
-    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
-    tcg_out_modrm_offset(s, opc, 0, base, ofs);
+    int rexw = 0;
+    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
+        if (val != (int32_t)val) {
+            return false;
+        }
+        rexw = P_REXW;
+    }
+    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
     tcg_out32(s, val);
+    return true;
 }
 
 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
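The new return-false path exists because x86's MOV r/m, imm32 encoding sign-extends its 32-bit immediate when the destination is 64 bits wide: a constant that differs from the sign extension of its own low half cannot be stored this way and has to go through a register. A sketch of that fit test in plain C (not the QEMU helper itself):

#include <stdint.h>
#include <stdio.h>

/* True if a 64-bit constant can be stored via a sign-extended
   32-bit immediate (x86 MOV r/m64, imm32). */
static int fits_imm32(int64_t val)
{
    return val == (int32_t)val;
}

int main(void)
{
    printf("%d\n", fits_imm32(-1));            /* 1: sign-extends back */
    printf("%d\n", fits_imm32(0x7fffffff));    /* 1 */
    printf("%d\n", fits_imm32(0x100000000LL)); /* 0: needs a register */
    return 0;
}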
@@ -1195,8 +1202,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     TCGType ttype = TCG_TYPE_I32;
     TCGType tlbtype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0, tlbrexw = 0;
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
-    bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
+    int a_bits = get_alignment_bits(opc);
+    target_ulong tlb_mask;
 
     if (TCG_TARGET_REG_BITS == 64) {
         if (TARGET_LONG_BITS == 64) {
@@ -1213,19 +1220,22 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     }
 
     tcg_out_mov(s, tlbtype, r0, addrlo);
-    if (aligned) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         tcg_out_mov(s, ttype, r1, addrlo);
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
         /* For unaligned access check that we don't cross pages using
            the page address of the last byte.  */
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo,
+                             (1 << (opc & MO_SIZE)) - 1);
+        tlb_mask = TARGET_PAGE_MASK;
     }
 
     tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-    tgen_arithi(s, ARITH_AND + trexw, r1,
-                TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
+    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
     tgen_arithi(s, ARITH_AND + tlbrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
 
@@ -1321,10 +1331,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
             ofs += 4;
         }
 
-        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
+        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
         ofs += 4;
 
-        tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
     } else {
         tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
         /* The second argument is already loaded with addrlo.  */
@@ -1413,7 +1423,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
             ofs += 4;
         }
 
-        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
+        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
         ofs += 4;
 
         retaddr = TCG_REG_EAX;
@@ -973,6 +973,16 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     }
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    if (val == 0) {
+        tcg_out_st(s, type, TCG_REG_R0, base, ofs);
+        return true;
+    }
+    return false;
+}
+
 static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3,
                                TCGReg ret, TCGArg arg1, int const_arg1,
                                TCGArg arg2, int const_arg2)
@@ -576,6 +576,16 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    if (val == 0) {
+        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
+        return true;
+    }
+    return false;
+}
+
 static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
 {
     if (val == (int16_t)val) {
@@ -857,6 +857,12 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    return false;
+}
+
 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                         int const_arg2, int cr, TCGType type)
 {
@@ -1399,6 +1405,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
     int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
     TCGReg base = TCG_AREG0;
     TCGMemOp s_bits = opc & MO_SIZE;
+    int a_bits = get_alignment_bits(opc);
 
     /* Extract the page index, shifted into place for tlb index.  */
     if (TCG_TARGET_REG_BITS == 64) {
@@ -1456,14 +1463,17 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
          * the bottom bits and thus trigger a comparison failure on
          * unaligned accesses
          */
+        if (a_bits < 0) {
+            a_bits = s_bits;
+        }
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
-                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
-    } else if (s_bits) {
-        /* > byte access, we need to handle alignment */
-        if ((opc & MO_AMASK) == MO_ALIGN) {
+                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+    } else if (a_bits) {
+        /* More than byte access, we need to handle alignment */
+        if (a_bits > 0) {
             /* Alignment required by the front-end, same as 32-bits */
             tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
-                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
+                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
             tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
         } else {
             /* We support unaligned accesses, we need to make sure we fail
@@ -798,6 +798,12 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
     }
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    return false;
+}
+
 /* load data from an absolute host address */
 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
 {
@@ -1499,18 +1505,19 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int a_bits = get_alignment_bits(opc);
     int ofs, a_off;
     uint64_t tlb_mask;
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address.  For unaligned access, we check that we don't
        cross pages using the address of the last byte of the access.  */
-    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         a_off = 0;
-        tlb_mask = TARGET_PAGE_MASK | s_mask;
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
-        a_off = s_mask;
+        a_off = (1 << (opc & MO_SIZE)) - 1;
         tlb_mask = TARGET_PAGE_MASK;
     }
 
@@ -504,6 +504,16 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    if (val == 0) {
+        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
+        return true;
+    }
+    return false;
+}
+
 static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
 {
     tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
@@ -1851,6 +1851,9 @@ void tcg_gen_goto_tb(unsigned idx)
 
 static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
 {
+    /* Trigger the asserts within as early as possible.  */
+    (void)get_alignment_bits(op);
+
     switch (op & MO_SIZE) {
     case MO_8:
         op &= ~MO_BSWAP;
tcg/tcg.c (179 lines changed)
@@ -108,6 +108,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                        const int *const_args);
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                        intptr_t arg2);
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                        TCGReg base, intptr_t ofs);
 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
@@ -557,7 +559,7 @@ int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
         ts2->mem_offset = offset + (1 - bigendian) * 4;
         pstrcpy(buf, sizeof(buf), name);
         pstrcat(buf, sizeof(buf), "_1");
-        ts->name = strdup(buf);
+        ts2->name = strdup(buf);
     } else {
         ts->base_type = type;
         ts->type = type;
@@ -997,6 +999,22 @@ static const char * const ldst_name[] =
     [MO_BEQ]  = "beq",
 };
 
+static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
+#ifdef ALIGNED_ONLY
+    [MO_UNALN >> MO_ASHIFT]    = "un+",
+    [MO_ALIGN >> MO_ASHIFT]    = "",
+#else
+    [MO_UNALN >> MO_ASHIFT]    = "",
+    [MO_ALIGN >> MO_ASHIFT]    = "al+",
+#endif
+    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
+    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
+    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
+    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
+    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
+    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
+};
+
 void tcg_dump_ops(TCGContext *s)
 {
     char buf[128];
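With the table in place, the opcode dump composes an alignment prefix with the size/sign/endianness name, e.g. "al4+leul" for a 4-byte-aligned little-endian unsigned 32-bit load (exact strings depend on ALIGNED_ONLY). A minimal sketch of the lookup, abbreviated to the non-ALIGNED_ONLY case:

#include <stdio.h>

enum { MO_ASHIFT = 4, MO_AMASK = 7 << MO_ASHIFT };

/* Abbreviated variant of the alignment_name table added above. */
static const char *const alignment_name[8] = {
    "", "al2+", "al4+", "al8+", "al16+", "al32+", "al64+", "al+",
};

int main(void)
{
    int op = 2 << MO_ASHIFT;            /* MO_ALIGN_4 */
    /* Prints "al4+leul" for a 4-byte-aligned MO_LEUL access. */
    printf("%sleul\n", alignment_name[(op & MO_AMASK) >> MO_ASHIFT]);
    return 0;
}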
@@ -1098,14 +1116,8 @@ void tcg_dump_ops(TCGContext *s)
                 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                     qemu_log(",$0x%x,%u", op, ix);
                 } else {
-                    const char *s_al = "", *s_op;
-                    if (op & MO_AMASK) {
-                        if ((op & MO_AMASK) == MO_ALIGN) {
-                            s_al = "al+";
-                        } else {
-                            s_al = "un+";
-                        }
-                    }
+                    const char *s_al, *s_op;
+                    s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                     s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                     qemu_log(",%s%s,%u", s_al, s_op, ix);
                 }
@@ -1680,35 +1692,89 @@ static void temp_allocate_frame(TCGContext *s, int temp)
 
 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
 
-/* sync register 'reg' by saving it to the corresponding temporary */
-static void tcg_reg_sync(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
+/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
+   mark it free; otherwise mark it dead.  */
+static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
 {
-    TCGTemp *ts = s->reg_to_temp[reg];
+    if (ts->fixed_reg) {
+        return;
+    }
+    if (ts->val_type == TEMP_VAL_REG) {
+        s->reg_to_temp[ts->reg] = NULL;
+    }
+    ts->val_type = (free_or_dead < 0
+                    || ts->temp_local
+                    || temp_idx(s, ts) < s->nb_globals
+                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
+}
 
-    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
-    if (!ts->mem_coherent && !ts->fixed_reg) {
+/* Mark a temporary as dead.  */
+static inline void temp_dead(TCGContext *s, TCGTemp *ts)
+{
+    temp_free_or_dead(s, ts, 1);
+}
+
+/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
+   registers needs to be allocated to store a constant.  If 'free_or_dead'
+   is non-zero, subsequently release the temporary; if it is positive, the
+   temp is dead; if it is negative, the temp is free.  */
+static void temp_sync(TCGContext *s, TCGTemp *ts,
+                      TCGRegSet allocated_regs, int free_or_dead)
+{
+    if (ts->fixed_reg) {
+        return;
+    }
+    if (!ts->mem_coherent) {
         if (!ts->mem_allocated) {
             temp_allocate_frame(s, temp_idx(s, ts));
-        } else if (ts->indirect_reg) {
+        }
+        if (ts->indirect_reg) {
+            if (ts->val_type == TEMP_VAL_REG) {
                 tcg_regset_set_reg(allocated_regs, ts->reg);
+            }
             temp_load(s, ts->mem_base,
                       tcg_target_available_regs[TCG_TYPE_PTR],
                       allocated_regs);
         }
-        tcg_out_st(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
+        switch (ts->val_type) {
+        case TEMP_VAL_CONST:
+            /* If we're going to free the temp immediately, then we won't
+               require it later in a register, so attempt to store the
+               constant to memory directly.  */
+            if (free_or_dead
+                && tcg_out_sti(s, ts->type, ts->val,
+                               ts->mem_base->reg, ts->mem_offset)) {
+                break;
+            }
+            temp_load(s, ts, tcg_target_available_regs[ts->type],
+                      allocated_regs);
+            /* fallthrough */
+
+        case TEMP_VAL_REG:
+            tcg_out_st(s, ts->type, ts->reg,
+                       ts->mem_base->reg, ts->mem_offset);
+            break;
+
+        case TEMP_VAL_MEM:
+            break;
+
+        case TEMP_VAL_DEAD:
+        default:
+            tcg_abort();
+        }
         ts->mem_coherent = 1;
     }
+    if (free_or_dead) {
+        temp_free_or_dead(s, ts, free_or_dead);
+    }
+}
 
 /* free register 'reg' by spilling the corresponding temporary if necessary */
 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
 {
     TCGTemp *ts = s->reg_to_temp[reg];
 
     if (ts != NULL) {
-        tcg_reg_sync(s, reg, allocated_regs);
-        ts->val_type = TEMP_VAL_MEM;
-        s->reg_to_temp[reg] = NULL;
+        temp_sync(s, ts, allocated_regs, -1);
     }
 }
 
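The net effect of this refactor: temp_sync now handles TEMP_VAL_CONST itself and, when the temporary is being released anyway, first offers the constant to the backend through tcg_out_sti; only if the backend declines (returns false) does it materialize the value in a register and spill that. A simplified standalone model of the decision, where every name is a stand-in, not QEMU's:

#include <stdbool.h>
#include <stdio.h>

typedef enum { VAL_DEAD, VAL_REG, VAL_MEM, VAL_CONST } ValType;

/* Stand-in backend hook: pretend only a zero constant can be stored
   directly, as on targets with a hardwired zero register. */
static bool backend_store_imm(long val) { return val == 0; }

static void sync_const(ValType *vt, long val, bool releasing)
{
    if (*vt == VAL_CONST) {
        if (releasing && backend_store_imm(val)) {
            printf("stored constant %ld straight to memory\n", val);
        } else {
            printf("loaded %ld into a register, then spilled it\n", val);
        }
        *vt = VAL_MEM;
    }
}

int main(void)
{
    ValType a = VAL_CONST, b = VAL_CONST;
    sync_const(&a, 0, true);   /* direct store, no register consumed */
    sync_const(&b, 42, true);  /* backend declines, falls back to load+store */
    return 0;
}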
@@ -1778,45 +1844,9 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
     s->reg_to_temp[reg] = ts;
 }
 
-/* mark a temporary as dead. */
-static inline void temp_dead(TCGContext *s, TCGTemp *ts)
-{
-    if (ts->fixed_reg) {
-        return;
-    }
-    if (ts->val_type == TEMP_VAL_REG) {
-        s->reg_to_temp[ts->reg] = NULL;
-    }
-    ts->val_type = (temp_idx(s, ts) < s->nb_globals || ts->temp_local
-                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
-}
-
-/* sync a temporary to memory. 'allocated_regs' is used in case a
+/* Save a temporary to memory. 'allocated_regs' is used in case a
    temporary registers needs to be allocated to store a constant. */
-static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
-{
-    if (ts->fixed_reg) {
-        return;
-    }
-    switch (ts->val_type) {
-    case TEMP_VAL_CONST:
-        temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs);
-        /* fallthrough */
-    case TEMP_VAL_REG:
-        tcg_reg_sync(s, ts->reg, allocated_regs);
-        break;
-    case TEMP_VAL_DEAD:
-    case TEMP_VAL_MEM:
-        break;
-    default:
-        tcg_abort();
-    }
-}
-
-/* save a temporary to memory. 'allocated_regs' is used in case a
-   temporary registers needs to be allocated to store a constant. */
-static inline void temp_save(TCGContext *s, TCGTemp *ts,
-                             TCGRegSet allocated_regs)
+static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
 {
 #ifdef USE_LIVENESS_ANALYSIS
     /* ??? Liveness does not yet incorporate indirect bases. */
@@ -1827,8 +1857,7 @@ static inline void temp_save(TCGContext *s, TCGTemp *ts,
         return;
     }
 #endif
-    temp_sync(s, ts, allocated_regs);
-    temp_dead(s, ts);
+    temp_sync(s, ts, allocated_regs, 1);
 }
 
 /* save globals to their canonical location and assume they can be
@@ -1861,7 +1890,7 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
             continue;
         }
 #endif
-        temp_sync(s, ts, allocated_regs);
+        temp_sync(s, ts, allocated_regs, 0);
     }
 }
 
@@ -1905,21 +1934,21 @@ static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
     val = args[1];
 
     if (ots->fixed_reg) {
-        /* for fixed registers, we do not do any constant
-           propagation */
+        /* For fixed registers, we do not do any constant propagation.  */
         tcg_out_movi(s, ots->type, ots->reg, val);
-    } else {
-        /* The movi is not explicitly generated here */
+        return;
+    }
+
+    /* The movi is not explicitly generated here.  */
     if (ots->val_type == TEMP_VAL_REG) {
         s->reg_to_temp[ots->reg] = NULL;
     }
     ots->val_type = TEMP_VAL_CONST;
     ots->val = val;
-    }
+    ots->mem_coherent = 0;
     if (NEED_SYNC_ARG(0)) {
-        temp_sync(s, ots, s->reserved_regs);
-    }
-    if (IS_DEAD_ARG(0)) {
+        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
+    } else if (IS_DEAD_ARG(0)) {
         temp_dead(s, ots);
     }
 }
@@ -2004,7 +2033,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
             ots->mem_coherent = 0;
             s->reg_to_temp[ots->reg] = ots;
             if (NEED_SYNC_ARG(0)) {
-                tcg_reg_sync(s, ots->reg, allocated_regs);
+                temp_sync(s, ots, allocated_regs, 0);
             }
         }
     }
@@ -2163,9 +2192,8 @@ static void tcg_reg_alloc_op(TCGContext *s,
                 tcg_out_mov(s, ts->type, ts->reg, reg);
             }
             if (NEED_SYNC_ARG(i)) {
-                tcg_reg_sync(s, reg, allocated_regs);
-            }
-            if (IS_DEAD_ARG(i)) {
+                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
+            } else if (IS_DEAD_ARG(i)) {
                 temp_dead(s, ts);
             }
         }
@@ -2298,9 +2326,8 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
             ts->mem_coherent = 0;
             s->reg_to_temp[reg] = ts;
             if (NEED_SYNC_ARG(i)) {
-                tcg_reg_sync(s, reg, allocated_regs);
-            }
-            if (IS_DEAD_ARG(i)) {
+                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
+            } else if (IS_DEAD_ARG(i)) {
                 temp_dead(s, ts);
             }
         }
tcg/tcg.h (87 lines changed)
@@ -191,6 +191,15 @@ typedef uint64_t tcg_insn_unit;
 #endif
 
 
+#ifdef CONFIG_DEBUG_TCG
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#elif QEMU_GNUC_PREREQ(4, 5)
+# define tcg_debug_assert(X) \
+    do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#else
+# define tcg_debug_assert(X) do { (void)(X); } while (0)
+#endif
+
 typedef struct TCGRelocation {
     struct TCGRelocation *next;
     int type;
@@ -275,10 +284,26 @@ typedef enum TCGMemOp {
 #endif
 
     /* MO_UNALN accesses are never checked for alignment.
-       MO_ALIGN accesses will result in a call to the CPU's
-       do_unaligned_access hook if the guest address is not aligned.
-       The default depends on whether the target CPU defines ALIGNED_ONLY. */
-    MO_AMASK = 16,
+     * MO_ALIGN accesses will result in a call to the CPU's
+     * do_unaligned_access hook if the guest address is not aligned.
+     * The default depends on whether the target CPU defines ALIGNED_ONLY.
+     * Some architectures (e.g. ARMv8) need the address which is aligned
+     * to a size more than the size of the memory access.
+     * To support such check it's enough the current costless alignment
+     * check implementation in QEMU, but we need to support
+     * an alignment size specifying.
+     * MO_ALIGN supposes a natural alignment
+     * (i.e. the alignment size is the size of a memory access).
+     * Note that an alignment size must be equal or greater
+     * than an access size.
+     * There are three options:
+     * - an alignment to the size of an access (MO_ALIGN);
+     * - an alignment to the specified size that is equal or greater than
+     *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
+     * - unaligned access permitted (MO_UNALN).
+     */
+    MO_ASHIFT = 4,
+    MO_AMASK = 7 << MO_ASHIFT,
 #ifdef ALIGNED_ONLY
     MO_ALIGN = 0,
     MO_UNALN = MO_AMASK,
@@ -286,6 +311,12 @@ typedef enum TCGMemOp {
     MO_ALIGN = MO_AMASK,
     MO_UNALN = 0,
 #endif
+    MO_ALIGN_2  = 1 << MO_ASHIFT,
+    MO_ALIGN_4  = 2 << MO_ASHIFT,
+    MO_ALIGN_8  = 3 << MO_ASHIFT,
+    MO_ALIGN_16 = 4 << MO_ASHIFT,
+    MO_ALIGN_32 = 5 << MO_ASHIFT,
+    MO_ALIGN_64 = 6 << MO_ASHIFT,
 
     /* Combinations of the above, for ease of use.  */
     MO_UB    = MO_8,
@@ -317,6 +348,45 @@ typedef enum TCGMemOp {
     MO_SSIZE = MO_SIZE | MO_SIGN,
 } TCGMemOp;
 
+/**
+ * get_alignment_bits
+ * @memop: TCGMemOp value
+ *
+ * Extract the alignment size from the memop.
+ *
+ * Returns: 0 in case of byte access (which is always aligned);
+ *          positive value - number of alignment bits;
+ *          negative value if unaligned access enabled
+ *          and this is not a byte access.
+ */
+static inline int get_alignment_bits(TCGMemOp memop)
+{
+    int a = memop & MO_AMASK;
+    int s = memop & MO_SIZE;
+    int r;
+
+    if (a == MO_UNALN) {
+        /* Negative value if unaligned access enabled,
+         * or zero value in case of byte access.
+         */
+        return -s;
+    } else if (a == MO_ALIGN) {
+        /* A natural alignment: return a number of access size bits */
+        r = s;
+    } else {
+        /* Specific alignment size. It must be equal or greater
+         * than the access size.
+         */
+        r = a >> MO_ASHIFT;
+        tcg_debug_assert(r >= s);
+    }
+#if defined(CONFIG_SOFTMMU)
+    /* The requested alignment cannot overlap the TLB flags.  */
+    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+#endif
+    return r;
+}
+
 typedef tcg_target_ulong TCGArg;
 
 /* Define a type and accessor macros for variables.  Using pointer types
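The contract is easiest to see with concrete values: MO_UNALN yields zero or a negative number (no check needed), MO_ALIGN yields the access's own size bits, and MO_ALIGN_x yields log2(x). A standalone re-implementation for demonstration only, where the enum values mirror the ones added above and the softmmu assertion is omitted:

#include <stdio.h>

enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_SIZE = 3,
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
    MO_UNALN = 0,                  /* the !ALIGNED_ONLY case */
    MO_ALIGN = MO_AMASK,
    MO_ALIGN_4 = 2 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT
};

static int get_alignment_bits(int memop)
{
    int a = memop & MO_AMASK;
    int s = memop & MO_SIZE;

    if (a == MO_UNALN) {
        return -s;                 /* <= 0: no alignment check needed */
    } else if (a == MO_ALIGN) {
        return s;                  /* natural alignment */
    }
    return a >> MO_ASHIFT;         /* explicit alignment size */
}

int main(void)
{
    printf("%d\n", get_alignment_bits(MO_16 | MO_UNALN));    /* -1 */
    printf("%d\n", get_alignment_bits(MO_32 | MO_ALIGN));    /*  2 */
    printf("%d\n", get_alignment_bits(MO_16 | MO_ALIGN_4));  /*  2 */
    printf("%d\n", get_alignment_bits(MO_32 | MO_ALIGN_16)); /*  4 */
    return 0;
}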
@@ -790,15 +860,6 @@ do {\
     abort();\
 } while (0)
 
-#ifdef CONFIG_DEBUG_TCG
-# define tcg_debug_assert(X) do { assert(X); } while (0)
-#elif QEMU_GNUC_PREREQ(4, 5)
-# define tcg_debug_assert(X) \
-    do { if (!(X)) { __builtin_unreachable(); } } while (0)
-#else
-# define tcg_debug_assert(X) do { (void)(X); } while (0)
-#endif
-
 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
 
 #if UINTPTR_MAX == UINT32_MAX
@@ -834,6 +834,12 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
     old_code_ptr[1] = s->code_ptr - old_code_ptr;
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    return false;
+}
+
 /* Test if a constant matches the constraint. */
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)