mirror of https://github.com/xemu-project/xemu.git
Merge tag 'pull-tcg-20220426' of https://gitlab.com/rth7680/qemu into staging

Fix s390x ICMH cc computation.
Minor adjustments to satisfy Coverity.

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmJoyJcdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8ZBQf+OWlDwqNOF+XzyLfb
# pPFAwqNCDX+9rRP6eyouydoCe2n4djj6I4rF+ESdkzbXAxrDzhfBF496CWgFd/Ar
# HRdssehq0V8UY6Blyhig9OXrcwtdJAZrZhQrl5541VqEak89Sii84F0RNt1QdhvE
# HArSm5D78DJx7ZmAtDRZhc3uGOxJefKPTD/4FVnQZQRh9jHeuR9oClMm+1ksYkxo
# 52SkalMlUXZNVvpud8AkuZxWtTeEdzgGPRX/zXdXLMrYI0ZdrqVS/DbuJBA3zwkL
# r+VmPwDIwojn5cHnS8QzP545XdsQ3alWM1Blhi7lKrwS0LHjyD3BOSH1Dxen9IOc
# /Ip5fA==
# =ysOK
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 26 Apr 2022 09:37:43 PM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20220426' of https://gitlab.com/rth7680/qemu:
  softfloat: Use FloatRelation for fracN_cmp
  softfloat: Use FloatRelation within partsN_compare
  softfloat: Fix declaration of partsN_compare
  target/i386: Suppress coverity warning on fsave/frstor
  target/s390x: Fix the accumulation of ccm in op_icm
  accel/tcg: Assert mmu_idx in range before use in cputlb

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 34723f5937

@@ -1761,7 +1761,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                                MemOpIdx oi, int size, int prot,
                                uintptr_t retaddr)
 {
-    size_t mmu_idx = get_mmuidx(oi);
+    uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     uintptr_t index;
@@ -1769,6 +1769,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     target_ulong tlb_addr;
     void *hostaddr;
 
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
+
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
@@ -1908,18 +1910,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
 {
-    uintptr_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
     const size_t tlb_off = code_read ?
         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
     const MMUAccessType access_type =
         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
-    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const size_t size = memop_size(op);
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index;
+    CPUTLBEntry *entry;
+    target_ulong tlb_addr;
     void *haddr;
     uint64_t res;
-    size_t size = memop_size(op);
+
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -1927,6 +1931,10 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                              mmu_idx, retaddr);
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    entry = tlb_entry(env, mmu_idx, addr);
+    tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+
     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
@@ -2310,14 +2318,16 @@ static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              MemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
-    uintptr_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_addr_write(entry);
     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
-    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const size_t size = memop_size(op);
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index;
+    CPUTLBEntry *entry;
+    target_ulong tlb_addr;
     void *haddr;
-    size_t size = memop_size(op);
+
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -2325,6 +2335,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                              mmu_idx, retaddr);
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    entry = tlb_entry(env, mmu_idx, addr);
+    tlb_addr = tlb_addr_write(entry);
+
     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
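
The cputlb.c hunks above are the "Assert mmu_idx in range before use" change: mmu_idx keeps the uintptr_t type used elsewhere, the TLB lookups are deferred into the function body, and tcg_debug_assert(mmu_idx < NB_MMU_MODES) runs before the index is ever used as a table subscript. A minimal, self-contained sketch of that decode/assert/index pattern follows; the demo names, the bit layout of the packed value, and the plain assert() are illustrative assumptions, not QEMU code:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { NB_MODES_DEMO = 16 };                 /* stand-in for NB_MMU_MODES */

static uintptr_t get_mmuidx_demo(uint32_t oi)
{
    return oi & (NB_MODES_DEMO - 1);         /* assume the index lives in the low bits */
}

int main(void)
{
    static int tlb_table_demo[NB_MODES_DEMO];
    uint32_t oi = 0x2a;                      /* some packed (memop, mmu_idx) value */
    uintptr_t mmu_idx = get_mmuidx_demo(oi);

    /* Equivalent in spirit to tcg_debug_assert(mmu_idx < NB_MMU_MODES):
     * fail loudly in debug builds and make the array bound explicit. */
    assert(mmu_idx < NB_MODES_DEMO);

    tlb_table_demo[mmu_idx]++;               /* subscript only after the check */
    printf("mmu_idx = %zu\n", (size_t)mmu_idx);
    return 0;
}

tcg_debug_assert() is a checked assertion only in TCG debug builds, so the main value of the change is documenting the bound to readers and to static analysis such as Coverity.
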
@@ -1327,16 +1327,19 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b,
                                      float_status *s, bool is_quiet)
 {
     int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
-    int cmp;
 
     if (likely(ab_mask == float_cmask_normal)) {
+        FloatRelation cmp;
+
         if (a->sign != b->sign) {
             goto a_sign;
         }
-        if (a->exp != b->exp) {
-            cmp = a->exp < b->exp ? -1 : 1;
-        } else {
+        if (a->exp == b->exp) {
             cmp = frac_cmp(a, b);
+        } else if (a->exp < b->exp) {
+            cmp = float_relation_less;
+        } else {
+            cmp = float_relation_greater;
         }
         if (a->sign) {
             cmp = -cmp;
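
partsN_compare now carries its intermediate result as a FloatRelation instead of a bare int. The enum's values (reproduced below from QEMU's softfloat-types.h for illustration) map less/equal/greater to -1/0/+1, which is why the existing cmp = -cmp sign flip for negative operands keeps working unchanged; the unordered result only arises on the NaN path, which is handled outside this branch. A small standalone sketch:

#include <stdio.h>

/* Values as in QEMU's include/fpu/softfloat-types.h, reproduced for
 * illustration: negating less/equal/greater flips the ordering. */
typedef enum {
    float_relation_less      = -1,
    float_relation_equal     =  0,
    float_relation_greater   =  1,
    float_relation_unordered =  2,
} FloatRelation;

int main(void)
{
    FloatRelation cmp = float_relation_less;

    /* Mirrors the "if (a->sign) { cmp = -cmp; }" step in partsN_compare:
     * when both operands are negative, the magnitude ordering reverses. */
    cmp = -cmp;
    printf("%d\n", cmp);   /* prints 1, i.e. float_relation_greater */
    return 0;
}
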
@@ -874,10 +874,10 @@ static FloatParts128 *parts128_minmax(FloatParts128 *a, FloatParts128 *b,
 #define parts_minmax(A, B, S, F) \
     PARTS_GENERIC_64_128(minmax, A)(A, B, S, F)
 
-static int parts64_compare(FloatParts64 *a, FloatParts64 *b,
-                           float_status *s, bool q);
-static int parts128_compare(FloatParts128 *a, FloatParts128 *b,
-                            float_status *s, bool q);
+static FloatRelation parts64_compare(FloatParts64 *a, FloatParts64 *b,
+                                     float_status *s, bool q);
+static FloatRelation parts128_compare(FloatParts128 *a, FloatParts128 *b,
+                                      float_status *s, bool q);
 
 #define parts_compare(A, B, S, Q) \
     PARTS_GENERIC_64_128(compare, A)(A, B, S, Q)
@@ -957,21 +957,23 @@ static void frac128_allones(FloatParts128 *a)
 
 #define frac_allones(A) FRAC_GENERIC_64_128(allones, A)(A)
 
-static int frac64_cmp(FloatParts64 *a, FloatParts64 *b)
+static FloatRelation frac64_cmp(FloatParts64 *a, FloatParts64 *b)
 {
-    return a->frac == b->frac ? 0 : a->frac < b->frac ? -1 : 1;
+    return (a->frac == b->frac ? float_relation_equal
+            : a->frac < b->frac ? float_relation_less
+            : float_relation_greater);
 }
 
-static int frac128_cmp(FloatParts128 *a, FloatParts128 *b)
+static FloatRelation frac128_cmp(FloatParts128 *a, FloatParts128 *b)
 {
     uint64_t ta = a->frac_hi, tb = b->frac_hi;
     if (ta == tb) {
         ta = a->frac_lo, tb = b->frac_lo;
         if (ta == tb) {
-            return 0;
+            return float_relation_equal;
         }
     }
-    return ta < tb ? -1 : 1;
+    return ta < tb ? float_relation_less : float_relation_greater;
 }
 
 #define frac_cmp(A, B) FRAC_GENERIC_64_128(cmp, A)(A, B)
@@ -2466,7 +2466,7 @@ static void do_fsave(CPUX86State *env, target_ulong ptr, int data32,
 
     do_fstenv(env, ptr, data32, retaddr);
 
-    ptr += (14 << data32);
+    ptr += (target_ulong)14 << data32;
     for (i = 0; i < 8; i++) {
         tmp = ST(i);
         do_fstt(env, tmp, ptr, retaddr);
@@ -2488,7 +2488,7 @@ static void do_frstor(CPUX86State *env, target_ulong ptr, int data32,
     int i;
 
     do_fldenv(env, ptr, data32, retaddr);
-    ptr += (14 << data32);
+    ptr += (target_ulong)14 << data32;
 
     for (i = 0; i < 8; i++) {
         tmp = do_fldt(env, ptr, retaddr);
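
In do_fsave()/do_frstor() the pointer is advanced past the FSAVE environment image, which is 14 bytes in the 16-bit format and 28 bytes when data32 is set. 14 << data32 cannot actually overflow for data32 in {0, 1}, but the shift is evaluated in int and only afterwards widened to target_ulong, which is the kind of pattern Coverity reports as a possible overflow before widening; casting the constant first keeps the whole computation in the wide type. A sketch of the two spellings, illustrative only and not QEMU code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ptr = 0;
    int data32 = 1;

    uint64_t narrow = ptr + (14 << data32);            /* shift in int, then widen */
    uint64_t wide   = ptr + ((uint64_t)14 << data32);  /* widen, then shift */

    /* Both print 28 here; the cast only changes the type in which the
     * shift happens, which is enough to satisfy the checker. */
    printf("%llu %llu\n", (unsigned long long)narrow, (unsigned long long)wide);
    return 0;
}
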
@@ -2622,7 +2622,7 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                 tcg_gen_addi_i64(o->in2, o->in2, 1);
                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
-                ccm |= 0xff << pos;
+                ccm |= 0xffull << pos;
             }
             m3 = (m3 << 1) & 0xf;
             pos -= 8;
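
In op_icm(), ccm accumulates a 64-bit mask of the bytes that INSERT CHARACTERS UNDER MASK wrote, and that mask feeds the condition-code computation. For the high-half form ICMH the byte positions reach 56, so 0xff << pos shifts a 32-bit int by more than its width, which is undefined behaviour and produced a corrupt mask and hence the wrong cc; spelling the constant 0xffull keeps the shift in 64 bits. Even for the low-half ICM, 0xff << 24 already overflows a signed int, so the 64-bit constant is the right spelling for every position. A minimal sketch of the difference, not QEMU code:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t ccm = 0;
    int pos = 56;                 /* a high byte position, as used for ICMH */

    /* ccm |= 0xff << pos;          32-bit int shifted by 56: undefined behaviour */
    ccm |= 0xffull << pos;        /* 64-bit shift: 0xff00000000000000 as intended */

    printf("ccm = 0x%016" PRIx64 "\n", ccm);
    return 0;
}
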