mirror of https://github.com/xemu-project/xemu.git
target-arm queue:
* fix gen_load_exclusive handling of ldaxp

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJYRaqfAAoJEDwlJe0UNgzeoz4P/15CjtKAli9d+8YLDgx0wPSD
cA/srw5WoYzIx5NmE+CJRRBRjbuVEZ6lh6e5wXHCufjRy7POYNATB9p1WHvPVzB+
DUJrlXoBFTRkHZd+1IeKp7+20sh1J7CntPWkJXmm30gIQCTVx6sA28bPrRm7ZyEx
NA1/fc/+iFYLcI3fJdF4V6t8d67rQn1ZKz9WVtoZIdmvnXaNY3Y/NV2iVcaQ0Qlx
YxW2I2E75awqOT7x3QB3+TW0JtOdL8d+KjDkNm3CdhLjMqMnq41TajWHBVHfqZtH
76n6UmgQqHnG3zfCkUCutIyHa8YL2MHuj/UPSheLgEAvzONBb6XcqbUURykmH2y1
qtTR4H6SfjPzeaZ89Rg1dnaX6YJJ9F2dZEQ3npQ6KKAMashYBSVxzsa6wrijE0y0
MPfDO4zuYEzuo7UCKmMP6OZ59O9l8LxE2aDZcg5ymwfhYnQyewx5pHNjWeM1telp
Za9G79K/zfA45z1rafYQaq3RCs/JR6wRIc1U9Ycrbi/aQheuuG0RcYwexkIf7DYb
JCa4S0eMyR/uWOnnSVoJOGiZChpIP+nVP8I+sPIuUSTZhIzfMnMDtSqTUzq0QE8d
OrU0GJDFraU46l4F3I2tSBEf4A1iVM3PDQiEtdA6LJd4EuveXoEgcjqZQqY0XeQD
yuMT/OoXcs0umJqnVBt+
=S7y9
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'pm215/tags/pull-target-arm-20161205' into staging

target-arm queue:
* fix gen_load_exclusive handling of ldaxp

# gpg: Signature made Mon 05 Dec 2016 05:57:51 PM GMT
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* pm215/tags/pull-target-arm-20161205:
  target-arm/translate-a64: fix gen_load_exclusive

Message-id: 1480960775-5002-1-git-send-email-peter.maydell@linaro.org
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 5d3074f0db
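For context: LDAXP is the AArch64 load-acquire exclusive pair instruction. Guest code normally pairs it with STLXP in a retry loop, and that pair path (is_pair in the translator) is what gen_load_exclusive handles. The sketch below is illustrative only and is not QEMU code: it assumes an AArch64 target with GCC/Clang-style inline assembly, and the helper name is invented.

#include <stdint.h>

/*
 * Illustrative sketch, not QEMU code: the kind of guest sequence whose
 * translation goes through gen_load_exclusive() with is_pair set.
 * Assumes an AArch64 target and GCC/Clang inline assembly; the helper
 * name is invented.
 */
static inline void store_pair_exclusive(uint64_t *p, uint64_t lo, uint64_t hi)
{
    uint64_t old_lo, old_hi;
    uint32_t fail;

    __asm__ volatile(
        "0: ldaxp %[ol], %[oh], [%[ptr]]\n"        /* load-acquire exclusive pair   */
        "   stlxp %w[f], %[lo], %[hi], [%[ptr]]\n" /* store-release exclusive pair  */
        "   cbnz  %w[f], 0b\n"                     /* retry if exclusivity was lost */
        : [ol] "=&r" (old_lo), [oh] "=&r" (old_hi), [f] "=&r" (fail)
        : [ptr] "r" (p), [lo] "r" (lo), [hi] "r" (hi)
        : "memory");

    (void)old_lo;
    (void)old_hi;
}

A 32-bit register form (e.g. ldaxp w0, w1, [x2]) exists as well; in the patch below that corresponds to the size == 2 case, while the 64-bit form shown here is size == 3.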
target-arm/translate-a64.c
@@ -1839,41 +1839,37 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Load/Store exclusive instructions are implemented by remembering
+ * the value/address loaded, and seeing if these are the same
+ * when the store is performed. This is not actually the architecturally
+ * mandated semantics, but it works for typical guest code sequences
+ * and avoids having to monitor regular stores.
+ *
+ * The store exclusive uses the atomic cmpxchg primitives to avoid
+ * races in multi-threaded linux-user and when MTTCG softmmu is
+ * enabled.
+ */
 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                                TCGv_i64 addr, int size, bool is_pair)
 {
     TCGv_i64 tmp = tcg_temp_new_i64();
-    TCGMemOp be = s->be_data;
+    TCGMemOp memop = s->be_data + size;
 
     g_assert(size <= 3);
+    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
+
     if (is_pair) {
+        TCGv_i64 addr2 = tcg_temp_new_i64();
         TCGv_i64 hitmp = tcg_temp_new_i64();
 
-        if (size == 3) {
-            TCGv_i64 addr2 = tcg_temp_new_i64();
-
-            tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s),
-                                MO_64 | MO_ALIGN_16 | be);
-            tcg_gen_addi_i64(addr2, addr, 8);
-            tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s),
-                                MO_64 | MO_ALIGN | be);
-            tcg_temp_free_i64(addr2);
-        } else {
-            g_assert(size == 2);
-            tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s),
-                                MO_64 | MO_ALIGN | be);
-            if (be == MO_LE) {
-                tcg_gen_extr32_i64(tmp, hitmp, tmp);
-            } else {
-                tcg_gen_extr32_i64(hitmp, tmp, tmp);
-            }
-        }
-
+        g_assert(size >= 2);
+        tcg_gen_addi_i64(addr2, addr, 1 << size);
+        tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
+        tcg_temp_free_i64(addr2);
         tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
         tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
         tcg_temp_free_i64(hitmp);
-    } else {
-        tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), size | MO_ALIGN | be);
     }
 
     tcg_gen_mov_i64(cpu_exclusive_val, tmp);
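The comment block restored by this patch describes the model QEMU uses for exclusives: the load-exclusive records the address and the value it loaded, and the store-exclusive succeeds only if memory still holds the recorded value, checked with an atomic cmpxchg. Below is a minimal host-side sketch of that idea, illustrative only: in QEMU the logic is emitted as TCG ops against the cpu_exclusive_addr/cpu_exclusive_val/cpu_exclusive_high globals rather than written as C like this, and the function names here are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the cpu_exclusive_addr / cpu_exclusive_val globals. */
static uint64_t exclusive_addr = UINT64_MAX;
static uint64_t exclusive_val;

/* Load-exclusive: remember which address was marked and what it held. */
static uint64_t model_load_exclusive(_Atomic uint64_t *addr)
{
    uint64_t val = atomic_load(addr);

    exclusive_addr = (uint64_t)(uintptr_t)addr;
    exclusive_val = val;
    return val;
}

/*
 * Store-exclusive: fail unless the same address was marked and memory
 * still holds the remembered value; the check and the store happen in
 * one atomic step via compare-and-swap (the "cmpxchg primitives" the
 * restored comment refers to).
 */
static bool model_store_exclusive(_Atomic uint64_t *addr, uint64_t newval)
{
    uint64_t expected = exclusive_val;

    if (exclusive_addr != (uint64_t)(uintptr_t)addr) {
        return false;
    }
    return atomic_compare_exchange_strong(addr, &expected, newval);
}

For the pair forms, the restored code simply issues a second load at addr + (1 << size), i.e. +4 for the 32-bit pair and +8 for the 64-bit pair, and keeps the high half in cpu_exclusive_high, rather than deriving both halves from a single 64-bit load as the removed size == 2 path did.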