target/s390x: Convert to HAVE_CMPXCHG128 and HAVE_ATOMIC128

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Author: Richard Henderson, 2018-08-15 16:50:00 -07:00
parent f34ec0f6d7
commit 5e95612e2e
1 changed file with 41 additions and 51 deletions

View File

@ -25,6 +25,7 @@
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/cpu_ldst.h" #include "exec/cpu_ldst.h"
#include "qemu/int128.h" #include "qemu/int128.h"
#include "qemu/atomic128.h"
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h" #include "hw/s390x/storage-keys.h"
@ -1389,7 +1390,7 @@ static void do_cdsg(CPUS390XState *env, uint64_t addr,
bool fail; bool fail;
if (parallel) { if (parallel) {
#ifndef CONFIG_ATOMIC128 #if !HAVE_CMPXCHG128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else #else
int mem_idx = cpu_mmu_index(env, false); int mem_idx = cpu_mmu_index(env, false);
@ -1435,9 +1436,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
uint64_t a2, bool parallel) uint64_t a2, bool parallel)
{ {
#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
uint32_t mem_idx = cpu_mmu_index(env, false); uint32_t mem_idx = cpu_mmu_index(env, false);
#endif
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t fc = extract32(env->regs[0], 0, 8);
uint32_t sc = extract32(env->regs[0], 8, 8); uint32_t sc = extract32(env->regs[0], 8, 8);
@ -1465,18 +1464,20 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
probe_write(env, a2, 0, mem_idx, ra); probe_write(env, a2, 0, mem_idx, ra);
#endif #endif
/* Note that the compare-and-swap is atomic, and the store is atomic, but /*
the complete operation is not. Therefore we do not need to assert serial * Note that the compare-and-swap is atomic, and the store is atomic,
context in order to implement this. That said, restart early if we can't * but the complete operation is not. Therefore we do not need to
support either operation that is supposed to be atomic. */ * assert serial context in order to implement this. That said,
* restart early if we can't support either operation that is supposed
* to be atomic.
*/
if (parallel) { if (parallel) {
int mask = 0; uint32_t max = 2;
#if !defined(CONFIG_ATOMIC64) #ifdef CONFIG_ATOMIC64
mask = -8; max = 3;
#elif !defined(CONFIG_ATOMIC128)
mask = -16;
#endif #endif
if (((4 << fc) | (1 << sc)) & mask) { if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
(HAVE_ATOMIC128 ? 0 : sc > max)) {
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
} }
} }
@ -1546,16 +1547,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
Int128 ov; Int128 ov;
if (parallel) { if (!parallel) {
#ifdef CONFIG_ATOMIC128
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
#else
/* Note that we asserted !parallel above. */
g_assert_not_reached();
#endif
} else {
uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);
@ -1567,6 +1559,13 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
} else {
/* Note that we asserted !parallel above. */
g_assert_not_reached();
} }
env->regs[r3 + 0] = int128_gethi(ov); env->regs[r3 + 0] = int128_gethi(ov);
@ -1596,18 +1595,16 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a2, svh, ra); cpu_stq_data_ra(env, a2, svh, ra);
break; break;
case 4: case 4:
if (parallel) { if (!parallel) {
#ifdef CONFIG_ATOMIC128 cpu_stq_data_ra(env, a2 + 0, svh, ra);
cpu_stq_data_ra(env, a2 + 8, svl, ra);
} else if (HAVE_ATOMIC128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh); Int128 sv = int128_make128(svl, svh);
helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
#else } else {
/* Note that we asserted !parallel above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
#endif
} else {
cpu_stq_data_ra(env, a2 + 0, svh, ra);
cpu_stq_data_ra(env, a2 + 8, svl, ra);
} }
break; break;
default: default:
@ -2105,21 +2102,18 @@ static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
uint64_t hi, lo; uint64_t hi, lo;
if (parallel) { if (!parallel) {
#ifndef CONFIG_ATOMIC128 check_alignment(env, addr, 16, ra);
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); hi = cpu_ldq_data_ra(env, addr + 0, ra);
#else lo = cpu_ldq_data_ra(env, addr + 8, ra);
} else if (HAVE_ATOMIC128) {
int mem_idx = cpu_mmu_index(env, false); int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v); hi = int128_gethi(v);
lo = int128_getlo(v); lo = int128_getlo(v);
#endif
} else { } else {
check_alignment(env, addr, 16, ra); cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
hi = cpu_ldq_data_ra(env, addr + 0, ra);
lo = cpu_ldq_data_ra(env, addr + 8, ra);
} }
env->retxl = lo; env->retxl = lo;
@ -2142,21 +2136,17 @@ static void do_stpq(CPUS390XState *env, uint64_t addr,
{ {
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
if (parallel) { if (!parallel) {
#ifndef CONFIG_ATOMIC128
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 v = int128_make128(low, high);
helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
#endif
} else {
check_alignment(env, addr, 16, ra); check_alignment(env, addr, 16, ra);
cpu_stq_data_ra(env, addr + 0, high, ra); cpu_stq_data_ra(env, addr + 0, high, ra);
cpu_stq_data_ra(env, addr + 8, low, ra); cpu_stq_data_ra(env, addr + 8, low, ra);
} else if (HAVE_ATOMIC128) {
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 v = int128_make128(low, high);
helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
} else {
cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
} }
} }