target/alpha: Use MO_ALIGN where required

Mark all memory operations that are not already marked with UNALIGN.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-05-02 15:36:47 +01:00
Commit: 33948b68a7
Parent: 6ffaac9ca0

1 file changed, 20 insertions(+), 16 deletions(-)
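For context: the MemOp argument of a TCG memory op (MO_LESL, MO_LEUQ, ...) encodes size and endianness, and OR-ing in MO_ALIGN additionally requests a natural-alignment check, so a misaligned guest access raises the target's unaligned-access fault instead of silently completing. The "UNALIGN" in the message refers to MO_UNALN, the complementary flag for accesses with no alignment requirement (such as Alpha's ldq_u/stq_u). A minimal sketch of the pattern applied throughout this patch, with illustrative variable names not taken from the diff:

    /* Little-endian 64-bit load with an alignment check: if 'addr' is not
       8-byte aligned, the guest's alignment fault is raised. */
    tcg_gen_qemu_ld_i64(val, addr, mmu_idx, MO_LEUQ | MO_ALIGN);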

--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c

@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             switch ((insn >> 12) & 0xF) {
             case 0x0:
                 /* Longword physical access (hw_ldl/p) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                 break;
             case 0x1:
                 /* Quadword physical access (hw_ldq/p) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                 break;
             case 0x2:
                 /* Longword physical access with lock (hw_ldl_l/p) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                 tcg_gen_mov_i64(cpu_lock_addr, addr);
                 tcg_gen_mov_i64(cpu_lock_value, va);
                 break;
             case 0x3:
                 /* Quadword physical access with lock (hw_ldq_l/p) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                 tcg_gen_mov_i64(cpu_lock_addr, addr);
                 tcg_gen_mov_i64(cpu_lock_value, va);
                 break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
                 goto invalid_opc;
             case 0xA:
                 /* Longword virtual access with protection check (hw_ldl/w) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+                                    MO_LESL | MO_ALIGN);
                 break;
             case 0xB:
                 /* Quadword virtual access with protection check (hw_ldq/w) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+                                    MO_LEUQ | MO_ALIGN);
                 break;
             case 0xC:
                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             case 0xE:
                 /* Longword virtual access with alternate access mode and
                    protection checks (hw_ldl/wa) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+                                    MO_LESL | MO_ALIGN);
                 break;
             case 0xF:
                 /* Quadword virtual access with alternate access mode and
                    protection checks (hw_ldq/wa) */
-                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+                                    MO_LEUQ | MO_ALIGN);
                 break;
             }
             break;
@@ -2659,7 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
                 vb = load_gpr(ctx, rb);
                 tmp = tcg_temp_new();
                 tcg_gen_addi_i64(tmp, vb, disp12);
-                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                 break;
             case 0x1:
                 /* Quadword physical access */
@@ -2667,17 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
                 vb = load_gpr(ctx, rb);
                 tmp = tcg_temp_new();
                 tcg_gen_addi_i64(tmp, vb, disp12);
-                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                 break;
             case 0x2:
                 /* Longword physical access with lock */
                 ret = gen_store_conditional(ctx, ra, rb, disp12,
-                                            MMU_PHYS_IDX, MO_LESL);
+                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                 break;
             case 0x3:
                 /* Quadword physical access with lock */
                 ret = gen_store_conditional(ctx, ra, rb, disp12,
-                                            MMU_PHYS_IDX, MO_LEUQ);
+                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                 break;
             case 0x4:
                 /* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         break;
     case 0x2A:
         /* LDL_L */
-        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
         break;
     case 0x2B:
         /* LDQ_L */
-        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
         break;
     case 0x2C:
         /* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
     case 0x2E:
         /* STL_C */
         ret = gen_store_conditional(ctx, ra, rb, disp16,
-                                    ctx->mem_idx, MO_LESL);
+                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
         break;
     case 0x2F:
         /* STQ_C */
         ret = gen_store_conditional(ctx, ra, rb, disp16,
-                                    ctx->mem_idx, MO_LEUQ);
+                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
         break;
     case 0x30:
         /* BR */