mirror of https://github.com/xemu-project/xemu.git
target/arm: Avoid tcg_const_i64 in handle_vec_simd_sqshrn
It is easy enough to use mov instead of or-with-zero and relying on the
optimizer to fold away the or.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent a2c4fb8cae
commit 1b7bc9b5c8
@@ -8459,7 +8459,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     tcg_rn = tcg_temp_new_i64();
     tcg_rd = tcg_temp_new_i64();
     tcg_rd_narrowed = tcg_temp_new_i32();
-    tcg_final = tcg_const_i64(0);
+    tcg_final = tcg_temp_new_i64();
 
     if (round) {
         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
@@ -8473,7 +8473,11 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                 false, is_u_shift, size+1, shift);
         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
-        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        if (i == 0) {
+            tcg_gen_mov_i64(tcg_final, tcg_rd);
+        } else {
+            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        }
     }
 
     if (!is_q) {
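For context, here is a minimal standalone C sketch (plain C, not QEMU/TCG code) of the pattern the patch switches to: the first element is written with a plain assignment (the analogue of tcg_gen_mov_i64), and only the later elements are deposited into the accumulator, so no zero-initialized constant is needed. The names deposit64 and pack_elements below are illustrative, not taken from the QEMU source.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Deposit the low 'width' bits of 'val' into 'dst' at bit offset 'ofs',
 * mirroring what tcg_gen_deposit_i64 does at the TCG level. */
static uint64_t deposit64(uint64_t dst, unsigned ofs, unsigned width,
                          uint64_t val)
{
    uint64_t mask = (width < 64 ? (1ULL << width) - 1 : ~0ULL) << ofs;
    return (dst & ~mask) | ((val << ofs) & mask);
}

/* Pack 'n' elements of 'esize' bits each into one 64-bit result.
 * Each element is assumed to already fit in 'esize' bits. */
static uint64_t pack_elements(const uint64_t *elt, int n, unsigned esize)
{
    uint64_t final = 0;   /* never read before being written when n > 0 */

    for (int i = 0; i < n; i++) {
        if (i == 0) {
            final = elt[i];                              /* plain move */
        } else {
            final = deposit64(final, esize * i, esize, elt[i]);
        }
    }
    return final;
}

int main(void)
{
    uint64_t elts[4] = { 0x11, 0x22, 0x33, 0x44 };
    /* Prints 0x0044003300220011: four 16-bit lanes packed low to high. */
    printf("0x%016" PRIx64 "\n", pack_elements(elts, 4, 16));
    return 0;
}

In the old code the i == 0 iteration deposited into a zeroed constant, i.e. an or-with-zero that the optimizer had to fold away; with the explicit move, tcg_final can be an ordinary temporary and the generated ops state the intent directly.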