mirror of https://github.com/xemu-project/xemu.git
Merge remote-tracking branch 'sweil/tci' into staging
# By Stefan Weil
# Via Stefan Weil
* sweil/tci:
  misc: Use new rotate functions
  bitops: Add rotate functions (rol8, ror8, ...)
  tci: Add implementation of rotl_i64, rotr_i64
Message-id: 1380137693-3729-1-git-send-email-sw@weilnetz.de
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
This commit is contained in:
commit
ce079abb41
|
@ -183,6 +183,86 @@ static inline unsigned long hweight_long(unsigned long w)
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The complementary shift count is computed as (-shift & 7) rather
 * than (8 - shift): this keeps both shift amounts strictly below the
 * promoted operand width for every @shift, including 0, and keeps the
 * 8-bit variant consistent with the 32/64-bit ones where the naive
 * form would be undefined behavior.
 */
static inline uint8_t rol8(uint8_t word, unsigned int shift)
{
    return (word << (shift & 7)) | (word >> (-shift & 7));
}
|
||||||
|
|
||||||
|
/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 *
 * Uses (-shift & 7) instead of (8 - shift) for the complementary
 * shift so that @shift == 0 never produces a full-width shift;
 * see rol8 for the rationale.
 */
static inline uint8_t ror8(uint8_t word, unsigned int shift)
{
    return (word >> (shift & 7)) | (word << (-shift & 7));
}
|
||||||
|
|
||||||
|
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 *
 * Uses (-shift & 15) instead of (16 - shift) for the complementary
 * shift so that @shift == 0 never produces a full-width shift;
 * see rol8 for the rationale.
 */
static inline uint16_t rol16(uint16_t word, unsigned int shift)
{
    return (word << (shift & 15)) | (word >> (-shift & 15));
}
|
||||||
|
|
||||||
|
/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 *
 * Uses (-shift & 15) instead of (16 - shift) for the complementary
 * shift so that @shift == 0 never produces a full-width shift;
 * see rol8 for the rationale.
 */
static inline uint16_t ror16(uint16_t word, unsigned int shift)
{
    return (word >> (shift & 15)) | (word << (-shift & 15));
}
|
||||||
|
|
||||||
|
/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The naive form (word >> (32 - shift)) shifts by the full type
 * width when @shift == 0, which is undefined behavior (C11 6.5.7p3).
 * Masking both counts with 31 keeps them in [0, 31] and yields the
 * identity rotation for @shift == 0.
 */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
    return (word << (shift & 31)) | (word >> (-shift & 31));
}
|
||||||
|
|
||||||
|
/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The naive form (word << (32 - shift)) shifts by the full type
 * width when @shift == 0, which is undefined behavior (C11 6.5.7p3).
 * Masking both counts with 31 keeps them in [0, 31] and yields the
 * identity rotation for @shift == 0.
 */
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
    return (word >> (shift & 31)) | (word << (-shift & 31));
}
|
||||||
|
|
||||||
|
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The naive form (word >> (64 - shift)) shifts by the full type
 * width when @shift == 0, which is undefined behavior (C11 6.5.7p3).
 * Masking both counts with 63 keeps them in [0, 63] and yields the
 * identity rotation for @shift == 0.
 */
static inline uint64_t rol64(uint64_t word, unsigned int shift)
{
    return (word << (shift & 63)) | (word >> (-shift & 63));
}
|
||||||
|
|
||||||
|
/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The naive form (word << (64 - shift)) shifts by the full type
 * width when @shift == 0, which is undefined behavior (C11 6.5.7p3).
 * Masking both counts with 63 keeps them in [0, 63] and yields the
 * identity rotation for @shift == 0.
 */
static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
    return (word >> (shift & 63)) | (word << (-shift & 63));
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* extract32:
|
* extract32:
|
||||||
* @value: the value to extract the bit field from
|
* @value: the value to extract the bit field from
|
||||||
|
|
|
@ -577,7 +577,7 @@ uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n)
|
||||||
|
|
||||||
/*
 * iWMMXt WRORQ helper: rotate the 64-bit operand @x right by @n bits
 * using the shared ror64() primitive (replaces the hand-rolled
 * (x >> n) | (x << (64 - n)), which is UB when n == 0), then record
 * the result's condition bits in the wCASF control register via
 * NZBIT64() and return the rotated value.
 */
uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ror64(x, n);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}
|
||||||
|
|
|
@ -238,20 +238,16 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
|
||||||
return (int64_t)x >> (int64_t)y;
|
return (int64_t)x >> (int64_t)y;
|
||||||
|
|
||||||
case INDEX_op_rotr_i32:
|
case INDEX_op_rotr_i32:
|
||||||
x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
|
return ror32(x, y);
|
||||||
return x;
|
|
||||||
|
|
||||||
case INDEX_op_rotr_i64:
|
case INDEX_op_rotr_i64:
|
||||||
x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
|
return ror64(x, y);
|
||||||
return x;
|
|
||||||
|
|
||||||
case INDEX_op_rotl_i32:
|
case INDEX_op_rotl_i32:
|
||||||
x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
|
return rol32(x, y);
|
||||||
return x;
|
|
||||||
|
|
||||||
case INDEX_op_rotl_i64:
|
case INDEX_op_rotl_i64:
|
||||||
x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
|
return rol64(x, y);
|
||||||
return x;
|
|
||||||
|
|
||||||
CASE_OP_32_64(not):
|
CASE_OP_32_64(not):
|
||||||
return ~x;
|
return ~x;
|
||||||
|
|
|
@ -670,7 +670,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
|
||||||
case INDEX_op_shl_i64:
|
case INDEX_op_shl_i64:
|
||||||
case INDEX_op_shr_i64:
|
case INDEX_op_shr_i64:
|
||||||
case INDEX_op_sar_i64:
|
case INDEX_op_sar_i64:
|
||||||
/* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
|
|
||||||
case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
|
case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
|
||||||
case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
|
case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
|
||||||
tcg_out_r(s, args[0]);
|
tcg_out_r(s, args[0]);
|
||||||
|
|
14
tci.c
14
tci.c
|
@ -688,13 +688,13 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
|
||||||
t0 = *tb_ptr++;
|
t0 = *tb_ptr++;
|
||||||
t1 = tci_read_ri32(&tb_ptr);
|
t1 = tci_read_ri32(&tb_ptr);
|
||||||
t2 = tci_read_ri32(&tb_ptr);
|
t2 = tci_read_ri32(&tb_ptr);
|
||||||
tci_write_reg32(t0, (t1 << t2) | (t1 >> (32 - t2)));
|
tci_write_reg32(t0, rol32(t1, t2));
|
||||||
break;
|
break;
|
||||||
case INDEX_op_rotr_i32:
|
case INDEX_op_rotr_i32:
|
||||||
t0 = *tb_ptr++;
|
t0 = *tb_ptr++;
|
||||||
t1 = tci_read_ri32(&tb_ptr);
|
t1 = tci_read_ri32(&tb_ptr);
|
||||||
t2 = tci_read_ri32(&tb_ptr);
|
t2 = tci_read_ri32(&tb_ptr);
|
||||||
tci_write_reg32(t0, (t1 >> t2) | (t1 << (32 - t2)));
|
tci_write_reg32(t0, ror32(t1, t2));
|
||||||
break;
|
break;
|
||||||
#endif
|
#endif
|
||||||
#if TCG_TARGET_HAS_deposit_i32
|
#if TCG_TARGET_HAS_deposit_i32
|
||||||
|
@ -952,8 +952,16 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
|
||||||
break;
|
break;
|
||||||
#if TCG_TARGET_HAS_rot_i64
|
#if TCG_TARGET_HAS_rot_i64
|
||||||
case INDEX_op_rotl_i64:
|
case INDEX_op_rotl_i64:
|
||||||
|
t0 = *tb_ptr++;
|
||||||
|
t1 = tci_read_ri64(&tb_ptr);
|
||||||
|
t2 = tci_read_ri64(&tb_ptr);
|
||||||
|
tci_write_reg64(t0, rol64(t1, t2));
|
||||||
|
break;
|
||||||
case INDEX_op_rotr_i64:
|
case INDEX_op_rotr_i64:
|
||||||
TODO();
|
t0 = *tb_ptr++;
|
||||||
|
t1 = tci_read_ri64(&tb_ptr);
|
||||||
|
t2 = tci_read_ri64(&tb_ptr);
|
||||||
|
tci_write_reg64(t0, ror64(t1, t2));
|
||||||
break;
|
break;
|
||||||
#endif
|
#endif
|
||||||
#if TCG_TARGET_HAS_deposit_i64
|
#if TCG_TARGET_HAS_deposit_i64
|
||||||
|
|
Loading…
Reference in New Issue