Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20150205' into staging

target-arm queue:
 * refactor/clean up armv7m_init()
 * some initial cleanup in the direction of supporting 64-bit EL3
 * fix broken synchronization of registers between QEMU and KVM
   for 32-bit ARM hosts (which among other things broke memory
   access via gdbstub)
 * fix flush-to-zero handling in FMULX, FRECPS, FRSQRTS and FRECPE
 * don't crash QEMU for UNPREDICTABLE BFI insns in A32 encoding
 * explain why virt board's device-to-transport mapping code is
   the way it is
 * implement mmu_idx values which match the architectural
   distinctions, and introduce the concept of a translation
   regime to get_phys_addr() rather than incorrectly looking
   at the current CPU state (a sketch of the idea follows this list)
 * update to upstream VIXL 1.7 (gives us correct code addresses
   when disassembling pc-relative references)
 * sync system register state between KVM and QEMU for 64-bit ARM
 * support virtio on big-endian guests by implementing the
   "which endian is the guest now?" CPU method

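As a quick illustration of the mmu_idx / translation regime item above (using invented
names and a toy "table walk", not QEMU's actual ARMMMUIdx definitions or the real
get_phys_addr() prototype), the shape of the change is that the caller names the
translation regime explicitly and the walker never consults the CPU's current state:

#include <cstdint>
#include <cstdio>

// Hypothetical regime identifiers (illustrative only).
enum ExampleMMUIdx { IDX_EL10_USER = 0, IDX_EL10_PRIV = 1, IDX_EL2 = 2, IDX_EL3 = 3 };

// Each regime has its own translation base; the walker uses only the regime
// it was handed, never the CPU's current exception level.
static uint64_t example_get_phys_addr(uint64_t vaddr, ExampleMMUIdx mmu_idx)
{
    static const uint64_t ttbr[4] = { 0x40000000, 0x48000000, 0x80000000, 0xc0000000 };
    return ttbr[mmu_idx] + (vaddr & 0xfffu);   // trivial stand-in for a page-table walk
}

int main()
{
    // An unprivileged load (LDRT) executed at EL1 still translates with the
    // user index of the EL1&0 regime, regardless of the CPU's current EL.
    printf("%#llx\n", (unsigned long long)example_get_phys_addr(0x1234, IDX_EL10_USER));
    return 0;
}
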
# gpg: Signature made Thu 05 Feb 2015 14:02:16 GMT using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20150205: (28 commits)
  target-arm: fix for exponent comparison in recpe_f64
  target-arm: Guest cpu endianness determination for virtio KVM ARM/ARM64
  target-arm: KVM64: Get and Sync up guest register state like kvm32.
  disas/arm-a64.cc: Tell libvixl correct code addresses
  disas/libvixl: Update to upstream VIXL 1.7
  target-arm: Fix brace style in reindented code
  target-arm: Reindent ancient page-table-walk code
  target-arm: Use mmu_idx in get_phys_addr()
  target-arm: Pass mmu_idx to get_phys_addr()
  target-arm: Split AArch64 cases out of ats_write()
  target-arm: Don't define any MMU_MODE*_SUFFIXes
  target-arm: Use correct mmu_idx for unprivileged loads and stores
  target-arm: Define correct mmu_idx values and pass them in TB flags
  target-arm/translate-a64: Fix wrong mmu_idx usage for LDT/STT
  target-arm: Make arm_current_el() return sensible values for M profile
  cpu_ldst.h: Allow NB_MMU_MODES to be 7
  hw/arm/virt: explain device-to-transport mapping in create_virtio_devices()
  target-arm: check that LSB <= MSB in BFI instruction
  target-arm: Squash input denormals in FRECPS and FRSQRTS
  Fix FMULX not squashing denormalized inputs when FZ is set.
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit cd07b19307 by Peter Maydell <peter.maydell@linaro.org>, 2015-02-05 14:22:51 +00:00
26 changed files with 1417 additions and 557 deletions

View File

@ -67,7 +67,8 @@ static void vixl_init(FILE *f) {
int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
{
uint8_t bytes[INSN_SIZE];
uint32_t instr;
uint32_t instrval;
const Instruction *instr;
int status;
status = info->read_memory_func(addr, bytes, INSN_SIZE, info);
@ -80,8 +81,10 @@ int print_insn_arm_a64(uint64_t addr, disassemble_info *info)
vixl_init(info->stream);
}
instr = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
vixl_decoder->Decode(reinterpret_cast<Instruction*>(&instr));
instrval = bytes[0] | bytes[1] << 8 | bytes[2] << 16 | bytes[3] << 24;
instr = reinterpret_cast<const Instruction *>(&instrval);
vixl_disasm->MapCodeAddress(addr, instr);
vixl_decoder->Decode(instr);
return INSN_SIZE;
}
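For reference, the calling pattern above can be sketched standalone (assuming the
libvixl include paths used by QEMU's disas/arm-a64.cc and eliding error handling;
this is an illustration, not the exact QEMU code):

#include <cinttypes>
#include <cstdio>
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"

// Disassemble one instruction held in a host variable but logically located
// at guest address `addr`, as print_insn_arm_a64() now arranges.
static void disas_one(uint64_t addr, uint32_t instrval)
{
    vixl::Decoder decoder;
    vixl::Disassembler disasm;
    decoder.AppendVisitor(&disasm);

    const vixl::Instruction *instr =
        reinterpret_cast<const vixl::Instruction *>(&instrval);
    // Tell the disassembler which code address the instruction notionally has,
    // so pc-relative targets (adr, adrp, branches) print as guest addresses.
    disasm.MapCodeAddress(addr, instr);
    decoder.Decode(instr);
    printf("0x%016" PRIx64 ":  %s\n", addr, disasm.GetOutput());
}

int main()
{
    disas_one(UINT64_C(0x40080000), 0xd503201fu);   // AArch64 NOP
    return 0;
}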

View File

@ -2,7 +2,7 @@
The code in this directory is a subset of libvixl:
https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.6).
taken from libvixl 1.7).
Bugfixes should preferably be sent upstream initially.
The disassembler does not currently support the entire A64 instruction

View File

@ -151,21 +151,21 @@ class CPURegister {
return Aliases(other) && (size_ == other.size_);
}
inline bool IsZero() const {
bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
inline bool IsSP() const {
bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
inline bool IsRegister() const {
bool IsRegister() const {
return type_ == kRegister;
}
inline bool IsFPRegister() const {
bool IsFPRegister() const {
return type_ == kFPRegister;
}
@ -179,7 +179,7 @@ class CPURegister {
const FPRegister& S() const;
const FPRegister& D() const;
inline bool IsSameSizeAndType(const CPURegister& other) const {
bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && (type_ == other.type_);
}
@ -198,7 +198,7 @@ class CPURegister {
class Register : public CPURegister {
public:
Register() : CPURegister() {}
inline explicit Register(const CPURegister& other)
explicit Register(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
VIXL_ASSERT(IsValidRegister());
}
@ -213,10 +213,6 @@ class Register : public CPURegister {
static const Register& WRegFromCode(unsigned code);
static const Register& XRegFromCode(unsigned code);
// V8 compatibility.
static const int kNumRegisters = kNumberOfRegisters;
static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
private:
static const Register wregisters[];
static const Register xregisters[];
@ -225,12 +221,12 @@ class Register : public CPURegister {
class FPRegister : public CPURegister {
public:
inline FPRegister() : CPURegister() {}
inline explicit FPRegister(const CPURegister& other)
FPRegister() : CPURegister() {}
explicit FPRegister(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
VIXL_ASSERT(IsValidFPRegister());
}
inline FPRegister(unsigned code, unsigned size)
FPRegister(unsigned code, unsigned size)
: CPURegister(code, size, kFPRegister) {}
bool IsValid() const {
@ -241,10 +237,6 @@ class FPRegister : public CPURegister {
static const FPRegister& SRegFromCode(unsigned code);
static const FPRegister& DRegFromCode(unsigned code);
// V8 compatibility.
static const int kNumRegisters = kNumberOfFPRegisters;
static const int kNumAllocatableRegisters = kNumberOfFPRegisters - 1;
private:
static const FPRegister sregisters[];
static const FPRegister dregisters[];
@ -312,23 +304,23 @@ bool AreSameSizeAndType(const CPURegister& reg1,
// Lists of registers.
class CPURegList {
public:
inline explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.size()), type_(reg1.type()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
inline CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
@ -340,7 +332,7 @@ class CPURegList {
VIXL_ASSERT(IsValid());
}
inline CPURegister::RegisterType type() const {
CPURegister::RegisterType type() const {
VIXL_ASSERT(IsValid());
return type_;
}
@ -366,13 +358,13 @@ class CPURegList {
}
// Variants of Combine and Remove which take a single register.
inline void Combine(const CPURegister& other) {
void Combine(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Combine(other.code());
}
inline void Remove(const CPURegister& other) {
void Remove(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Remove(other.code());
@ -380,24 +372,51 @@ class CPURegList {
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
inline void Combine(int code) {
void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
inline void Remove(int code) {
void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
inline RegList list() const {
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
}
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
}
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
RegList list() const {
VIXL_ASSERT(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
void set_list(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
@ -417,38 +436,38 @@ class CPURegList {
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
inline bool IsEmpty() const {
bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
inline bool IncludesAliasOf(const CPURegister& other) const {
bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.type()) && ((other.Bit() & list_) != 0);
}
inline bool IncludesAliasOf(int code) const {
bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return ((code & list_) != 0);
}
inline int Count() const {
int Count() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
inline unsigned RegisterSizeInBits() const {
unsigned RegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
inline unsigned RegisterSizeInBytes() const {
unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
inline unsigned TotalSizeInBytes() const {
unsigned TotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return RegisterSizeInBytes() * Count();
}
@ -587,8 +606,10 @@ class Label {
VIXL_ASSERT(!IsLinked() || IsBound());
}
inline bool IsBound() const { return location_ >= 0; }
inline bool IsLinked() const { return !links_.empty(); }
bool IsBound() const { return location_ >= 0; }
bool IsLinked() const { return !links_.empty(); }
ptrdiff_t location() const { return location_; }
private:
// The list of linked instructions is stored in a stack-like structure. We
@ -647,22 +668,20 @@ class Label {
std::stack<ptrdiff_t> * links_extended_;
};
inline ptrdiff_t location() const { return location_; }
inline void Bind(ptrdiff_t location) {
void Bind(ptrdiff_t location) {
// Labels can only be bound once.
VIXL_ASSERT(!IsBound());
location_ = location;
}
inline void AddLink(ptrdiff_t instruction) {
void AddLink(ptrdiff_t instruction) {
// If a label is bound, the assembler already has the information it needs
// to write the instruction, so there is no need to add it to links_.
VIXL_ASSERT(!IsBound());
links_.push(instruction);
}
inline ptrdiff_t GetAndRemoveNextLink() {
ptrdiff_t GetAndRemoveNextLink() {
VIXL_ASSERT(IsLinked());
ptrdiff_t link = links_.top();
links_.pop();
@ -845,14 +864,14 @@ class Assembler {
// Return the address of an offset in the buffer.
template <typename T>
inline T GetOffsetAddress(ptrdiff_t offset) {
T GetOffsetAddress(ptrdiff_t offset) {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return buffer_->GetOffsetAddress<T>(offset);
}
// Return the address of a bound label.
template <typename T>
inline T GetLabelAddress(const Label * label) {
T GetLabelAddress(const Label * label) {
VIXL_ASSERT(label->IsBound());
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(label->location());
@ -860,14 +879,14 @@ class Assembler {
// Return the address of the cursor.
template <typename T>
inline T GetCursorAddress() {
T GetCursorAddress() {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(CursorOffset());
}
// Return the address of the start of the buffer.
template <typename T>
inline T GetStartAddress() {
T GetStartAddress() {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(0);
}
@ -1074,20 +1093,20 @@ class Assembler {
// Bfm aliases.
// Bitfield insert.
inline void bfi(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void bfi(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Bitfield extract and insert low.
inline void bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, lsb, lsb + width - 1);
@ -1095,92 +1114,92 @@ class Assembler {
// Sbfm aliases.
// Arithmetic shift right.
inline void asr(const Register& rd, const Register& rn, unsigned shift) {
void asr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
sbfm(rd, rn, shift, rd.size() - 1);
}
// Signed bitfield insert with zero at right.
inline void sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Signed bitfield extract.
inline void sbfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void sbfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, lsb, lsb + width - 1);
}
// Signed extend byte.
inline void sxtb(const Register& rd, const Register& rn) {
void sxtb(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 7);
}
// Signed extend halfword.
inline void sxth(const Register& rd, const Register& rn) {
void sxth(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 15);
}
// Signed extend word.
inline void sxtw(const Register& rd, const Register& rn) {
void sxtw(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 31);
}
// Ubfm aliases.
// Logical shift left.
inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.size();
VIXL_ASSERT(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
void lsr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
ubfm(rd, rn, shift, rd.size() - 1);
}
// Unsigned bitfield insert with zero at right.
inline void ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Unsigned bitfield extract.
inline void ubfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
void ubfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, lsb, lsb + width - 1);
}
// Unsigned extend byte.
inline void uxtb(const Register& rd, const Register& rn) {
void uxtb(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 7);
}
// Unsigned extend halfword.
inline void uxth(const Register& rd, const Register& rn) {
void uxth(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 15);
}
// Unsigned extend word.
inline void uxtw(const Register& rd, const Register& rn) {
void uxtw(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 31);
}
@ -1230,7 +1249,7 @@ class Assembler {
void cneg(const Register& rd, const Register& rn, Condition cond);
// Rotate right.
inline void ror(const Register& rd, const Register& rs, unsigned shift) {
void ror(const Register& rd, const Register& rs, unsigned shift) {
extr(rd, rs, rs, shift);
}
@ -1495,6 +1514,19 @@ class Assembler {
// Load-acquire register.
void ldar(const Register& rt, const MemOperand& src);
// Prefetch memory.
void prfm(PrefetchOperation op, const MemOperand& addr,
LoadStoreScalingOption option = PreferScaledOffset);
// Prefetch memory (with unscaled offset).
void prfum(PrefetchOperation op, const MemOperand& addr,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Prefetch memory in the literal pool.
void prfm(PrefetchOperation op, RawLiteral* literal);
// Prefetch from pc + imm19 << 2.
void prfm(PrefetchOperation op, int imm19);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
@ -1638,12 +1670,21 @@ class Assembler {
// FP round to integer (nearest with ties to away).
void frinta(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (implicit rounding).
void frinti(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (toward minus infinity).
void frintm(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (nearest with ties to even).
void frintn(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (toward plus infinity).
void frintp(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (exact, implicit rounding).
void frintx(const FPRegister& fd, const FPRegister& fn);
// FP round to integer (towards zero).
void frintz(const FPRegister& fd, const FPRegister& fn);
@ -1705,16 +1746,16 @@ class Assembler {
// Emit generic instructions.
// Emit raw instructions into the instruction stream.
inline void dci(Instr raw_inst) { Emit(raw_inst); }
void dci(Instr raw_inst) { Emit(raw_inst); }
// Emit 32 bits of data into the instruction stream.
inline void dc32(uint32_t data) {
void dc32(uint32_t data) {
VIXL_ASSERT(buffer_monitor_ > 0);
buffer_->Emit32(data);
}
// Emit 64 bits of data into the instruction stream.
inline void dc64(uint64_t data) {
void dc64(uint64_t data) {
VIXL_ASSERT(buffer_monitor_ > 0);
buffer_->Emit64(data);
}
@ -1849,14 +1890,14 @@ class Assembler {
}
}
static inline Instr ImmS(unsigned imms, unsigned reg_size) {
static Instr ImmS(unsigned imms, unsigned reg_size) {
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
((reg_size == kWRegSize) && is_uint5(imms)));
USE(reg_size);
return imms << ImmS_offset;
}
static inline Instr ImmR(unsigned immr, unsigned reg_size) {
static Instr ImmR(unsigned immr, unsigned reg_size) {
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
USE(reg_size);
@ -1864,7 +1905,7 @@ class Assembler {
return immr << ImmR_offset;
}
static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT(is_uint6(imms));
VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
@ -1872,7 +1913,7 @@ class Assembler {
return imms << ImmSetBits_offset;
}
static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
static Instr ImmRotate(unsigned immr, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
((reg_size == kWRegSize) && is_uint5(immr)));
@ -1880,12 +1921,12 @@ class Assembler {
return immr << ImmRotate_offset;
}
static inline Instr ImmLLiteral(int imm19) {
static Instr ImmLLiteral(int imm19) {
VIXL_ASSERT(is_int19(imm19));
return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
static inline Instr BitN(unsigned bitn, unsigned reg_size) {
static Instr BitN(unsigned bitn, unsigned reg_size) {
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
USE(reg_size);
@ -1943,6 +1984,11 @@ class Assembler {
return shift_amount << ImmShiftLS_offset;
}
static Instr ImmPrefetchOperation(int imm5) {
VIXL_ASSERT(is_uint5(imm5));
return imm5 << ImmPrefetchOperation_offset;
}
static Instr ImmException(int imm16) {
VIXL_ASSERT(is_uint16(imm16));
return imm16 << ImmException_offset;
@ -2003,12 +2049,32 @@ class Assembler {
return scale << FPScale_offset;
}
// Immediate field checking helpers.
static bool IsImmAddSub(int64_t immediate);
static bool IsImmConditionalCompare(int64_t immediate);
static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm);
static bool IsImmLogical(uint64_t value,
unsigned width,
unsigned* n = NULL,
unsigned* imm_s = NULL,
unsigned* imm_r = NULL);
static bool IsImmLSPair(int64_t offset, LSDataSize size);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmMovn(uint64_t imm, unsigned reg_size);
static bool IsImmMovz(uint64_t imm, unsigned reg_size);
// Size of the code generated since label to the current position.
size_t SizeOfCodeGeneratedSince(Label* label) const {
VIXL_ASSERT(label->IsBound());
return buffer_->OffsetFrom(label->location());
}
size_t SizeOfCodeGenerated() const {
return buffer_->CursorOffset();
}
size_t BufferCapacity() const { return buffer_->capacity(); }
size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
@ -2025,7 +2091,7 @@ class Assembler {
}
}
#ifdef DEBUG
#ifdef VIXL_DEBUG
void AcquireBuffer() {
VIXL_ASSERT(buffer_monitor_ >= 0);
buffer_monitor_++;
@ -2037,16 +2103,16 @@ class Assembler {
}
#endif
inline PositionIndependentCodeOption pic() {
PositionIndependentCodeOption pic() const {
return pic_;
}
inline bool AllowPageOffsetDependentCode() {
bool AllowPageOffsetDependentCode() const {
return (pic() == PageOffsetDependentCode) ||
(pic() == PositionDependentCode);
}
static inline const Register& AppropriateZeroRegFor(const CPURegister& reg) {
static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
return reg.Is64Bits() ? xzr : wzr;
}
@ -2056,14 +2122,15 @@ class Assembler {
const MemOperand& addr,
LoadStoreOp op,
LoadStoreScalingOption option = PreferScaledOffset);
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairOp op);
static bool IsImmLSPair(int64_t offset, LSDataSize size);
void Prefetch(PrefetchOperation op,
const MemOperand& addr,
LoadStoreScalingOption option = PreferScaledOffset);
// TODO(all): The third parameter should be passed by reference but gcc 4.8.2
// reports a bogus uninitialised warning then.
@ -2077,18 +2144,12 @@ class Assembler {
unsigned imm_s,
unsigned imm_r,
LogicalOp op);
static bool IsImmLogical(uint64_t value,
unsigned width,
unsigned* n = NULL,
unsigned* imm_s = NULL,
unsigned* imm_r = NULL);
void ConditionalCompare(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op);
static bool IsImmConditionalCompare(int64_t immediate);
void AddSubWithCarry(const Register& rd,
const Register& rn,
@ -2096,8 +2157,6 @@ class Assembler {
FlagsUpdate S,
AddSubWithCarryOp op);
static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm);
// Functions for emulating operands not directly supported by the instruction
// set.
@ -2115,7 +2174,6 @@ class Assembler {
const Operand& operand,
FlagsUpdate S,
AddSubOp op);
static bool IsImmAddSub(int64_t immediate);
// Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
// registers. Only simple loads are supported; sign- and zero-extension (such
@ -2180,6 +2238,12 @@ class Assembler {
const FPRegister& fa,
FPDataProcessing3SourceOp op);
// Encode the specified MemOperand for the specified access size and scaling
// preference.
Instr LoadStoreMemOperand(const MemOperand& addr,
LSDataSize size,
LoadStoreScalingOption option);
// Link the current (not-yet-emitted) instruction to the specified label, then
// return an offset to be encoded in the instruction. If the label is not yet
// bound, an offset of 0 is returned.
@ -2205,7 +2269,7 @@ class Assembler {
CodeBuffer* buffer_;
PositionIndependentCodeOption pic_;
#ifdef DEBUG
#ifdef VIXL_DEBUG
int64_t buffer_monitor_;
#endif
};
@ -2239,7 +2303,7 @@ class CodeBufferCheckScope {
AssertPolicy assert_policy = kMaximumSize)
: assm_(assm) {
if (check_policy == kCheck) assm->EnsureSpaceFor(size);
#ifdef DEBUG
#ifdef VIXL_DEBUG
assm->bind(&start_);
size_ = size;
assert_policy_ = assert_policy;
@ -2251,7 +2315,7 @@ class CodeBufferCheckScope {
// This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
#ifdef DEBUG
#ifdef VIXL_DEBUG
size_ = 0;
assert_policy_ = kNoAssert;
assm->AcquireBuffer();
@ -2259,7 +2323,7 @@ class CodeBufferCheckScope {
}
~CodeBufferCheckScope() {
#ifdef DEBUG
#ifdef VIXL_DEBUG
assm_->ReleaseBuffer();
switch (assert_policy_) {
case kNoAssert: break;
@ -2277,7 +2341,7 @@ class CodeBufferCheckScope {
protected:
Assembler* assm_;
#ifdef DEBUG
#ifdef VIXL_DEBUG
Label start_;
size_t size_;
AssertPolicy assert_policy_;

View File

@ -31,12 +31,6 @@ namespace vixl {
const unsigned kNumberOfRegisters = 32;
const unsigned kNumberOfFPRegisters = 32;
// Callee saved registers are x21-x30(lr).
const int kNumberOfCalleeSavedRegisters = 10;
const int kFirstCalleeSavedRegisterIndex = 21;
// Callee saved FP registers are d8-d15.
const int kNumberOfCalleeSavedFPRegisters = 8;
const int kFirstCalleeSavedFPRegisterIndex = 8;
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@ -53,7 +47,6 @@ V_(Ra, 14, 10, Bits) /* Third source register. */ \
V_(Rt, 4, 0, Bits) /* Load/store register. */ \
V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
V_(PrefetchMode, 4, 0, Bits) \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, Bits) \
@ -109,6 +102,10 @@ V_(ImmLSUnsigned, 21, 10, Bits) \
V_(ImmLSPair, 21, 15, SignedBits) \
V_(SizeLS, 31, 30, Bits) \
V_(ImmShiftLS, 12, 12, Bits) \
V_(ImmPrefetchOperation, 4, 0, Bits) \
V_(PrefetchHint, 4, 3, Bits) \
V_(PrefetchTarget, 2, 1, Bits) \
V_(PrefetchStream, 0, 0, Bits) \
\
/* Other immediates */ \
V_(ImmUncondBranch, 25, 0, SignedBits) \
@ -269,6 +266,29 @@ enum BarrierType {
BarrierAll = 3
};
enum PrefetchOperation {
PLDL1KEEP = 0x00,
PLDL1STRM = 0x01,
PLDL2KEEP = 0x02,
PLDL2STRM = 0x03,
PLDL3KEEP = 0x04,
PLDL3STRM = 0x05,
PLIL1KEEP = 0x08,
PLIL1STRM = 0x09,
PLIL2KEEP = 0x0a,
PLIL2STRM = 0x0b,
PLIL3KEEP = 0x0c,
PLIL3STRM = 0x0d,
PSTL1KEEP = 0x10,
PSTL1STRM = 0x11,
PSTL2KEEP = 0x12,
PSTL2STRM = 0x13,
PSTL3KEEP = 0x14,
PSTL3STRM = 0x15
};
// System/special register names.
// This information is not encoded as one field but as the concatenation of
// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
@ -605,6 +625,12 @@ enum LoadStoreAnyOp {
LoadStoreAnyFixed = 0x08000000
};
// Any load pair or store pair.
enum LoadStorePairAnyOp {
LoadStorePairAnyFMask = 0x3a000000,
LoadStorePairAnyFixed = 0x28000000
};
#define LOAD_STORE_PAIR_OP_LIST(V) \
V(STP, w, 0x00000000), \
V(LDP, w, 0x00400000), \
@ -703,17 +729,6 @@ enum LoadLiteralOp {
V(LD, R, d, 0xC4400000)
// Load/store unscaled offset.
enum LoadStoreUnscaledOffsetOp {
LoadStoreUnscaledOffsetFixed = 0x38000000,
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
#define LOAD_STORE_UNSCALED(A, B, C, D) \
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
#undef LOAD_STORE_UNSCALED
};
// Load/store (post, pre, offset and unsigned.)
enum LoadStoreOp {
LoadStoreOpMask = 0xC4C00000,
@ -724,6 +739,18 @@ enum LoadStoreOp {
PRFM = 0xC0800000
};
// Load/store unscaled offset.
enum LoadStoreUnscaledOffsetOp {
LoadStoreUnscaledOffsetFixed = 0x38000000,
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
LoadStoreUnscaledOffsetMask = 0xFFE00C00,
PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
#define LOAD_STORE_UNSCALED(A, B, C, D) \
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
#undef LOAD_STORE_UNSCALED
};
// Load/store post index.
enum LoadStorePostIndex {
LoadStorePostIndexFixed = 0x38000400,

View File

@ -108,7 +108,7 @@ class DecoderVisitor {
}
private:
VisitorConstness constness_;
const VisitorConstness constness_;
};

View File

@ -34,6 +34,7 @@ Disassembler::Disassembler() {
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
buffer_pos_ = 0;
own_buffer_ = true;
code_address_offset_ = 0;
}
@ -42,6 +43,7 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
buffer_ = text_buffer;
buffer_pos_ = 0;
own_buffer_ = false;
code_address_offset_ = 0;
}
@ -739,9 +741,25 @@ void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
// shift calculation.
switch (instr->Mask(MoveWideImmediateMask)) {
case MOVN_w:
case MOVN_x: mnemonic = "movn"; break;
case MOVN_x:
if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
mnemonic = "movn";
} else {
mnemonic = "mov";
form = "'Rd, 'IMoveNeg";
}
} else {
mnemonic = "movn";
}
break;
case MOVZ_w:
case MOVZ_x: mnemonic = "movz"; break;
case MOVZ_x:
if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
mnemonic = "mov";
else
mnemonic = "movz";
break;
case MOVK_w:
case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
default: VIXL_UNREACHABLE();
@ -806,7 +824,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
#undef LS_UNSIGNEDOFFSET
case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
}
Format(instr, mnemonic, form);
}
@ -833,6 +851,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
const char *form_x = "'Xt, ['Xns'ILS]";
const char *form_s = "'St, ['Xns'ILS]";
const char *form_d = "'Dt, ['Xns'ILS]";
const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
case STURB_w: mnemonic = "sturb"; break;
@ -852,6 +871,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
case LDURSH_x: form = form_x; // Fall through.
case LDURSH_w: mnemonic = "ldursh"; break;
case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
default: form = "(LoadStoreUnscaledOffset)";
}
Format(instr, mnemonic, form);
@ -872,6 +892,11 @@ void Disassembler::VisitLoadLiteral(const Instruction* instr) {
form = "'Xt, 'ILLiteral 'LValue";
break;
}
case PRFM_lit: {
mnemonic = "prfm";
form = "'PrefOp, 'ILLiteral 'LValue";
break;
}
default: mnemonic = "unimplemented";
}
Format(instr, mnemonic, form);
@ -1344,7 +1369,7 @@ void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
void Disassembler::AppendAddressToOutput(const Instruction* instr,
const void* addr) {
USE(instr);
AppendToOutput("(addr %p)", addr);
AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
}
@ -1360,6 +1385,40 @@ void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
}
void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr) {
USE(instr);
int64_t rel_addr = CodeRelativeAddress(addr);
if (rel_addr >= 0) {
AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
} else {
AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
}
}
void Disassembler::AppendCodeRelativeCodeAddressToOutput(
const Instruction* instr, const void* addr) {
AppendCodeRelativeAddressToOutput(instr, addr);
}
void Disassembler::AppendCodeRelativeDataAddressToOutput(
const Instruction* instr, const void* addr) {
AppendCodeRelativeAddressToOutput(instr, addr);
}
void Disassembler::MapCodeAddress(int64_t base_address,
const Instruction* instr_address) {
set_code_address_offset(
base_address - reinterpret_cast<intptr_t>(instr_address));
}
int64_t Disassembler::CodeRelativeAddress(const void* addr) {
return reinterpret_cast<intptr_t>(addr) + code_address_offset();
}
void Disassembler::Format(const Instruction* instr, const char* mnemonic,
const char* format) {
VIXL_ASSERT(mnemonic != NULL);
@ -1486,16 +1545,20 @@ int Disassembler::SubstituteImmediateField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
VIXL_ASSERT(format[5] == 'L');
case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
if (format[5] == 'L') {
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
}
} else {
VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
if (format[5] == 'N')
imm = ~imm;
if (!instr->SixtyFourBits())
imm &= UINT64_C(0xffffffff);
AppendToOutput("#0x%" PRIx64, imm);
}
return 8;
}
@ -1634,14 +1697,31 @@ int Disassembler::SubstituteLiteralField(const Instruction* instr,
VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
USE(format);
const void * address = instr->LiteralAddress<const void *>();
switch (instr->Mask(LoadLiteralMask)) {
case LDR_w_lit:
case LDR_x_lit:
case LDRSW_x_lit:
case LDR_s_lit:
case LDR_d_lit:
AppendDataAddressToOutput(instr, instr->LiteralAddress());
AppendCodeRelativeDataAddressToOutput(instr, address);
break;
case PRFM_lit: {
// Use the prefetch hint to decide how to print the address.
switch (instr->PrefetchHint()) {
case 0x0: // PLD: prefetch for load.
case 0x2: // PST: prepare for store.
AppendCodeRelativeDataAddressToOutput(instr, address);
break;
case 0x1: // PLI: preload instructions.
AppendCodeRelativeCodeAddressToOutput(instr, address);
break;
case 0x3: // Unallocated hint.
AppendCodeRelativeAddressToOutput(instr, address);
break;
}
break;
}
default:
VIXL_UNREACHABLE();
}
@ -1701,17 +1781,22 @@ int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
(strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
int64_t offset = instr->ImmPCRel();
const Instruction * base = instr;
// Compute the target address based on the effective address (after applying
// code_address_offset). This is required for correct behaviour of adrp.
const Instruction* base = instr + code_address_offset();
if (format[9] == 'P') {
offset *= kPageSize;
base = AlignDown(base, kPageSize);
}
// Strip code_address_offset before printing, so we can use the
// semantically-correct AppendCodeRelativeAddressToOutput.
const void* target =
reinterpret_cast<const void*>(base + offset - code_address_offset());
const void* target = reinterpret_cast<const void*>(base + offset);
AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
AppendAddressToOutput(instr, target);
AppendCodeRelativeAddressToOutput(instr, target);
return 13;
}
@ -1738,7 +1823,7 @@ int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
AppendPCRelativeOffsetToOutput(instr, offset);
AppendToOutput(" ");
AppendCodeAddressToOutput(instr, target_address);
AppendCodeRelativeCodeAddressToOutput(instr, target_address);
return 8;
}
@ -1805,13 +1890,26 @@ int Disassembler::SubstitutePrefetchField(const Instruction* instr,
VIXL_ASSERT(format[0] == 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
static const char* hints[] = {"ld", "li", "st"};
static const char* stream_options[] = {"keep", "strm"};
const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
int level = (prefetch_mode >> 1) + 1;
const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
unsigned hint = instr->PrefetchHint();
unsigned target = instr->PrefetchTarget() + 1;
unsigned stream = instr->PrefetchStream();
AppendToOutput("p%sl%d%s", ls, level, ks);
if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
// Unallocated prefetch operations.
int prefetch_mode = instr->ImmPrefetchOperation();
AppendToOutput("#0b%c%c%c%c%c",
(prefetch_mode & (1 << 4)) ? '1' : '0',
(prefetch_mode & (1 << 3)) ? '1' : '0',
(prefetch_mode & (1 << 2)) ? '1' : '0',
(prefetch_mode & (1 << 1)) ? '1' : '0',
(prefetch_mode & (1 << 0)) ? '1' : '0');
} else {
VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
}
return 6;
}
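As a quick sanity check of the field split used above (hint in bits 4:3, target in
bits 2:1, stream in bit 0), here is a tiny standalone sketch decoding the PSTL2STRM
encoding (0x13) from the PrefetchOperation enum earlier in this diff:

#include <cstdio>

int main()
{
    const unsigned prfop = 0x13;                        // PSTL2STRM
    static const char *hints[] = { "ld", "li", "st" };
    static const char *stream_options[] = { "keep", "strm" };
    unsigned hint   = (prfop >> 3) & 3;                 // 2 -> "st"
    unsigned target = ((prfop >> 1) & 3) + 1;           // 1 + 1 -> L2
    unsigned stream = prfop & 1;                        // 1 -> "strm"
    printf("p%sl%u%s\n", hints[hint], target, stream_options[stream]);   // pstl2strm
    return 0;
}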

View File

@ -43,7 +43,7 @@ class Disassembler: public DecoderVisitor {
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(const Instruction* instr);
#define DECLARE(A) virtual void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
@ -65,23 +65,45 @@ class Disassembler: public DecoderVisitor {
// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
const void* addr);
// Same as the above, but for addresses that are not relative to the code
// buffer. They are currently not used by VIXL.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);
public:
// Get/Set the offset that should be added to code addresses when printing
// code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
// helpers.
// Below is an example of how a branch immediate instruction in memory at
// address 0xb010200 would disassemble with different offsets.
// Base address | Disassembly
// 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
// 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
// 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
int64_t CodeRelativeAddress(const void* instr);
private:
void Format(
const Instruction* instr, const char* mnemonic, const char* format);
@ -101,32 +123,40 @@ class Disassembler: public DecoderVisitor {
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);
inline bool RdIsZROrSP(const Instruction* instr) const {
bool RdIsZROrSP(const Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}
inline bool RnIsZROrSP(const Instruction* instr) const {
bool RnIsZROrSP(const Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}
inline bool RmIsZROrSP(const Instruction* instr) const {
bool RmIsZROrSP(const Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}
inline bool RaIsZROrSP(const Instruction* instr) const {
bool RaIsZROrSP(const Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
int64_t code_address_offset() const { return code_address_offset_; }
protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
void set_code_address_offset(int64_t code_address_offset) {
code_address_offset_ = code_address_offset;
}
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
int64_t code_address_offset_;
};
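The comment above notes that a subclass can override the code-address hooks to print
symbolic names. A minimal sketch of that idea (the include path and the hard-coded
"known symbol" address are assumptions for illustration):

#include "a64/disasm-a64.h"

class NamingDisassembler : public vixl::Disassembler {
 protected:
  // Print a made-up symbol name for one known address, otherwise fall back to
  // the default code-relative address formatting.
  virtual void AppendCodeRelativeCodeAddressToOutput(
      const vixl::Instruction* instr, const void* addr) {
    if (CodeRelativeAddress(addr) == 0x400) {   // hypothetical symbol address
      AppendToOutput("(my_function)");
    } else {
      vixl::Disassembler::AppendCodeRelativeCodeAddressToOutput(instr, addr);
    }
  }
};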

View File

@ -30,6 +30,20 @@
namespace vixl {
// Floating-point infinity values.
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
rawbits_to_double(UINT64_C(0xfff0000000000000));
// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
@ -54,6 +68,55 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
}
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.

View File

@ -96,6 +96,17 @@ const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
// Floating-point infinity values.
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;
// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
@ -140,33 +151,33 @@ enum Reg31Mode {
class Instruction {
public:
inline Instr InstructionBits() const {
Instr InstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}
inline void SetInstructionBits(Instr new_instr) {
void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}
inline int Bit(int pos) const {
int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
inline uint32_t Bits(int msb, int lsb) const {
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}
inline int32_t SignedBits(int msb, int lsb) const {
int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}
inline Instr Mask(uint32_t mask) const {
Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
inline int64_t Name() const { return Func(HighBit, LowBit); }
int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
@ -182,56 +193,64 @@ class Instruction {
float ImmFP32() const;
double ImmFP64() const;
inline LSDataSize SizeLSPair() const {
LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
// Helpers.
inline bool IsCondBranchImm() const {
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
inline bool IsUncondBranchImm() const {
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
inline bool IsCompareBranch() const {
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
inline bool IsTestBranch() const {
bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
inline bool IsPCRelAddressing() const {
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
inline bool IsLogicalImmediate() const {
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
inline bool IsAddSubImmediate() const {
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
inline bool IsAddSubExtended() const {
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
inline bool IsLoadOrStore() const {
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
inline bool IsMovn() const {
bool IsLoad() const;
bool IsStore() const;
bool IsLoadLiteral() const {
// This includes PRFM_lit.
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
inline Reg31Mode RdMode() const {
Reg31Mode RdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
@ -260,7 +279,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
inline Reg31Mode RnMode() const {
Reg31Mode RnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
@ -272,7 +291,7 @@ class Instruction {
return Reg31IsZeroRegister;
}
inline ImmBranchType BranchType() const {
ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
@ -296,55 +315,66 @@ class Instruction {
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
inline uint8_t* LiteralAddress() const {
int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
const uint8_t* address = reinterpret_cast<const uint8_t*>(this) + offset;
// Note that the result is safely mutable only if the backing buffer is
// safely mutable.
return const_cast<uint8_t*>(address);
// Calculate the address of a literal referred to by a load-literal
// instruction, and return it as the specified type.
//
// The literal itself is safely mutable only if the backing buffer is safely
// mutable.
template <typename T>
T LiteralAddress() const {
uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
uint64_t address_raw = base_raw + offset;
// Cast the address using a C-style cast. A reinterpret_cast would be
// appropriate, but it can't cast one integral type to another.
T address = (T)(address_raw);
// Assert that the address can be represented by the specified type.
VIXL_ASSERT((uint64_t)(address) == address_raw);
return address;
}
inline uint32_t Literal32() const {
uint32_t Literal32() const {
uint32_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
return literal;
}
inline uint64_t Literal64() const {
uint64_t Literal64() const {
uint64_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
return literal;
}
inline float LiteralFP32() const {
float LiteralFP32() const {
return rawbits_to_float(Literal32());
}
inline double LiteralFP64() const {
double LiteralFP64() const {
return rawbits_to_double(Literal64());
}
inline const Instruction* NextInstruction() const {
const Instruction* NextInstruction() const {
return this + kInstructionSize;
}
inline const Instruction* InstructionAtOffset(int64_t offset) const {
const Instruction* InstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
template<typename T> static inline Instruction* Cast(T src) {
template<typename T> static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
template<typename T> static inline const Instruction* CastConst(T src) {
template<typename T> static const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}
private:
inline int ImmBranch() const;
int ImmBranch() const;
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);

View File

@ -58,7 +58,7 @@ const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
#ifdef DEBUG
#ifdef VIXL_DEBUG
#define VIXL_ASSERT(condition) assert(condition)
#define VIXL_CHECK(condition) VIXL_ASSERT(condition)
#define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()

View File

@ -135,4 +135,17 @@ bool IsPowerOf2(int64_t value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
VIXL_ASSERT((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
count++;
}
imm >>= 16;
}
return count;
}
} // namespace vixl
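A small usage sketch for the helper added above (the vixl namespace is per the file
above; the "utils.h" include path is an assumption):

#include <cassert>
#include <cstdint>
#include "utils.h"

int main()
{
    // 0x0000ffff00000000 has halfwords 0x0000, 0x0000, 0xffff, 0x0000 (low to
    // high), so three of the four 16-bit halfwords are clear.
    assert(vixl::CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) == 3);
    // The assembler uses this count when checking IsImmMovz()/IsImmMovn()
    // style encodability of wide-move immediates.
    return 0;
}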

View File

@ -166,6 +166,8 @@ int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
@ -174,14 +176,14 @@ bool IsWordAligned(T pointer) {
return ((intptr_t)(pointer) & 3) == 0;
}
// Increment a pointer until it has the specified alignment.
// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
uint64_t pointer_raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t align_step = (alignment - pointer_raw) % alignment;
VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
@ -189,14 +191,14 @@ T AlignUp(T pointer, size_t alignment) {
return (T)(pointer_raw + align_step);
}
// Decrement a pointer until it has the specified alignment.
// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
// Use C-style casts to get static_cast behaviour for integral types (T), and
// reinterpret_cast behaviour for other types.
uintptr_t pointer_raw = (uintptr_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
uint64_t pointer_raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
size_t align_step = pointer_raw % alignment;
VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
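A brief usage sketch for the two templates above (they accept both integral values and
pointers thanks to the C-style casts; the "utils.h" include path is an assumption):

#include <cassert>
#include <cstdint>
#include "utils.h"

int main()
{
    // Integral use: round 0x1003 down/up to a 4-byte boundary.
    assert(vixl::AlignDown(UINT64_C(0x1003), 4) == UINT64_C(0x1000));
    assert(vixl::AlignUp(UINT64_C(0x1003), 4) == UINT64_C(0x1004));

    // Pointer use, in the spirit of the disassembler's AlignDown(base, kPageSize)
    // for adrp targets.
    char buf[32];
    char *p = vixl::AlignUp(buf + 1, 8);
    assert(reinterpret_cast<uintptr_t>(p) % 8 == 0);
    return 0;
}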

View File

@ -163,30 +163,23 @@ static void armv7m_reset(void *opaque)
}
/* Init CPU and memory for a v7-M based board.
flash_size and sram_size are in kb.
mem_size is in bytes.
Returns the NVIC array. */
qemu_irq *armv7m_init(MemoryRegion *system_memory,
int flash_size, int sram_size,
qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
const char *kernel_filename, const char *cpu_model)
{
ARMCPU *cpu;
CPUARMState *env;
DeviceState *nvic;
/* FIXME: make this local state. */
static qemu_irq pic[64];
qemu_irq *pic = g_new(qemu_irq, num_irq);
int image_size;
uint64_t entry;
uint64_t lowaddr;
int i;
int big_endian;
MemoryRegion *sram = g_new(MemoryRegion, 1);
MemoryRegion *flash = g_new(MemoryRegion, 1);
MemoryRegion *hack = g_new(MemoryRegion, 1);
flash_size *= 1024;
sram_size *= 1024;
if (cpu_model == NULL) {
cpu_model = "cortex-m3";
}
@ -197,35 +190,15 @@ qemu_irq *armv7m_init(MemoryRegion *system_memory,
}
env = &cpu->env;
#if 0
/* > 32Mb SRAM gets complicated because it overlaps the bitband area.
We don't have proper commandline options, so allocate half of memory
as SRAM, up to a maximum of 32Mb, and the rest as code. */
if (ram_size > (512 + 32) * 1024 * 1024)
ram_size = (512 + 32) * 1024 * 1024;
sram_size = (ram_size / 2) & TARGET_PAGE_MASK;
if (sram_size > 32 * 1024 * 1024)
sram_size = 32 * 1024 * 1024;
code_size = ram_size - sram_size;
#endif
/* Flash programming is done via the SCU, so pretend it is ROM. */
memory_region_init_ram(flash, NULL, "armv7m.flash", flash_size,
&error_abort);
vmstate_register_ram_global(flash);
memory_region_set_readonly(flash, true);
memory_region_add_subregion(system_memory, 0, flash);
memory_region_init_ram(sram, NULL, "armv7m.sram", sram_size, &error_abort);
vmstate_register_ram_global(sram);
memory_region_add_subregion(system_memory, 0x20000000, sram);
armv7m_bitband_init();
nvic = qdev_create(NULL, "armv7m_nvic");
qdev_prop_set_uint32(nvic, "num-irq", num_irq);
env->nvic = nvic;
qdev_init_nofail(nvic);
sysbus_connect_irq(SYS_BUS_DEVICE(nvic), 0,
qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
for (i = 0; i < 64; i++) {
for (i = 0; i < num_irq; i++) {
pic[i] = qdev_get_gpio_in(nvic, i);
}
@ -244,7 +217,7 @@ qemu_irq *armv7m_init(MemoryRegion *system_memory,
image_size = load_elf(kernel_filename, NULL, NULL, &entry, &lowaddr,
NULL, big_endian, ELF_MACHINE, 1);
if (image_size < 0) {
image_size = load_image_targphys(kernel_filename, 0, flash_size);
image_size = load_image_targphys(kernel_filename, 0, mem_size);
lowaddr = 0;
}
if (image_size < 0) {

View File

@ -463,8 +463,26 @@ static void do_cpu_reset(void *opaque)
* (SCR.NS = 0), we change that here if non-secure boot has been
* requested.
*/
-if (arm_feature(env, ARM_FEATURE_EL3) && !info->secure_boot) {
-env->cp15.scr_el3 |= SCR_NS;
+if (arm_feature(env, ARM_FEATURE_EL3)) {
/* AArch64 is defined to come out of reset into EL3 if enabled.
* If we are booting Linux then we need to adjust our EL as
* Linux expects us to be in EL2 or EL1. AArch32 resets into
* SVC, which Linux expects, so no privilege/exception level to
* adjust.
*/
if (env->aarch64) {
if (arm_feature(env, ARM_FEATURE_EL2)) {
env->pstate = PSTATE_MODE_EL2h;
} else {
env->pstate = PSTATE_MODE_EL1h;
}
}
/* Set to non-secure if not a secure boot */
if (!info->secure_boot) {
/* Linux expects non-secure state */
env->cp15.scr_el3 |= SCR_NS;
}
}
if (CPU(cpu) == first_cpu) {
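The net effect for a direct Linux boot: the CPU still resets into the highest implemented EL, and this hook then drops it to the EL Linux expects. A small sketch of that decision (helper and parameters are hypothetical stand-ins for the arm_feature() checks above):

    /* Sketch, not the patch's code. */
    static void adjust_reset_state_for_linux(bool has_el2, bool secure_boot,
                                             uint32_t *pstate, uint64_t *scr_el3)
    {
        *pstate = has_el2 ? PSTATE_MODE_EL2h : PSTATE_MODE_EL1h;
        if (!secure_boot) {
            *scr_el3 |= SCR_NS;   /* Linux expects to run non-secure */
        }
    }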


@ -29,6 +29,8 @@
#define BP_OLED_SSI 0x02
#define BP_GAMEPAD 0x04
#define NUM_IRQ_LINES 64
typedef const struct {
const char *name;
uint32_t did0;
@ -1220,10 +1222,27 @@ static void stellaris_init(const char *kernel_filename, const char *cpu_model,
int i;
int j;
-flash_size = ((board->dc0 & 0xffff) + 1) << 1;
-sram_size = (board->dc0 >> 18) + 1;
-pic = armv7m_init(get_system_memory(),
-flash_size, sram_size, kernel_filename, cpu_model);
MemoryRegion *sram = g_new(MemoryRegion, 1);
MemoryRegion *flash = g_new(MemoryRegion, 1);
MemoryRegion *system_memory = get_system_memory();
flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024;
sram_size = ((board->dc0 >> 18) + 1) * 1024;
/* Flash programming is done via the SCU, so pretend it is ROM. */
memory_region_init_ram(flash, NULL, "stellaris.flash", flash_size,
&error_abort);
vmstate_register_ram_global(flash);
memory_region_set_readonly(flash, true);
memory_region_add_subregion(system_memory, 0, flash);
memory_region_init_ram(sram, NULL, "stellaris.sram", sram_size,
&error_abort);
vmstate_register_ram_global(sram);
memory_region_add_subregion(system_memory, 0x20000000, sram);
pic = armv7m_init(system_memory, flash_size, NUM_IRQ_LINES,
kernel_filename, cpu_model);
if (board->dc1 & (1 << 16)) {
dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000,
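The sizes are now computed in bytes directly from the DC0 register fields. A worked example with a made-up DC0 value (not taken from the patch):

    #include <assert.h>
    #include <stdint.h>

    static void check_dc0_decode(void)
    {
        uint32_t dc0 = 0x00ff007f;                                  /* illustrative */
        assert(((((dc0 & 0xffff) + 1) << 1) * 1024) == 256 * 1024); /* flash bytes */
        assert((((dc0 >> 18) + 1) * 1024) == 64 * 1024);            /* sram bytes  */
    }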


@ -441,10 +441,32 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
int i;
hwaddr size = vbi->memmap[VIRT_MMIO].size;
-/* Note that we have to create the transports in forwards order
-* so that command line devices are inserted lowest address first,
-* and then add dtb nodes in reverse order so that they appear in
-* the finished device tree lowest address first.
/* We create the transports in forwards order. Since qbus_realize()
* prepends (not appends) new child buses, the incrementing loop below will
* create a list of virtio-mmio buses with decreasing base addresses.
*
* When a -device option is processed from the command line,
* qbus_find_recursive() picks the next free virtio-mmio bus in forwards
* order. The upshot is that -device options in increasing command line
* order are mapped to virtio-mmio buses with decreasing base addresses.
*
* When this code was originally written, that arrangement ensured that the
* guest Linux kernel would give the lowest "name" (/dev/vda, eth0, etc) to
* the first -device on the command line. (The end-to-end order is a
* function of this loop, qbus_realize(), qbus_find_recursive(), and the
* guest kernel's name-to-address assignment strategy.)
*
* Meanwhile, the kernel's traversal seems to have been reversed; see eg.
* the message, if not necessarily the code, of commit 70161ff336.
* Therefore the loop now establishes the inverse of the original intent.
*
* Unfortunately, we can't counteract the kernel change by reversing the
* loop; it would break existing command lines.
*
* In any case, the kernel makes no guarantee about the stability of
* enumeration order of virtio devices (as demonstrated by it changing
* between kernel versions). For reliable and stable identification
* of disks users must use UUIDs or similar mechanisms.
*/
for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
int irq = vbi->irqmap[VIRT_MMIO] + i;
@ -453,6 +475,13 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
sysbus_create_simple("virtio-mmio", base, pic[irq]);
}
/* We add dtb nodes in reverse order so that they appear in the finished
* device tree lowest address first.
*
* Note that this mapping is independent of the loop above. The previous
* loop influences virtio device to virtio transport assignment, whereas
* this loop controls how virtio transports are laid out in the dtb.
*/
for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
char *nodename;
int irq = vbi->irqmap[VIRT_MMIO] + i;


@ -244,9 +244,31 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
-#if (NB_MMU_MODES > 6)
-#error "NB_MMU_MODES > 6 is not supported for now"
-#endif /* (NB_MMU_MODES > 6) */
#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
#define CPU_MMU_INDEX 6
#define MEMSUFFIX MMU_MODE6_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 7) */
#if (NB_MMU_MODES > 7)
/* Note that supporting NB_MMU_MODES == 9 would require
* changes to at least the ARM TCG backend.
*/
#error "NB_MMU_MODES > 7 is not supported for now"
#endif /* (NB_MMU_MODES > 7) */
/* these access are slower, they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
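The new block is only compiled if the target names its seventh MMU mode. A hedged sketch of what a target header would add (the suffix is invented here); the expansion above then emits load/store accessors such as cpu_ldub_mode6() and cpu_stb_mode6(), all performing their access with CPU_MMU_INDEX == 6:

    /* Hypothetical target header fragment, not from the patch. */
    #define MMU_MODE6_SUFFIX _mode6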


@ -15,8 +15,7 @@
#include "hw/irq.h"
/* armv7m.c */
-qemu_irq *armv7m_init(MemoryRegion *system_memory,
-int flash_size, int sram_size,
+qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
const char *kernel_filename, const char *cpu_model);
/* arm_boot.c */


@ -113,7 +113,14 @@ static void arm_cpu_reset(CPUState *s)
/* and to the FP/Neon instructions */
env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
#else
-env->pstate = PSTATE_MODE_EL1h;
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {
env->pstate = PSTATE_MODE_EL3h;
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
env->pstate = PSTATE_MODE_EL2h;
} else {
env->pstate = PSTATE_MODE_EL1h;
}
env->pc = cpu->rvbar;
#endif
} else {
@ -320,6 +327,29 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}
static bool arm_cpu_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
int cur_el;
cpu_synchronize_state(cs);
/* In 32bit guest endianness is determined by looking at CPSR's E bit */
if (!is_a64(env)) {
return (env->uncached_cpsr & CPSR_E) ? 1 : 0;
}
cur_el = arm_current_el(env);
if (cur_el == 0) {
return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
}
return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
}
#endif
static inline void set_feature(CPUARMState *env, int feature)
@ -1189,6 +1219,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = arm_cpu_do_interrupt;
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu;
cc->virtio_is_big_endian = arm_cpu_is_big_endian;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";


@ -32,6 +32,8 @@
# define ELF_MACHINE EM_ARM
#endif
#define TARGET_IS_BIENDIAN 1
#define CPUArchState struct CPUARMState
#include "qemu-common.h"
@ -98,7 +100,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
struct arm_boot_info;
-#define NB_MMU_MODES 4
+#define NB_MMU_MODES 7
/* We currently assume float and double are IEEE single and double
precision respectively.
@ -1110,8 +1112,14 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
* a register definition to override a previous definition for the
* same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
* old must have the OVERRIDE bit set.
-* NO_MIGRATE indicates that this register should be ignored for migration;
-* (eg because any state is accessed via some other coprocessor register).
* ALIAS indicates that this register is an alias view of some underlying
* state which is also visible via another register, and that the other
* register is handling migration; registers marked ALIAS will not be migrated
* but may have their state set by syncing of register state from KVM.
* NO_RAW indicates that this register has no underlying state and does not
* support raw access for state saving/loading; it will not be used for either
* migration or KVM state synchronization. (Typically this is for "registers"
* which are actually used as instructions for cache maintenance and so on.)
* IO indicates that this register does I/O and therefore its accesses
* need to be surrounded by gen_io_start()/gen_io_end(). In particular,
* registers which implement clocks or timers require this.
@ -1121,8 +1129,9 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
-#define ARM_CP_NO_MIGRATE 32
+#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
@ -1132,7 +1141,7 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK 0x7f
+#define ARM_CP_FLAG_MASK 0xff
/* Valid values for ARMCPRegInfo state field, indicating which of
* the AArch32 and AArch64 execution states this register is visible in.
@ -1211,6 +1220,10 @@ static inline bool cptype_valid(int cptype)
*/
static inline int arm_current_el(CPUARMState *env)
{
if (arm_feature(env, ARM_FEATURE_M)) {
return !((env->v7m.exception == 0) && (env->v7m.control & 1));
}
if (is_a64(env)) {
return extract32(env->pstate, 2, 2);
}
@ -1568,13 +1581,90 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
-/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _user
-#define MMU_MODE1_SUFFIX _kernel
/* ARM has the following "translation regimes" (as the ARM ARM calls them):
*
* If EL3 is 64-bit:
* + NonSecure EL1 & 0 stage 1
* + NonSecure EL1 & 0 stage 2
* + NonSecure EL2
* + Secure EL1 & EL0
* + Secure EL3
* If EL3 is 32-bit:
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
* + Secure PL0 & PL1
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
* 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
* may differ in access permissions even if the VA->PA map is the same
* 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
* translation, which means that we have one mmu_idx that deals with two
* concatenated translation regimes [this sort of combined s1+2 TLB is
* architecturally permitted]
* 3. we don't need to allocate an mmu_idx to translations that we won't be
* handling via the TLB. The only way to do a stage 1 translation without
* the immediate stage 2 translation is via the ATS or AT system insns,
* which can be slow-pathed and always do a page table walk.
* 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
* translation regimes, because they map reasonably well to each other
* and they can't both be active at the same time.
* This gives us the following list of mmu_idx values:
*
* NS EL0 (aka NS PL0) stage 1+2
* NS EL1 (aka NS PL1) stage 1+2
* NS EL2 (aka NS PL2)
* S EL3 (aka S PL1)
* S EL0 (aka S PL0)
* S EL1 (not used if EL3 is 32 bit)
* NS EL0+1 stage 2
*
* (The last of these is an mmu_idx because we want to be able to use the TLB
* for the accesses done as part of a stage 1 page table walk, rather than
* having to walk the stage 2 page table over and over.)
*
* Our enumeration includes at the end some entries which are not "true"
* mmu_idx values in that they don't have corresponding TLBs and are only
* valid for doing slow path page table walks.
*
* The constant names here are patterned after the general style of the names
* of the AT/ATS operations.
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
*/
typedef enum ARMMMUIdx {
ARMMMUIdx_S12NSE0 = 0,
ARMMMUIdx_S12NSE1 = 1,
ARMMMUIdx_S1E2 = 2,
ARMMMUIdx_S1E3 = 3,
ARMMMUIdx_S1SE0 = 4,
ARMMMUIdx_S1SE1 = 5,
ARMMMUIdx_S2NS = 6,
/* Indexes below here don't have TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
*/
ARMMMUIdx_S1NSE0 = 7,
ARMMMUIdx_S1NSE1 = 8,
} ARMMMUIdx;
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUARMState *env)
/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
-return arm_current_el(env);
assert(mmu_idx < ARMMMUIdx_S2NS);
return mmu_idx & 3;
}
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env)
{
int el = arm_current_el(env);
if (el < 2 && arm_is_secure_below_el3(env)) {
return ARMMMUIdx_S1SE0 + el;
}
return el;
}
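A quick sanity sketch of the layout described above (assumes these cpu.h definitions are in scope; not part of the patch):

    #include <assert.h>

    static void check_mmu_idx_to_el(void)
    {
        /* "mmu_idx => EL is just mmu_idx & 3" for the TLB-backed indexes. */
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S12NSE0) == 0);
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S12NSE1) == 1);
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S1E2) == 2);
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S1E3) == 3);
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S1SE0) == 0);
        assert(arm_mmu_idx_to_el(ARMMMUIdx_S1SE1) == 1);
    }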
/* Return the Exception Level targeted by debug exceptions;
@ -1641,9 +1731,13 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* Bit usage in the TB flags field: bit 31 indicates whether we are
* in 32 or 64 bit mode. The meaning of the other bits depends on that.
* We put flags which are shared between 32 and 64 bit mode at the top
* of the word, and flags which apply to only one mode at the bottom.
*/
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX_SHIFT 28
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
@ -1652,8 +1746,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV_SHIFT 6
-#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
@ -1679,8 +1771,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
/* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_EL_SHIFT 0
-#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
@ -1691,14 +1781,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
(((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX(F) \
(((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
(((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
(((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
(((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV(F) \
-(((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
(((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
@ -1713,8 +1803,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
(((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_EL(F) \
-(((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
(((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
@ -1738,8 +1826,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (is_a64(env)) {
*pc = env->pc;
-*flags = ARM_TBFLAG_AARCH64_STATE_MASK
-| (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+*flags = ARM_TBFLAG_AARCH64_STATE_MASK;
if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
*flags |= ARM_TBFLAG_AA64_FPEN_MASK;
}
@ -1757,21 +1844,12 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
} else {
-int privmode;
*pc = env->regs[15];
*flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
| (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
| (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
| (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
| (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
-if (arm_feature(env, ARM_FEATURE_M)) {
-privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
-} else {
-privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
-}
-if (privmode) {
-*flags |= ARM_TBFLAG_PRIV_MASK;
-}
if (!(access_secure_reg(env))) {
*flags |= ARM_TBFLAG_NS_MASK;
}
@ -1799,6 +1877,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
*flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
*cs_base = 0;
}
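The mmu_idx now rides in TB flags bits [30:28], shared by the AArch32 and AArch64 layouts. A small round-trip sketch using the accessors defined earlier (assumes those macros and the enum are in scope):

    #include <assert.h>
    #include <stdint.h>

    static void check_tbflag_mmuidx_roundtrip(void)
    {
        uint32_t flags = ARM_TBFLAG_AARCH64_STATE_MASK;

        flags |= (ARMMMUIdx_S1SE1 << ARM_TBFLAG_MMUIDX_SHIFT);
        /* The 3-bit field carries any mmu_idx 0..7 without touching bit 31. */
        assert(ARM_TBFLAG_MMUIDX(flags) == ARMMMUIdx_S1SE1);
        assert(ARM_TBFLAG_AARCH64_STATE(flags) == 1);
    }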


@ -135,6 +135,9 @@ float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float32_squash_input_denormal(a, fpst);
b = float32_squash_input_denormal(b, fpst);
if ((float32_is_zero(a) && float32_is_infinity(b)) ||
(float32_is_infinity(a) && float32_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
@ -148,6 +151,9 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float64_squash_input_denormal(a, fpst);
b = float64_squash_input_denormal(b, fpst);
if ((float64_is_zero(a) && float64_is_infinity(b)) ||
(float64_is_infinity(a) && float64_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
@ -223,6 +229,9 @@ float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float32_squash_input_denormal(a, fpst);
b = float32_squash_input_denormal(b, fpst);
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@ -235,6 +244,9 @@ float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float64_squash_input_denormal(a, fpst);
b = float64_squash_input_denormal(b, fpst);
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
@ -247,6 +259,9 @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float32_squash_input_denormal(a, fpst);
b = float32_squash_input_denormal(b, fpst);
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@ -259,6 +274,9 @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
a = float64_squash_input_denormal(a, fpst);
b = float64_squash_input_denormal(b, fpst);
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
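Squashing both operands matters because, with FPCR.FZ set, a denormal input must already read as zero when the (0 x infinity) special case is tested, so these helpers return the architecturally required 2.0 instead of falling through. A standalone sketch using QEMU's softfloat API (assumed available via fpu/softfloat.h; not part of the patch):

    #include <assert.h>

    static void check_input_flush(float_status *st)
    {
        float32 denormal = make_float32(0x00000001);   /* smallest subnormal */

        set_flush_inputs_to_zero(1, st);
        /* With flush-inputs-to-zero enabled the operand squashes to zero... */
        assert(float32_is_zero(float32_squash_input_denormal(denormal, st)));
        /* ...while the original bit pattern is of course still non-zero. */
        assert(!float32_is_zero(denormal));
    }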

(file diff suppressed because it is too large)


@ -193,9 +193,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
if (!write_list_to_kvmstate(cpu)) {
return EINVAL;
}
/* TODO:
* FP state
* system registers
*/
return ret;
}
@ -269,6 +272,14 @@ int kvm_arch_get_registers(CPUState *cs)
}
}
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
/* Note that it's OK to have registers which aren't in CPUState,
* so we can ignore a failure return here.
*/
write_list_to_cpustate(cpu);
/* TODO: other registers */
return ret;
}
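The get path is now two steps: pull the cpreg values out of KVM, then apply them to CPUARMState. A rough sketch of that flow using the same helpers (the wrapper itself is hypothetical):

    /* Not in the tree; mirrors the two calls added above. */
    static int sync_cpregs_from_kvm(ARMCPU *cpu)
    {
        if (!write_kvmstate_to_list(cpu)) {
            return EINVAL;            /* same convention as the code above */
        }
        /* Failures here are tolerated: some KVM-side registers have no
         * corresponding CPUARMState field.
         */
        write_list_to_cpustate(cpu);
        return 0;
    }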


@ -123,6 +123,23 @@ void a64_translate_init(void)
#endif
}
static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
{
/* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
* if EL1, access as if EL0; otherwise access at current EL
*/
switch (s->mmu_idx) {
case ARMMMUIdx_S12NSE1:
return ARMMMUIdx_S12NSE0;
case ARMMMUIdx_S1SE1:
return ARMMMUIdx_S1SE0;
case ARMMMUIdx_S2NS:
g_assert_not_reached();
default:
return s->mmu_idx;
}
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags)
{
@ -2107,7 +2124,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
-int memidx = is_unpriv ? 1 : get_mem_index(s);
+int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
if (is_store) {
do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
@ -10922,14 +10939,15 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->bswap_code = 0;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
-dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
+dc->user = (dc->current_el == 0);
#endif
dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
-dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:


@ -113,6 +113,28 @@ void arm_translate_init(void)
a64_translate_init();
}
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
/* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
* insns:
* if PL2, UNPREDICTABLE (we choose to implement as if PL0)
* otherwise, access as if at PL0.
*/
switch (s->mmu_idx) {
case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_S12NSE0:
case ARMMMUIdx_S12NSE1:
return ARMMMUIdx_S12NSE0;
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1SE1:
return ARMMMUIdx_S1SE0;
case ARMMMUIdx_S2NS:
default:
g_assert_not_reached();
}
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@ -8739,6 +8761,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
ARCH(6T2);
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
if (i < shift) {
/* UNPREDICTABLE; we choose to UNDEF */
goto illegal_op;
}
i = i + 1 - shift;
if (rm == 15) {
tmp = tcg_temp_new_i32();
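The new guard rejects A32 BFI/BFC encodings whose msb field is smaller than the lsb field, which the architecture leaves UNPREDICTABLE; previously such an insn produced a nonsensical negative field width (the crash mentioned in the cover letter), whereas it now UNDEFs. A tiny sketch of the constraint (not the patch's code):

    /* Sketch: the encoding is only valid when msb >= lsb, i.e. width >= 1. */
    static bool bfi_encoding_valid(unsigned msb, unsigned lsb)
    {
        return msb >= lsb;
    }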
@ -8793,7 +8819,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
/* ldrt/strt */
-i = MMU_USER_IDX;
+i = get_a32_user_mem_index(s);
} else {
i = get_mem_index(s);
}
@ -10173,7 +10199,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
-memidx = MMU_USER_IDX;
+memidx = get_a32_user_mem_index(s);
break;
case 0x9: /* Post-decrement. */
imm = -imm;
@ -11032,8 +11058,10 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
-dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+dc->user = (dc->current_el == 0);
#endif
dc->ns = ARM_TBFLAG_NS(tb->flags);
dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
@ -11042,7 +11070,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
-dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:


@ -20,6 +20,7 @@ typedef struct DisasContext {
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
bool ns; /* Use non-secure CPREG bank on access */
bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
bool vfp_enabled; /* FP enabled via FPSCR.EN */
@ -69,7 +70,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
-return s->current_el;
+return s->mmu_idx;
}
/* target-specific extra values for is_jmp */