diff --git a/dep/vixl/CMakeLists.txt b/dep/vixl/CMakeLists.txt index 6a17843bb..49cb09a3e 100644 --- a/dep/vixl/CMakeLists.txt +++ b/dep/vixl/CMakeLists.txt @@ -59,26 +59,30 @@ if(CPU_ARCH_ARM64) include/vixl/aarch64/constants-aarch64.h include/vixl/aarch64/cpu-aarch64.h include/vixl/aarch64/cpu-features-auditor-aarch64.h + include/vixl/aarch64/debugger-aarch64.h include/vixl/aarch64/decoder-aarch64.h + include/vixl/aarch64/decoder-constants-aarch64.h + include/vixl/aarch64/decoder-visitor-map-aarch64.h include/vixl/aarch64/disasm-aarch64.h include/vixl/aarch64/instructions-aarch64.h - include/vixl/aarch64/instrument-aarch64.h include/vixl/aarch64/macro-assembler-aarch64.h include/vixl/aarch64/operands-aarch64.h + include/vixl/aarch64/registers-aarch64.h include/vixl/aarch64/simulator-aarch64.h include/vixl/aarch64/simulator-constants-aarch64.h src/aarch64/assembler-aarch64.cc + src/aarch64/assembler-sve-aarch64.cc src/aarch64/cpu-aarch64.cc src/aarch64/cpu-features-auditor-aarch64.cc src/aarch64/decoder-aarch64.cc src/aarch64/disasm-aarch64.cc src/aarch64/instructions-aarch64.cc - src/aarch64/instrument-aarch64.cc src/aarch64/logic-aarch64.cc src/aarch64/macro-assembler-aarch64.cc + src/aarch64/macro-assembler-sve-aarch64.cc src/aarch64/operands-aarch64.cc src/aarch64/pointer-auth-aarch64.cc - src/aarch64/simulator-aarch64.cc + src/aarch64/registers-aarch64.cc ) target_include_directories(vixl PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include/vixl/aarch64 diff --git a/dep/vixl/include/vixl/aarch32/assembler-aarch32.h b/dep/vixl/include/vixl/aarch32/assembler-aarch32.h index 9c6b6e12b..bb7df8404 100644 --- a/dep/vixl/include/vixl/aarch32/assembler-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/assembler-aarch32.h @@ -27,10 +27,10 @@ #ifndef VIXL_AARCH32_ASSEMBLER_AARCH32_H_ #define VIXL_AARCH32_ASSEMBLER_AARCH32_H_ -#include "../assembler-base-vixl.h" +#include "assembler-base-vixl.h" -#include "instructions-aarch32.h" -#include "location-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/location-aarch32.h" namespace vixl { namespace aarch32 { diff --git a/dep/vixl/include/vixl/aarch32/constants-aarch32.h b/dep/vixl/include/vixl/aarch32/constants-aarch32.h index ea7244e21..6d79834d9 100644 --- a/dep/vixl/include/vixl/aarch32/constants-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/constants-aarch32.h @@ -32,7 +32,7 @@ extern "C" { #include <stdint.h> } -#include "../globals-vixl.h" +#include "globals-vixl.h" namespace vixl { diff --git a/dep/vixl/include/vixl/aarch32/disasm-aarch32.h b/dep/vixl/include/vixl/aarch32/disasm-aarch32.h index 81520f22b..46964081e 100644 --- a/dep/vixl/include/vixl/aarch32/disasm-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/disasm-aarch32.h @@ -33,8 +33,14 @@ extern "C" { #include <iomanip> -#include "constants-aarch32.h" -#include "operands-aarch32.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/operands-aarch32.h" + +// Microsoft Visual C++ defines a `mvn` macro that conflicts with our own +// definition.
+#if defined(_MSC_VER) && defined(mvn) +#undef mvn +#endif namespace vixl { namespace aarch32 { diff --git a/dep/vixl/include/vixl/aarch32/instructions-aarch32.h b/dep/vixl/include/vixl/aarch32/instructions-aarch32.h index b235917e9..393f1ea42 100644 --- a/dep/vixl/include/vixl/aarch32/instructions-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/instructions-aarch32.h @@ -34,13 +34,14 @@ extern "C" { #include #include -#include "../code-buffer-vixl.h" -#include "../utils-vixl.h" +#include "code-buffer-vixl.h" +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" -#include "constants-aarch32.h" - -#ifdef __arm__ +#if defined(__arm__) && !defined(__SOFTFP__) #define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp"))) +#elif defined(_MSC_VER) +#define HARDFLOAT __declspec(noinline) #else #define HARDFLOAT __attribute__((noinline)) #endif @@ -492,6 +493,8 @@ class RegisterList { } Register GetFirstAvailableRegister() const; bool IsEmpty() const { return list_ == 0; } + bool IsSingleRegister() const { return IsPowerOf2(list_); } + int GetCount() const { return CountSetBits(list_); } static RegisterList Union(const RegisterList& list_1, const RegisterList& list_2) { return RegisterList(list_1.list_ | list_2.list_); } @@ -1039,7 +1042,9 @@ class Sign { const char* GetName() const { return (IsPlus() ? "" : "-"); } bool IsPlus() const { return sign_ == plus; } bool IsMinus() const { return sign_ == minus; } - int32_t ApplyTo(uint32_t value) { return IsPlus() ? value : -value; } + int32_t ApplyTo(uint32_t value) { + return IsPlus() ? value : UnsignedNegate(value); + } private: SignType sign_; diff --git a/dep/vixl/include/vixl/aarch32/location-aarch32.h b/dep/vixl/include/vixl/aarch32/location-aarch32.h index 637d11922..ae803f639 100644 --- a/dep/vixl/include/vixl/aarch32/location-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/location-aarch32.h @@ -36,9 +36,9 @@ extern "C" { #include #include -#include "../invalset-vixl.h" -#include "../pool-manager.h" -#include "../utils-vixl.h" +#include "invalset-vixl.h" +#include "pool-manager.h" +#include "utils-vixl.h" #include "constants-aarch32.h" #include "instructions-aarch32.h" @@ -58,12 +58,12 @@ class Location : public LocationBase<int32_t> { // with the assembler methods for generating instructions, but will never // be handled by the pool manager. Location() - : LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/), + : LocationBase<int32_t>(kRawLocation, 1 /* placeholder size*/), referenced_(false) {} typedef int32_t Offset; - ~Location() { + ~Location() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION { #ifdef VIXL_DEBUG if (IsReferenced() && !IsBound()) { VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n"); } @@ -217,7 +217,7 @@ class Location : public LocationBase<int32_t> { protected: // Types passed to LocationBase. Must be distinct for unbound Locations (not - // relevant for bound locations, as they don't have a correspoding + // relevant for bound locations, as they don't have a corresponding // PoolObject). static const int kRawLocation = 0; // Will not be used by the pool manager.
static const int kVeneerType = 1; diff --git a/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h b/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h index 8286a999e..390b90880 100644 --- a/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h @@ -28,15 +28,15 @@ #ifndef VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ #define VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ -#include "../code-generation-scopes-vixl.h" -#include "../macro-assembler-interface.h" -#include "../pool-manager-impl.h" -#include "../pool-manager.h" -#include "../utils-vixl.h" +#include "code-generation-scopes-vixl.h" +#include "macro-assembler-interface.h" +#include "pool-manager-impl.h" +#include "pool-manager.h" +#include "utils-vixl.h" -#include "assembler-aarch32.h" -#include "instructions-aarch32.h" -#include "operands-aarch32.h" +#include "aarch32/assembler-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" namespace vixl { @@ -268,7 +268,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), pool_end_(NULL) { #ifdef VIXL_DEBUG - SetAllowMacroInstructions(true); + SetAllowMacroInstructions( // NOLINT(clang-analyzer-optin.cplusplus.VirtualCall) + true); #else USE(allow_macro_instructions_); #endif @@ -283,7 +284,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), pool_end_(NULL) { #ifdef VIXL_DEBUG - SetAllowMacroInstructions(true); + SetAllowMacroInstructions( // NOLINT(clang-analyzer-optin.cplusplus.VirtualCall) + true); #endif } MacroAssembler(byte* buffer, size_t size, InstructionSet isa = kDefaultISA) @@ -296,7 +298,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), pool_end_(NULL) { #ifdef VIXL_DEBUG - SetAllowMacroInstructions(true); + SetAllowMacroInstructions( // NOLINT(clang-analyzer-optin.cplusplus.VirtualCall) + true); #endif } @@ -399,13 +402,13 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { VIXL_ASSERT(GetBuffer()->Is32bitAligned()); } // If we need to add padding, check if we have to emit the pool. - const int32_t pc = GetCursorOffset(); - if (label->Needs16BitPadding(pc)) { + const int32_t cursor = GetCursorOffset(); + if (label->Needs16BitPadding(cursor)) { const int kPaddingBytes = 2; - if (pool_manager_.MustEmit(pc, kPaddingBytes)) { - int32_t new_pc = pool_manager_.Emit(this, pc, kPaddingBytes); - USE(new_pc); - VIXL_ASSERT(new_pc == GetCursorOffset()); + if (pool_manager_.MustEmit(cursor, kPaddingBytes)) { + int32_t new_cursor = pool_manager_.Emit(this, cursor, kPaddingBytes); + USE(new_cursor); + VIXL_ASSERT(new_cursor == GetCursorOffset()); } } pool_manager_.Bind(this, label, GetCursorOffset()); @@ -427,30 +430,30 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { Location* location, Condition* cond = NULL) { int size = info->size; - int32_t pc = GetCursorOffset(); + int32_t cursor = GetCursorOffset(); // If we need to emit a branch over the instruction, take this into account. if ((cond != NULL) && NeedBranch(cond)) { size += kBranchSize; - pc += kBranchSize; + cursor += kBranchSize; } - int32_t from = pc; + int32_t from = cursor; from += IsUsingT32() ? 
kT32PcDelta : kA32PcDelta; if (info->pc_needs_aligning) from = AlignDown(from, 4); int32_t min = from + info->min_offset; int32_t max = from + info->max_offset; - ForwardReference<int32_t> temp_ref(pc, + ForwardReference<int32_t> temp_ref(cursor, info->size, min, max, info->alignment); if (pool_manager_.MustEmit(GetCursorOffset(), size, &temp_ref, location)) { - int32_t new_pc = pool_manager_.Emit(this, - GetCursorOffset(), - info->size, - &temp_ref, - location); - USE(new_pc); - VIXL_ASSERT(new_pc == GetCursorOffset()); + int32_t new_cursor = pool_manager_.Emit(this, + GetCursorOffset(), + info->size, + &temp_ref, + location); + USE(new_cursor); + VIXL_ASSERT(new_cursor == GetCursorOffset()); } } @@ -461,13 +464,13 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { // into account, as well as potential 16-bit padding needed to reach the // minimum accessible location. int alignment = literal->GetMaxAlignment(); - int32_t pc = GetCursorOffset(); - int total_size = AlignUp(pc, alignment) - pc + literal->GetSize(); - if (literal->Needs16BitPadding(pc)) total_size += 2; - if (pool_manager_.MustEmit(pc, total_size)) { - int32_t new_pc = pool_manager_.Emit(this, pc, total_size); - USE(new_pc); - VIXL_ASSERT(new_pc == GetCursorOffset()); + int32_t cursor = GetCursorOffset(); + int total_size = AlignUp(cursor, alignment) - cursor + literal->GetSize(); + if (literal->Needs16BitPadding(cursor)) total_size += 2; + if (pool_manager_.MustEmit(cursor, total_size)) { + int32_t new_cursor = pool_manager_.Emit(this, cursor, total_size); + USE(new_cursor); + VIXL_ASSERT(new_cursor == GetCursorOffset()); } pool_manager_.Bind(this, literal, GetCursorOffset()); literal->EmitPoolObject(this); @@ -2894,7 +2897,12 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { VIXL_ASSERT(OutsideITBlock()); MacroEmissionCheckScope guard(this); ITScope it_scope(this, &cond, guard); - pop(cond, registers); + if (registers.IsSingleRegister() && + (!IsUsingT32() || !registers.IsR0toR7orPC())) { + pop(cond, registers.GetFirstAvailableRegister()); + } else if (!registers.IsEmpty()) { + pop(cond, registers); + } } void Pop(RegisterList registers) { Pop(al, registers); } @@ -2914,7 +2922,12 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { VIXL_ASSERT(OutsideITBlock()); MacroEmissionCheckScope guard(this); ITScope it_scope(this, &cond, guard); - push(cond, registers); + if (registers.IsSingleRegister() && !registers.Includes(sp) && + (!IsUsingT32() || !registers.IsR0toR7orLR())) { + push(cond, registers.GetFirstAvailableRegister()); + } else if (!registers.IsEmpty()) { + push(cond, registers); + } } void Push(RegisterList registers) { Push(al, registers); } @@ -2924,7 +2937,12 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { VIXL_ASSERT(OutsideITBlock()); MacroEmissionCheckScope guard(this); ITScope it_scope(this, &cond, guard); - push(cond, rt); + if (IsUsingA32() && rt.IsSP()) { + // Only the A32 multiple-register form can push sp.
+ push(cond, RegisterList(rt)); + } else { + push(cond, rt); + } } void Push(Register rt) { Push(al, rt); } @@ -11170,10 +11188,11 @@ class UseScratchRegisterScope { uint32_t old_available_; // kRRegister uint64_t old_available_vfp_; // kVRegister - VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) { + VIXL_NO_RETURN_IN_DEBUG_MODE UseScratchRegisterScope( + const UseScratchRegisterScope&) { VIXL_UNREACHABLE(); } - VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) { + VIXL_NO_RETURN_IN_DEBUG_MODE void operator=(const UseScratchRegisterScope&) { VIXL_UNREACHABLE(); } }; diff --git a/dep/vixl/include/vixl/aarch32/operands-aarch32.h b/dep/vixl/include/vixl/aarch32/operands-aarch32.h index 0eebef583..1f01d81b1 100644 --- a/dep/vixl/include/vixl/aarch32/operands-aarch32.h +++ b/dep/vixl/include/vixl/aarch32/operands-aarch32.h @@ -28,7 +28,7 @@ #ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_ #define VIXL_AARCH32_OPERANDS_AARCH32_H_ -#include "instructions-aarch32.h" +#include "aarch32/instructions-aarch32.h" namespace vixl { namespace aarch32 { @@ -54,28 +54,16 @@ class Operand { // This is allowed to be an implicit constructor because Operand is // a wrapper class that doesn't normally perform any type conversion. Operand(uint32_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoReg), - shift_(LSL), - amount_(0), - rs_(NoReg) {} + : imm_(immediate), rm_(NoReg), shift_(LSL), amount_(0), rs_(NoReg) {} Operand(int32_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoReg), - shift_(LSL), - amount_(0), - rs_(NoReg) {} + : imm_(immediate), rm_(NoReg), shift_(LSL), amount_(0), rs_(NoReg) {} // rm // where rm is the base register // This is allowed to be an implicit constructor because Operand is // a wrapper class that doesn't normally perform any type conversion. Operand(Register rm) // NOLINT(runtime/explicit) - : imm_(0), - rm_(rm), - shift_(LSL), - amount_(0), - rs_(NoReg) { + : imm_(0), rm_(rm), shift_(LSL), amount_(0), rs_(NoReg) { VIXL_ASSERT(rm_.IsValid()); } @@ -202,7 +190,7 @@ class Operand { } private: -// Forbid implicitely creating operands around types that cannot be encoded +// Forbid implicitly creating operands around types that cannot be encoded // into a uint32_t without loss. #if __cplusplus >= 201103L Operand(int64_t) = delete; // NOLINT(runtime/explicit) @@ -245,22 +233,18 @@ class NeonImmediate { // This is allowed to be an implicit constructor because NeonImmediate is // a wrapper class that doesn't normally perform any type conversion. NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(I32) {} + : imm_(immediate), immediate_type_(I32) {} NeonImmediate(int immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(I32) {} + : imm_(immediate), immediate_type_(I32) {} // { #<immediate> } // where <immediate> is a 64 bit number // This is allowed to be an implicit constructor because NeonImmediate is // a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(I64) {} + : imm_(immediate), immediate_type_(I64) {} NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(I64) {} + : imm_(immediate), immediate_type_(I64) {} // { #<immediate> } // where <immediate> is a non zero floating point number which can be encoded @@ -268,11 +252,9 @@ class NeonImmediate { // This is allowed to be an implicit constructor because NeonImmediate is // a wrapper class that doesn't normally perform any type conversion. NeonImmediate(float immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(F32) {} + : imm_(immediate), immediate_type_(F32) {} NeonImmediate(double immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - immediate_type_(F64) {} + : imm_(immediate), immediate_type_(F64) {} NeonImmediate(const NeonImmediate& src) : imm_(src.imm_), immediate_type_(src.immediate_type_) {} @@ -311,7 +293,7 @@ class NeonImmediate { bool IsInteger32() const { return immediate_type_.Is(I32); } bool IsInteger64() const { return immediate_type_.Is(I64); } - bool IsInteger() const { return IsInteger32() | IsInteger64(); } + bool IsInteger() const { return IsInteger32() || IsInteger64(); } bool IsFloat() const { return immediate_type_.Is(F32); } bool IsDouble() const { return immediate_type_.Is(F64); } bool IsFloatZero() const { @@ -374,29 +356,21 @@ std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand); class NeonOperand { public: NeonOperand(int32_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(int64_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(float immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(double immediate) // NOLINT(runtime/explicit) - : imm_(immediate), - rm_(NoDReg) {} + : imm_(immediate), rm_(NoDReg) {} NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) - : imm_(imm), - rm_(NoDReg) {} + : imm_(imm), rm_(NoDReg) {} NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit) - : imm_(0), - rm_(rm) { + : imm_(0), rm_(rm) { VIXL_ASSERT(rm_.IsValid()); } @@ -641,7 +615,7 @@ class ImmediateVorn : public ImmediateVorr { // - a shifted index register , # // // The index register may have an associated {+/-} sign, -// which if ommitted, defaults to + . +// which if omitted, defaults to + . // // We have two constructors for the offset: // diff --git a/dep/vixl/include/vixl/aarch64/abi-aarch64.h b/dep/vixl/include/vixl/aarch64/abi-aarch64.h index a00580241..388cf10f2 100644 --- a/dep/vixl/include/vixl/aarch64/abi-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/abi-aarch64.h @@ -105,7 +105,7 @@ class ABI { // Stage C.1 if (is_floating_point_type && (NSRN_ < 8)) { - return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte)); + return GenericOperand(VRegister(NSRN_++, size * kBitsPerByte)); } // Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
// Stages C.5 and C.6 @@ -159,8 +159,8 @@ template <> inline GenericOperand ABI::GetReturnGenericOperand<void>() const { return GenericOperand(); } -} -} // namespace vixl::aarch64 +} // namespace aarch64 +} // namespace vixl #endif // VIXL_AARCH64_ABI_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/assembler-aarch64.h b/dep/vixl/include/vixl/aarch64/assembler-aarch64.h index b806cbfc2..54310f1f1 100644 --- a/dep/vixl/include/vixl/aarch64/assembler-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/assembler-aarch64.h @@ -33,12 +33,8 @@ #include "../globals-vixl.h" #include "../invalset-vixl.h" #include "../utils-vixl.h" -#include "operands-aarch64.h" -#ifdef _MSC_VER -// This is defined in arm_neon.h on MSVC. -#undef mvn -#endif +#include "operands-aarch64.h" namespace vixl { namespace aarch64 { @@ -408,13 +404,8 @@ enum LoadStoreScalingOption { // Assembler. class Assembler : public vixl::internal::AssemblerBase { public: - explicit Assembler( - PositionIndependentCodeOption pic = PositionIndependentCode) - : pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} - explicit Assembler( - size_t capacity, - PositionIndependentCodeOption pic = PositionIndependentCode) - : AssemblerBase(capacity), + Assembler(PositionIndependentCodeOption pic = PositionIndependentCode) + : AssemblerBase(), pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} Assembler(byte* buffer, @@ -436,7 +427,6 @@ class Assembler : public vixl::internal::AssemblerBase { // and data that has already been emitted into the buffer. void Reset(); - // Label. // Bind a label to the current PC. void bind(Label* label); @@ -491,6 +481,7 @@ class Assembler : public vixl::internal::AssemblerBase { // Instruction set functions. // Branch / Jump instructions. + // Branch to register. void br(const Register& xn); @@ -643,6 +634,7 @@ class Assembler : public vixl::internal::AssemblerBase { void adrp(const Register& xd, int64_t imm21); // Data Processing instructions. + // Add. void add(const Register& rd, const Register& rn, const Operand& operand); @@ -679,6 +671,16 @@ class Assembler : public vixl::internal::AssemblerBase { // Subtract with carry bit and update status flags. void sbcs(const Register& rd, const Register& rn, const Operand& operand); + // Rotate register right and insert into NZCV flags under the control of a + // mask [Armv8.4]. + void rmif(const Register& xn, unsigned rotation, StatusFlags flags); + + // Set NZCV flags from register, treated as an 8-bit value [Armv8.4]. + void setf8(const Register& rn); + + // Set NZCV flags from register, treated as a 16-bit value [Armv8.4]. + void setf16(const Register& rn); + // Negate with carry bit. void ngc(const Register& rd, const Operand& operand); @@ -686,6 +688,7 @@ class Assembler : public vixl::internal::AssemblerBase { // Negate with carry bit and update status flags. void ngcs(const Register& rd, const Operand& operand); // Logical instructions. + // Bitwise and (A & B). void and_(const Register& rd, const Register& rn, const Operand& operand); @@ -726,6 +729,7 @@ class Assembler : public vixl::internal::AssemblerBase { void rorv(const Register& rd, const Register& rn, const Register& rm); // Bitfield instructions. + // Bitfield move. void bfm(const Register& rd, const Register& rn, @@ -745,6 +749,7 @@ class Assembler : public vixl::internal::AssemblerBase { unsigned imms); // Bfm aliases. + // Bitfield insert. void bfi(const Register& rd, const Register& rn, @@ -774,6 +779,7 @@ class Assembler : public vixl::internal::AssemblerBase { } // Sbfm aliases. + // Arithmetic shift right.
void asr(const Register& rd, const Register& rn, unsigned shift) { VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits())); @@ -813,6 +819,7 @@ class Assembler : public vixl::internal::AssemblerBase { void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); } // Ubfm aliases. + // Logical shift left. void lsl(const Register& rd, const Register& rn, unsigned shift) { unsigned reg_size = rd.GetSizeInBits(); @@ -909,6 +916,7 @@ class Assembler : public vixl::internal::AssemblerBase { } // Conditional comparison. + // Conditional compare negative. void ccmn(const Register& rn, const Operand& operand, @@ -1077,18 +1085,6 @@ class Assembler : public vixl::internal::AssemblerBase { // zero [Armv8.3]. void pacdza(const Register& xd); - // Pointer Authentication Code for Data address, using key A, with address in - // x17 and modifier in x16 [Armv8.3]. - void pacda1716(); - - // Pointer Authentication Code for Data address, using key A, with address in - // LR and modifier in SP [Armv8.3]. - void pacdasp(); - - // Pointer Authentication Code for Data address, using key A, with address in - // LR and a modifier of zero [Armv8.3]. - void pacdaz(); - // Pointer Authentication Code for Data address, using key B [Armv8.3]. void pacdb(const Register& xd, const Register& xn); @@ -1096,18 +1092,6 @@ class Assembler : public vixl::internal::AssemblerBase { // zero [Armv8.3]. void pacdzb(const Register& xd); - // Pointer Authentication Code for Data address, using key B, with address in - // x17 and modifier in x16 [Armv8.3]. - void pacdb1716(); - - // Pointer Authentication Code for Data address, using key B, with address in - // LR and modifier in SP [Armv8.3]. - void pacdbsp(); - - // Pointer Authentication Code for Data address, using key B, with address in - // LR and a modifier of zero [Armv8.3]. - void pacdbz(); - // Pointer Authentication Code, using Generic key [Armv8.3]. void pacga(const Register& xd, const Register& xn, const Register& xm); @@ -1155,36 +1139,12 @@ class Assembler : public vixl::internal::AssemblerBase { // Authenticate Data address, using key A and a modifier of zero [Armv8.3]. void autdza(const Register& xd); - // Authenticate Data address, using key A, with address in x17 and modifier in - // x16 [Armv8.3]. - void autda1716(); - - // Authenticate Data address, using key A, with address in LR and modifier in - // SP [Armv8.3]. - void autdasp(); - - // Authenticate Data address, using key A, with address in LR and a modifier - // of zero [Armv8.3]. - void autdaz(); - // Authenticate Data address, using key B [Armv8.3]. void autdb(const Register& xd, const Register& xn); // Authenticate Data address, using key B and a modifier of zero [Armv8.3]. void autdzb(const Register& xd); - // Authenticate Data address, using key B, with address in x17 and modifier in - // x16 [Armv8.3]. - void autdb1716(); - - // Authenticate Data address, using key B, with address in LR and modifier in - // SP [Armv8.3]. - void autdbsp(); - - // Authenticate Data address, using key B, with address in LR and a modifier - // of zero [Armv8.3]. - void autdbz(); - // Strip Pointer Authentication Code of Data address [Armv8.3]. void xpacd(const Register& xd); @@ -1195,6 +1155,7 @@ class Assembler : public vixl::internal::AssemblerBase { void xpaclri(); // Memory instructions. + // Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src, @@ -1285,6 +1246,14 @@ class Assembler : public vixl::internal::AssemblerBase { const MemOperand& src, LoadStoreScalingOption option = PreferUnscaledOffset); + // Load double-word with pointer authentication, using data key A and a + // modifier of zero [Armv8.3]. + void ldraa(const Register& xt, const MemOperand& src); + + // Load double-word with pointer authentication, using data key B and a + // modifier of zero [Armv8.3]. + void ldrab(const Register& xt, const MemOperand& src); + // Load integer or FP register pair. void ldp(const CPURegister& rt, const CPURegister& rt2, @@ -1474,6 +1443,35 @@ class Assembler : public vixl::internal::AssemblerBase { const Register& rt2, const MemOperand& src); + // Store-release byte (with unscaled offset) [Armv8.4]. + void stlurb(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register byte (with unscaled offset) [Armv8.4]. + void ldapurb(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed byte (with unscaled offset) [Armv8.4]. + void ldapursb(const Register& rt, const MemOperand& src); + + // Store-release half-word (with unscaled offset) [Armv8.4]. + void stlurh(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register half-word (with unscaled offset) [Armv8.4]. + void ldapurh(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed half-word (with unscaled offset) + // [Armv8.4]. + void ldapursh(const Register& rt, const MemOperand& src); + + // Store-release word or double-word (with unscaled offset) [Armv8.4]. + void stlur(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register word or double-word (with unscaled offset) + // [Armv8.4]. + void ldapur(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed word (with unscaled offset) [Armv8.4]. + void ldapursw(const Register& xt, const MemOperand& src); + // Atomic add on byte in memory [Armv8.1] void ldaddb(const Register& rs, const Register& rt, const MemOperand& src); @@ -2062,6 +2060,22 @@ class Assembler : public vixl::internal::AssemblerBase { // Prefetch from pc + imm19 << 2. void prfm(PrefetchOperation op, int64_t imm19); + // Prefetch memory (allowing unallocated hints). + void prfm(int op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // Prefetch memory (with unscaled offset, allowing unallocated hints). + void prfum(int op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Prefetch memory in the literal pool (allowing unallocated hints). + void prfm(int op, RawLiteral* literal); + + // Prefetch from pc + imm19 << 2 (allowing unallocated hints). + void prfm(int op, int64_t imm19); + // Move instructions. The default shift of -1 indicates that the move // instruction will calculate an appropriate 16-bit immediate and left shift // that is equal to the 64-bit immediate argument. If an explicit left shift @@ -2087,7 +2101,15 @@ class Assembler : public vixl::internal::AssemblerBase { MoveWide(rd, imm, shift, MOVZ); } + // Move immediate, aliases for movz, movn, orr. + void mov(const Register& rd, uint64_t imm) { + if (!OneInstrMoveImmediateHelper(this, rd, imm)) { + VIXL_UNIMPLEMENTED(); + } + } + // Misc instructions. + // Monitor debug-mode breakpoint. void brk(int code); @@ -2097,6 +2119,9 @@ class Assembler : public vixl::internal::AssemblerBase { // Generate exception targeting EL1. 
void svc(int code); + // Generate undefined instruction exception. + void udf(int code); + // Move register to register. void mov(const Register& rd, const Register& rn); @@ -2104,12 +2129,24 @@ class Assembler : public vixl::internal::AssemblerBase { void mvn(const Register& rd, const Operand& operand); // System instructions. + // Move to register from system register. void mrs(const Register& xt, SystemRegister sysreg); // Move from register to system register. void msr(SystemRegister sysreg, const Register& xt); + // Invert carry flag [Armv8.4]. + void cfinv(); + + // Convert floating-point condition flags from alternative format to Arm + // format [Armv8.5]. + void xaflag(); + + // Convert floating-point condition flags from Arm format to alternative + // format [Armv8.5]. + void axflag(); + // System instruction. void sys(int op1, int crn, int crm, int op2, const Register& xt = xzr); @@ -2146,11 +2183,14 @@ class Assembler : public vixl::internal::AssemblerBase { // Conditional speculation dependency barrier. void csdb(); - // Alias for system instructions. // No-op. void nop() { hint(NOP); } + // Branch target identification. + void bti(BranchTargetIdentifier id); + // FP and NEON instructions. + // Move double precision immediate to FP register. void fmov(const VRegister& vd, double imm); @@ -2259,6 +2299,18 @@ class Assembler : public vixl::internal::AssemblerBase { // FP round to integer, towards zero. void frintz(const VRegister& vd, const VRegister& vn); + // FP round to 32-bit integer, exact, implicit rounding [Armv8.5]. + void frint32x(const VRegister& vd, const VRegister& vn); + + // FP round to 32-bit integer, towards zero [Armv8.5]. + void frint32z(const VRegister& vd, const VRegister& vn); + + // FP round to 64-bit integer, exact, implicit rounding [Armv8.5]. + void frint64x(const VRegister& vd, const VRegister& vn); + + // FP round to 64-bit integer, towards zero [Armv8.5]. + void frint64z(const VRegister& vd, const VRegister& vn); + void FPCompareMacro(const VRegister& vn, double value, FPTrapFlags trap); void FPCompareMacro(const VRegister& vn, @@ -3311,6 +3363,21 @@ class Assembler : public vixl::internal::AssemblerBase { // Unsigned dot product [Armv8.2]. void udot(const VRegister& vd, const VRegister& vn, const VRegister& vm); + // Dot Product with unsigned and signed integers (vector). + void usdot(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Dot product with signed and unsigned integers (vector, by element). + void sudot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Dot product with unsigned and signed integers (vector, by element). + void usdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + // Signed saturating rounding doubling multiply subtract returning high half // [Armv8.1]. void sqrdmlsh(const VRegister& vd, const VRegister& vn, const VRegister& vm); @@ -3386,9 +3453,45 @@ class Assembler : public vixl::internal::AssemblerBase { // FP vector multiply accumulate. void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + // FP fused multiply-add long to accumulator. + void fmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add long to accumulator (second part). + void fmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add long to accumulator by element. 
+ void fmlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-add long to accumulator by element (second part). + void fmlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + // FP vector multiply subtract. void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + // FP fused multiply-subtract long to accumulator. + void fmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-subtract long to accumulator (second part). + void fmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-subtract long to accumulator by element. + void fmlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-subtract long to accumulator by element (second part). + void fmlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + // FP vector multiply extended. void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm); @@ -3481,6 +3584,7 @@ class Assembler : public vixl::internal::AssemblerBase { // v8.3 complex numbers - note that these are only partial/helper functions // and must be used in series in order to perform full CN operations. + // FP complex multiply accumulate (by element) [Armv8.3]. void fcmla(const VRegister& vd, const VRegister& vn, @@ -3500,7 +3604,3471 @@ class Assembler : public vixl::internal::AssemblerBase { const VRegister& vm, int rot); + // Signed 8-bit integer matrix multiply-accumulate (vector). + void smmla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned and signed 8-bit integer matrix multiply-accumulate (vector). + void usmmla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned 8-bit integer matrix multiply-accumulate (vector). + void ummla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Scalable Vector Extensions. + + // Absolute value (predicated). + void abs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Add vectors (predicated). + void add(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Add vectors (unpredicated). + void add(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Add immediate (unpredicated). + void add(const ZRegister& zd, const ZRegister& zn, int imm8, int shift = -1); + + // Add multiple of predicate register size to scalar register. + void addpl(const Register& xd, const Register& xn, int imm6); + + // Add multiple of vector register size to scalar register. + void addvl(const Register& xd, const Register& xn, int imm6); + + // Compute vector address. + void adr(const ZRegister& zd, const SVEMemOperand& addr); + + // Bitwise AND predicates. + void and_(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise AND vectors (predicated). + void and_(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise AND with immediate (unpredicated). + void and_(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise AND vectors (unpredicated). + void and_(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise AND predicates. 
+ void ands(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise AND reduction to scalar. + void andv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Arithmetic shift right by immediate (predicated). + void asr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Arithmetic shift right by 64-bit wide elements (predicated). + void asr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Arithmetic shift right by immediate (unpredicated). + void asr(const ZRegister& zd, const ZRegister& zn, int shift); + + // Arithmetic shift right by 64-bit wide elements (unpredicated). + void asr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Arithmetic shift right for divide by immediate (predicated). + void asrd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Reversed arithmetic shift right by vector (predicated). + void asrr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise clear predicates. + void bic(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise clear vectors (predicated). + void bic(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise clear bits using immediate (unpredicated). + void bic(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise clear vectors (unpredicated). + void bic(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise clear predicates. + void bics(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Break after first true condition. + void brka(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + + // Break after first true condition. + void brkas(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn); + + // Break before first true condition. + void brkb(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + + // Break before first true condition. + void brkbs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn); + + // Propagate break to next partition. + void brkn(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Propagate break to next partition. + void brkns(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Break after first true condition, propagating from previous partition. + void brkpa(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Break after first true condition, propagating from previous partition. + void brkpas(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Break before first true condition, propagating from previous partition. 
+ void brkpb(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Break before first true condition, propagating from previous partition. + void brkpbs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Conditionally extract element after last to general-purpose register. + void clasta(const Register& rd, + const PRegister& pg, + const Register& rn, + const ZRegister& zm); + + // Conditionally extract element after last to SIMD&FP scalar register. + void clasta(const VRegister& vd, + const PRegister& pg, + const VRegister& vn, + const ZRegister& zm); + + // Conditionally extract element after last to vector register. + void clasta(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Conditionally extract last element to general-purpose register. + void clastb(const Register& rd, + const PRegister& pg, + const Register& rn, + const ZRegister& zm); + + // Conditionally extract last element to SIMD&FP scalar register. + void clastb(const VRegister& vd, + const PRegister& pg, + const VRegister& vn, + const ZRegister& zm); + + // Conditionally extract last element to vector register. + void clastb(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Count leading sign bits (predicated). + void cls(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Count leading zero bits (predicated). + void clz(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + void cmp(Condition cond, + const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to 64-bit wide elements. + void cmpeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmpeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Compare vector to 64-bit wide elements. + void cmpge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmpge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Compare vector to 64-bit wide elements. + void cmpgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmpgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Compare vector to 64-bit wide elements. + void cmphi(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmphi(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + unsigned imm7); + + // Compare vector to 64-bit wide elements. + void cmphs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmphs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + unsigned imm7); + + // Compare vector to 64-bit wide elements. + void cmple(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. 
+ void cmple(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Compare vector to 64-bit wide elements. + void cmplo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmplo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + unsigned imm7); + + // Compare vector to 64-bit wide elements. + void cmpls(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmpls(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + unsigned imm7); + + // Compare vector to 64-bit wide elements. + void cmplt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmplt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Compare vector to 64-bit wide elements. + void cmpne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Compare vector to immediate. + void cmpne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Logically invert boolean condition in vector (predicated). + void cnot(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Count non-zero bits (predicated). + void cnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Set scalar to multiple of predicate constraint element count. + void cntb(const Register& rd, int pattern = SVE_ALL, int multiplier = 1); + + // Set scalar to multiple of predicate constraint element count. + void cntd(const Register& rd, int pattern = SVE_ALL, int multiplier = 1); + + // Set scalar to multiple of predicate constraint element count. + void cnth(const Register& rd, int pattern = SVE_ALL, int multiplier = 1); + + // Set scalar to active predicate element count. + void cntp(const Register& xd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + + // Set scalar to multiple of predicate constraint element count. + void cntw(const Register& rd, int pattern = SVE_ALL, int multiplier = 1); + + // Shuffle active elements of vector to the right and fill with zero. + void compact(const ZRegister& zd, const PRegister& pg, const ZRegister& zn); + + // Copy signed integer immediate to vector elements (predicated). + void cpy(const ZRegister& zd, const PRegister& pg, int imm8, int shift = -1); + + // Copy general-purpose register to vector elements (predicated). + void cpy(const ZRegister& zd, const PRegisterM& pg, const Register& rn); + + // Copy SIMD&FP scalar register to vector elements (predicated). + void cpy(const ZRegister& zd, const PRegisterM& pg, const VRegister& vn); + + // Compare and terminate loop. + void ctermeq(const Register& rn, const Register& rm); + + // Compare and terminate loop. + void ctermne(const Register& rn, const Register& rm); + + // Decrement scalar by multiple of predicate constraint element count. + void decb(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement scalar by multiple of predicate constraint element count. + void decd(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement vector by multiple of predicate constraint element count. 
+ void decd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement scalar by multiple of predicate constraint element count. + void dech(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement vector by multiple of predicate constraint element count. + void dech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement scalar by active predicate element count. + void decp(const Register& rdn, const PRegisterWithLaneSize& pg); + + // Decrement vector by active predicate element count. + void decp(const ZRegister& zdn, const PRegister& pg); + + // Decrement scalar by multiple of predicate constraint element count. + void decw(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Decrement vector by multiple of predicate constraint element count. + void decw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Broadcast general-purpose register to vector elements (unpredicated). + void dup(const ZRegister& zd, const Register& xn); + + // Broadcast indexed element to vector (unpredicated). + void dup(const ZRegister& zd, const ZRegister& zn, unsigned index); + + // As for movz/movk/movn, if the default shift of -1 is specified to dup, the + // assembler will pick an appropriate immediate and left shift that is + // equivalent to the immediate argument. If an explicit left shift is + // specified (0 or 8), the immediate must be a signed 8-bit integer. + + // Broadcast signed immediate to vector elements (unpredicated). + void dup(const ZRegister& zd, int imm8, int shift = -1); + + // Broadcast logical bitmask immediate to vector (unpredicated). + void dupm(const ZRegister& zd, uint64_t imm); + + // Bitwise exclusive OR with inverted immediate (unpredicated). + void eon(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise exclusive OR predicates. + void eor(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise exclusive OR vectors (predicated). + void eor(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise exclusive OR with immediate (unpredicated). + void eor(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise exclusive OR vectors (unpredicated). + void eor(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise exclusive OR predicates. + void eors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise XOR reduction to scalar. + void eorv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Extract vector from pair of vectors. + void ext(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + unsigned offset); + + // Floating-point absolute difference (predicated). + void fabd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point absolute value (predicated). + void fabs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point absolute compare vectors. + void facge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point absolute compare vectors. 
+ void facgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point add immediate (predicated). + void fadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point add vector (predicated). + void fadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point add vector (unpredicated). + void fadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point add strictly-ordered reduction, accumulating in scalar. + void fadda(const VRegister& vd, + const PRegister& pg, + const VRegister& vn, + const ZRegister& zm); + + // Floating-point add recursive reduction to scalar. + void faddv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Floating-point complex add with rotate (predicated). + void fcadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Floating-point compare vector with zero. + void fcmeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vectors. + void fcmeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point compare vector with zero. + void fcmge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vectors. + void fcmge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point compare vector with zero. + void fcmgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vectors. + void fcmgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point complex multiply-add with rotate (predicated). + void fcmla(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Floating-point complex multiply-add by indexed values with rotate. + void fcmla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + + // Floating-point compare vector with zero. + void fcmle(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vector with zero. + void fcmlt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vector with zero. + void fcmne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero); + + // Floating-point compare vectors. + void fcmne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point compare vectors. + void fcmuo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Copy floating-point immediate to vector elements (predicated). + void fcpy(const ZRegister& zd, const PRegisterM& pg, double imm); + + // Copy half-precision floating-point immediate to vector elements + // (predicated). + void fcpy(const ZRegister& zd, const PRegisterM& pg, Float16 imm) { + fcpy(zd, pg, FPToDouble(imm, kIgnoreDefaultNaN)); + } + + // Floating-point convert precision (predicated). 
+ void fcvt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point convert to signed integer, rounding toward zero + // (predicated). + void fcvtzs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point convert to unsigned integer, rounding toward zero + // (predicated). + void fcvtzu(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point divide by vector (predicated). + void fdiv(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point reversed divide by vector (predicated). + void fdivr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Broadcast floating-point immediate to vector elements. + void fdup(const ZRegister& zd, double imm); + + // Broadcast half-precision floating-point immediate to vector elements. + void fdup(const ZRegister& zd, Float16 imm) { + fdup(zd, FPToDouble(imm, kIgnoreDefaultNaN)); + } + + // Floating-point exponential accelerator. + void fexpa(const ZRegister& zd, const ZRegister& zn); + + // Floating-point fused multiply-add vectors (predicated), writing + // multiplicand [Zdn = Za + Zdn * Zm]. + void fmad(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za); + + // Floating-point maximum with immediate (predicated). + void fmax(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point maximum (predicated). + void fmax(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point maximum number with immediate (predicated). + void fmaxnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point maximum number (predicated). + void fmaxnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point maximum number recursive reduction to scalar. + void fmaxnmv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Floating-point maximum recursive reduction to scalar. + void fmaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Floating-point minimum with immediate (predicated). + void fmin(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point minimum (predicated). + void fmin(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point minimum number with immediate (predicated). + void fminnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point minimum number (predicated). + void fminnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point minimum number recursive reduction to scalar. + void fminnmv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Floating-point minimum recursive reduction to scalar. + void fminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Floating-point fused multiply-add vectors (predicated), writing addend + // [Zda = Zda + Zn * Zm]. + void fmla(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point fused multiply-add by indexed elements + // (Zda = Zda + Zn * Zm[indexed]). 
+ void fmla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Floating-point fused multiply-subtract vectors (predicated), writing + // addend [Zda = Zda + -Zn * Zm]. + void fmls(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point fused multiply-subtract by indexed elements + // (Zda = Zda + -Zn * Zm[indexed]). + void fmls(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Move 8-bit floating-point immediate to vector elements (unpredicated). + void fmov(const ZRegister& zd, double imm); + + // Move 8-bit floating-point immediate to vector elements (predicated). + void fmov(const ZRegister& zd, const PRegisterM& pg, double imm); + + // Floating-point fused multiply-subtract vectors (predicated), writing + // multiplicand [Zdn = Za + -Zdn * Zm]. + void fmsb(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za); + + // Floating-point multiply by immediate (predicated). + void fmul(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point multiply vectors (predicated). + void fmul(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point multiply by indexed elements. + void fmul(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + unsigned index); + + // Floating-point multiply vectors (unpredicated). + void fmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point multiply-extended vectors (predicated). + void fmulx(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point negate (predicated). + void fneg(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point negated fused multiply-add vectors (predicated), writing + // multiplicand [Zdn = -Za + -Zdn * Zm]. + void fnmad(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za); + + // Floating-point negated fused multiply-add vectors (predicated), writing + // addend [Zda = -Zda + -Zn * Zm]. + void fnmla(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point negated fused multiply-subtract vectors (predicated), + // writing addend [Zda = -Zda + Zn * Zm]. + void fnmls(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point negated fused multiply-subtract vectors (predicated), + // writing multiplicand [Zdn = -Za + Zdn * Zm]. + void fnmsb(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za); + + // Floating-point reciprocal estimate (unpredicated). + void frecpe(const ZRegister& zd, const ZRegister& zn); + + // Floating-point reciprocal step (unpredicated). + void frecps(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point reciprocal exponent (predicated). + void frecpx(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frinta(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frinti(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). 
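+  // The suffix selects the rounding mode; frintm rounds toward minus
+  // infinity. An illustrative call (arbitrary registers):
+  //   frintm(z0.VnD(), p0.Merging(), z1.VnD());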
+ void frintm(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frintn(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frintp(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frintx(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point round to integral value (predicated). + void frintz(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point reciprocal square root estimate (unpredicated). + void frsqrte(const ZRegister& zd, const ZRegister& zn); + + // Floating-point reciprocal square root step (unpredicated). + void frsqrts(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point adjust exponent by vector (predicated). + void fscale(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point square root (predicated). + void fsqrt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point subtract immediate (predicated). + void fsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point subtract vectors (predicated). + void fsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point subtract vectors (unpredicated). + void fsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point reversed subtract from immediate (predicated). + void fsubr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm); + + // Floating-point reversed subtract vectors (predicated). + void fsubr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point trigonometric multiply-add coefficient. + void ftmad(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int imm3); + + // Floating-point trigonometric starting value. + void ftsmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point trigonometric select coefficient. + void ftssel(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Increment scalar by multiple of predicate constraint element count. + void incb(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment scalar by multiple of predicate constraint element count. + void incd(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment vector by multiple of predicate constraint element count. + void incd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment scalar by multiple of predicate constraint element count. + void inch(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment vector by multiple of predicate constraint element count. + void inch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment scalar by active predicate element count. + void incp(const Register& rdn, const PRegisterWithLaneSize& pg); + + // Increment vector by active predicate element count. + void incp(const ZRegister& zdn, const PRegister& pg); + + // Increment scalar by multiple of predicate constraint element count. 
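+  // For example (arbitrary register), adding twice the word-lane count of
+  // a Z register to x0:
+  //   incw(x0, SVE_ALL, 2);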
+ void incw(const Register& xdn, int pattern = SVE_ALL, int multiplier = 1); + + // Increment vector by multiple of predicate constraint element count. + void incw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Create index starting from and incremented by immediate. + void index(const ZRegister& zd, int start, int step); + + // Create index starting from and incremented by general-purpose register. + void index(const ZRegister& zd, const Register& rn, const Register& rm); + + // Create index starting from general-purpose register and incremented by + // immediate. + void index(const ZRegister& zd, const Register& rn, int imm5); + + // Create index starting from immediate and incremented by general-purpose + // register. + void index(const ZRegister& zd, int imm5, const Register& rm); + + // Insert general-purpose register in shifted vector. + void insr(const ZRegister& zdn, const Register& rm); + + // Insert SIMD&FP scalar register in shifted vector. + void insr(const ZRegister& zdn, const VRegister& vm); + + // Extract element after last to general-purpose register. + void lasta(const Register& rd, const PRegister& pg, const ZRegister& zn); + + // Extract element after last to SIMD&FP scalar register. + void lasta(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Extract last element to general-purpose register. + void lastb(const Register& rd, const PRegister& pg, const ZRegister& zn); + + // Extract last element to SIMD&FP scalar register. + void lastb(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Contiguous/gather load bytes to vector. + void ld1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load halfwords to vector. + void ld1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load words to vector. + void ld1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load doublewords to vector. + void ld1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // TODO: Merge other loads into the SVEMemOperand versions. + + // Load and broadcast unsigned byte to vector. + void ld1rb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast unsigned halfword to vector. + void ld1rh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast unsigned word to vector. + void ld1rw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast doubleword to vector. + void ld1rd(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate sixteen bytes. + void ld1rqb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate eight halfwords. + void ld1rqh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate four words. + void ld1rqw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate two doublewords. + void ld1rqd(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate thirty-two bytes. + void ld1rob(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate sixteen halfwords. 
+ void ld1roh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate eight words. + void ld1row(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load and replicate four doublewords. + void ld1rod(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast signed byte to vector. + void ld1rsb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast signed halfword to vector. + void ld1rsh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load and broadcast signed word to vector. + void ld1rsw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load signed bytes to vector. + void ld1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load signed halfwords to vector. + void ld1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous/gather load signed words to vector. + void ld1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // TODO: Merge other loads into the SVEMemOperand versions. + + // Contiguous load two-byte structures to two vectors. + void ld2b(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load two-halfword structures to two vectors. + void ld2h(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load two-word structures to two vectors. + void ld2w(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load two-doubleword structures to two vectors. + void ld2d(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load three-byte structures to three vectors. + void ld3b(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load three-halfword structures to three vectors. + void ld3h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load three-word structures to three vectors. + void ld3w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load three-doubleword structures to three vectors. + void ld3d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load four-byte structures to four vectors. + void ld4b(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load four-halfword structures to four vectors. + void ld4h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load four-word structures to four vectors. + void ld4w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load four-doubleword structures to four vectors. 
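+  // The four destination registers must be consecutive, e.g. (assuming x0
+  // holds a valid base address; registers otherwise arbitrary):
+  //   ld4d(z0.VnD(), z1.VnD(), z2.VnD(), z3.VnD(), p0.Zeroing(),
+  //        SVEMemOperand(x0));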
+ void ld4d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault unsigned bytes to vector. + void ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault unsigned halfwords to vector. + void ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault unsigned words to vector. + void ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault doublewords to vector. + void ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault signed bytes to vector. + void ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault signed halfwords to vector. + void ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load first-fault signed words to vector. + void ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Gather load first-fault unsigned bytes to vector. + void ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault unsigned bytes to vector (immediate index). + void ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault doublewords to vector (vector index). + void ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault doublewords to vector (immediate index). + void ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault unsigned halfwords to vector (vector index). + void ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault unsigned halfwords to vector (immediate index). + void ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault signed bytes to vector (vector index). + void ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault signed bytes to vector (immediate index). + void ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault signed halfwords to vector (vector index). + void ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault signed halfwords to vector (immediate index). + void ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault signed words to vector (vector index). + void ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault signed words to vector (immediate index). + void ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Gather load first-fault unsigned words to vector (vector index). + void ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm); + + // Gather load first-fault unsigned words to vector (immediate index). 
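+  // For example (arbitrary registers; the immediate is assumed to be a
+  // byte offset), gathering words from the addresses held in z1:
+  //   ldff1w(z0.VnS(), p0.Zeroing(), z1.VnS(), 4);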
+ void ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5); + + // Contiguous load non-fault unsigned bytes to vector (immediate index). + void ldnf1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault doublewords to vector (immediate index). + void ldnf1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault unsigned halfwords to vector (immediate + // index). + void ldnf1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault signed bytes to vector (immediate index). + void ldnf1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault signed halfwords to vector (immediate index). + void ldnf1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault signed words to vector (immediate index). + void ldnf1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-fault unsigned words to vector (immediate index). + void ldnf1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-temporal bytes to vector. + void ldnt1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-temporal halfwords to vector. + void ldnt1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-temporal words to vector. + void ldnt1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Contiguous load non-temporal doublewords to vector. + void ldnt1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Load SVE predicate/vector register. + void ldr(const CPURegister& rt, const SVEMemOperand& addr); + + // Logical shift left by immediate (predicated). + void lsl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Logical shift left by 64-bit wide elements (predicated). + void lsl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Logical shift left by immediate (unpredicated). + void lsl(const ZRegister& zd, const ZRegister& zn, int shift); + + // Logical shift left by 64-bit wide elements (unpredicated). + void lsl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Reversed logical shift left by vector (predicated). + void lslr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Logical shift right by immediate (predicated). + void lsr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Logical shift right by 64-bit wide elements (predicated). + void lsr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Logical shift right by immediate (unpredicated). + void lsr(const ZRegister& zd, const ZRegister& zn, int shift); + + // Logical shift right by 64-bit wide elements (unpredicated). + void lsr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Reversed logical shift right by vector (predicated). + void lsrr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise invert predicate. 
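+  // e.g. (arbitrary registers), inverting the lanes of p2 that are active
+  // in p1, and zeroing the rest:
+  //   not_(p0.VnB(), p1.Zeroing(), p2.VnB());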
+  void not_(const PRegisterWithLaneSize& pd,
+            const PRegisterZ& pg,
+            const PRegisterWithLaneSize& pn);
+
+  // Bitwise invert predicate, setting the condition flags.
+  void nots(const PRegisterWithLaneSize& pd,
+            const PRegisterZ& pg,
+            const PRegisterWithLaneSize& pn);
+
+  // Multiply-add vectors (predicated), writing multiplicand
+  // [Zdn = Za + Zdn * Zm].
+  void mad(const ZRegister& zdn,
+           const PRegisterM& pg,
+           const ZRegister& zm,
+           const ZRegister& za);
+
+  // Multiply-add vectors (predicated), writing addend
+  // [Zda = Zda + Zn * Zm].
+  void mla(const ZRegister& zda,
+           const PRegisterM& pg,
+           const ZRegister& zn,
+           const ZRegister& zm);
+
+  // Multiply-subtract vectors (predicated), writing addend
+  // [Zda = Zda - Zn * Zm].
+  void mls(const ZRegister& zda,
+           const PRegisterM& pg,
+           const ZRegister& zn,
+           const ZRegister& zm);
+
+  // Move predicates (unpredicated).
+  void mov(const PRegister& pd, const PRegister& pn);
+
+  // Move predicates (merging).
+  void mov(const PRegisterWithLaneSize& pd,
+           const PRegisterM& pg,
+           const PRegisterWithLaneSize& pn);
+
+  // Move predicates (zeroing).
+  void mov(const PRegisterWithLaneSize& pd,
+           const PRegisterZ& pg,
+           const PRegisterWithLaneSize& pn);
+
+  // Move general-purpose register to vector elements (unpredicated).
+  void mov(const ZRegister& zd, const Register& xn);
+
+  // Move SIMD&FP scalar register to vector elements (unpredicated).
+  void mov(const ZRegister& zd, const VRegister& vn);
+
+  // Move vector register (unpredicated).
+  void mov(const ZRegister& zd, const ZRegister& zn);
+
+  // Move indexed element to vector elements (unpredicated).
+  void mov(const ZRegister& zd, const ZRegister& zn, unsigned index);
+
+  // Move general-purpose register to vector elements (predicated).
+  void mov(const ZRegister& zd, const PRegisterM& pg, const Register& rn);
+
+  // Move SIMD&FP scalar register to vector elements (predicated).
+  void mov(const ZRegister& zd, const PRegisterM& pg, const VRegister& vn);
+
+  // Move vector elements (predicated).
+  void mov(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Move signed integer immediate to vector elements (predicated).
+  void mov(const ZRegister& zd, const PRegister& pg, int imm8, int shift = -1);
+
+  // Move signed immediate to vector elements (unpredicated).
+  void mov(const ZRegister& zd, int imm8, int shift);
+
+  // Move logical bitmask immediate to vector (unpredicated).
+  void mov(const ZRegister& zd, uint64_t imm);
+
+  // Move predicate (unpredicated), setting the condition flags.
+  void movs(const PRegister& pd, const PRegister& pn);
+
+  // Move predicates (zeroing), setting the condition flags.
+  void movs(const PRegisterWithLaneSize& pd,
+            const PRegisterZ& pg,
+            const PRegisterWithLaneSize& pn);
+
+  // Move prefix (predicated).
+  void movprfx(const ZRegister& zd, const PRegister& pg, const ZRegister& zn);
+
+  // Move prefix (unpredicated).
+  void movprfx(const ZRegister& zd, const ZRegister& zn);
+
+  // Multiply-subtract vectors (predicated), writing multiplicand
+  // [Zdn = Za - Zdn * Zm].
+  void msb(const ZRegister& zdn,
+           const PRegisterM& pg,
+           const ZRegister& zm,
+           const ZRegister& za);
+
+  // Multiply vectors (predicated).
+  void mul(const ZRegister& zd,
+           const PRegisterM& pg,
+           const ZRegister& zn,
+           const ZRegister& zm);
+
+  // Multiply by immediate (unpredicated).
+  void mul(const ZRegister& zd, const ZRegister& zn, int imm8);
+
+  // Bitwise NAND predicates.
+ void nand(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise NAND predicates. + void nands(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Negate (predicated). + void neg(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Bitwise NOR predicates. + void nor(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise NOR predicates. + void nors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise invert vector (predicated). + void not_(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Bitwise OR inverted predicate. + void orn(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise OR inverted predicate. + void orns(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise OR with inverted immediate (unpredicated). + void orn(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise OR predicate. + void orr(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise OR vectors (predicated). + void orr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise OR with immediate (unpredicated). + void orr(const ZRegister& zd, const ZRegister& zn, uint64_t imm); + + // Bitwise OR vectors (unpredicated). + void orr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise OR predicate. + void orrs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Bitwise OR reduction to scalar. + void orv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Set all predicate elements to false. + void pfalse(const PRegisterWithLaneSize& pd); + + // Set the first active predicate element to true. + void pfirst(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + + // Find next active predicate. + void pnext(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + + // Prefetch bytes. + void prfb(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr); + + // Prefetch halfwords. + void prfh(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr); + + // Prefetch words. + void prfw(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr); + + // Prefetch doublewords. + void prfd(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr); + + // Set condition flags for predicate. + void ptest(const PRegister& pg, const PRegisterWithLaneSize& pn); + + // Initialise predicate from named constraint. + void ptrue(const PRegisterWithLaneSize& pd, int pattern = SVE_ALL); + + // Initialise predicate from named constraint. + void ptrues(const PRegisterWithLaneSize& pd, int pattern = SVE_ALL); + + // Unpack and widen half of predicate. 
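+  // Each B-sized element in the high half of pn widens to an H-sized
+  // element, as in (arbitrary registers):
+  //   punpkhi(p0.VnH(), p1.VnB());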
+  void punpkhi(const PRegisterWithLaneSize& pd,
+               const PRegisterWithLaneSize& pn);
+
+  // Unpack and widen half of predicate.
+  void punpklo(const PRegisterWithLaneSize& pd,
+               const PRegisterWithLaneSize& pn);
+
+  // Reverse bits (predicated).
+  void rbit(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Read the first-fault register.
+  void rdffr(const PRegisterWithLaneSize& pd);
+
+  // Return predicate of successfully loaded elements.
+  void rdffr(const PRegisterWithLaneSize& pd, const PRegisterZ& pg);
+
+  // Return predicate of successfully loaded elements, setting the condition
+  // flags.
+  void rdffrs(const PRegisterWithLaneSize& pd, const PRegisterZ& pg);
+
+  // Read multiple of vector register size to scalar register.
+  void rdvl(const Register& xd, int imm6);
+
+  // Reverse all elements in a predicate.
+  void rev(const PRegisterWithLaneSize& pd, const PRegisterWithLaneSize& pn);
+
+  // Reverse all elements in a vector (unpredicated).
+  void rev(const ZRegister& zd, const ZRegister& zn);
+
+  // Reverse bytes / halfwords / words within elements (predicated).
+  void revb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Reverse bytes / halfwords / words within elements (predicated).
+  void revh(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Reverse bytes / halfwords / words within elements (predicated).
+  void revw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Signed absolute difference (predicated).
+  void sabd(const ZRegister& zd,
+            const PRegisterM& pg,
+            const ZRegister& zn,
+            const ZRegister& zm);
+
+  // Signed add reduction to scalar.
+  void saddv(const VRegister& dd, const PRegister& pg, const ZRegister& zn);
+
+  // Signed integer convert to floating-point (predicated).
+  void scvtf(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn);
+
+  // Signed divide (predicated).
+  void sdiv(const ZRegister& zd,
+            const PRegisterM& pg,
+            const ZRegister& zn,
+            const ZRegister& zm);
+
+  // Signed reversed divide (predicated).
+  void sdivr(const ZRegister& zd,
+             const PRegisterM& pg,
+             const ZRegister& zn,
+             const ZRegister& zm);
+
+  // Signed dot product by indexed quadtuplet.
+  void sdot(const ZRegister& zda,
+            const ZRegister& zn,
+            const ZRegister& zm,
+            int index);
+
+  // Signed dot product.
+  void sdot(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Conditionally select elements from two predicates.
+  void sel(const PRegisterWithLaneSize& pd,
+           const PRegister& pg,
+           const PRegisterWithLaneSize& pn,
+           const PRegisterWithLaneSize& pm);
+
+  // Conditionally select elements from two vectors.
+  void sel(const ZRegister& zd,
+           const PRegister& pg,
+           const ZRegister& zn,
+           const ZRegister& zm);
+
+  // Initialise the first-fault register to all true.
+  void setffr();
+
+  // Signed maximum vectors (predicated).
+  void smax(const ZRegister& zd,
+            const PRegisterM& pg,
+            const ZRegister& zn,
+            const ZRegister& zm);
+
+  // Signed maximum with immediate (unpredicated).
+  void smax(const ZRegister& zd, const ZRegister& zn, int imm8);
+
+  // Signed maximum reduction to scalar.
+  void smaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn);
+
+  // Signed minimum vectors (predicated).
+  void smin(const ZRegister& zd,
+            const PRegisterM& pg,
+            const ZRegister& zn,
+            const ZRegister& zm);
+
+  // Signed minimum with immediate (unpredicated).
+  void smin(const ZRegister& zd, const ZRegister& zn, int imm8);
+
+  // Signed minimum reduction to scalar.
+ void sminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Signed multiply returning high half (predicated). + void smulh(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Splice two vectors under predicate control. + void splice(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Splice two vectors under predicate control (constructive). + void splice_con(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating add vectors (unpredicated). + void sqadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating add immediate (unpredicated). + void sqadd(const ZRegister& zd, + const ZRegister& zn, + int imm8, + int shift = -1); + + // Signed saturating decrement scalar by multiple of 8-bit predicate + // constraint element count. + void sqdecb(const Register& xd, + const Register& wn, + int pattern, + int multiplier); + + // Signed saturating decrement scalar by multiple of 8-bit predicate + // constraint element count. + void sqdecb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement scalar by multiple of 64-bit predicate + // constraint element count. + void sqdecd(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating decrement scalar by multiple of 64-bit predicate + // constraint element count. + void sqdecd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement vector by multiple of 64-bit predicate + // constraint element count. + void sqdecd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement scalar by multiple of 16-bit predicate + // constraint element count. + void sqdech(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating decrement scalar by multiple of 16-bit predicate + // constraint element count. + void sqdech(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement vector by multiple of 16-bit predicate + // constraint element count. + void sqdech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement scalar by active predicate element count. + void sqdecp(const Register& xd, + const PRegisterWithLaneSize& pg, + const Register& wn); + + // Signed saturating decrement scalar by active predicate element count. + void sqdecp(const Register& xdn, const PRegisterWithLaneSize& pg); + + // Signed saturating decrement vector by active predicate element count. + void sqdecp(const ZRegister& zdn, const PRegister& pg); + + // Signed saturating decrement scalar by multiple of 32-bit predicate + // constraint element count. + void sqdecw(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating decrement scalar by multiple of 32-bit predicate + // constraint element count. + void sqdecw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating decrement vector by multiple of 32-bit predicate + // constraint element count. + void sqdecw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment scalar by multiple of 8-bit predicate + // constraint element count. 
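+  // In this form wn is expected to be the 32-bit view of the same register
+  // as xd; an illustrative call:
+  //   sqincb(x0, w0);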
+ void sqincb(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating increment scalar by multiple of 8-bit predicate + // constraint element count. + void sqincb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment scalar by multiple of 64-bit predicate + // constraint element count. + void sqincd(const Register& xd, + const Register& wn, + int pattern, + int multiplier); + + // Signed saturating increment scalar by multiple of 64-bit predicate + // constraint element count. + void sqincd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment vector by multiple of 64-bit predicate + // constraint element count. + void sqincd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment scalar by multiple of 16-bit predicate + // constraint element count. + void sqinch(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating increment scalar by multiple of 16-bit predicate + // constraint element count. + void sqinch(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment vector by multiple of 16-bit predicate + // constraint element count. + void sqinch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment scalar by active predicate element count. + void sqincp(const Register& xd, + const PRegisterWithLaneSize& pg, + const Register& wn); + + // Signed saturating increment scalar by active predicate element count. + void sqincp(const Register& xdn, const PRegisterWithLaneSize& pg); + + // Signed saturating increment vector by active predicate element count. + void sqincp(const ZRegister& zdn, const PRegister& pg); + + // Signed saturating increment scalar by multiple of 32-bit predicate + // constraint element count. + void sqincw(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1); + + // Signed saturating increment scalar by multiple of 32-bit predicate + // constraint element count. + void sqincw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating increment vector by multiple of 32-bit predicate + // constraint element count. + void sqincw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Signed saturating subtract vectors (unpredicated). + void sqsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating subtract immediate (unpredicated). + void sqsub(const ZRegister& zd, + const ZRegister& zn, + int imm8, + int shift = -1); + + // Contiguous/scatter store bytes from vector. + void st1b(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + + // Contiguous/scatter store halfwords from vector. + void st1h(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + + // Contiguous/scatter store words from vector. + void st1w(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + + // Contiguous/scatter store doublewords from vector. + void st1d(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + + // Contiguous store two-byte structures from two vectors. + void st2b(const ZRegister& zt1, + const ZRegister& zt2, + const PRegister& pg, + const SVEMemOperand& addr); + + // Contiguous store two-halfword structures from two vectors. 
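+  // For example (assuming x0 holds a valid base address; registers
+  // otherwise arbitrary):
+  //   st2h(z0.VnH(), z1.VnH(), p0, SVEMemOperand(x0));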
+  void st2h(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store two-word structures from two vectors.
+  void st2w(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store two-doubleword structures from two vectors.
+  void st2d(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store three-byte structures from three vectors.
+  void st3b(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store three-halfword structures from three vectors.
+  void st3h(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store three-word structures from three vectors.
+  void st3w(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store three-doubleword structures from three vectors.
+  void st3d(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store four-byte structures from four vectors.
+  void st4b(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const ZRegister& zt4,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store four-halfword structures from four vectors.
+  void st4h(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const ZRegister& zt4,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store four-word structures from four vectors.
+  void st4w(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const ZRegister& zt4,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store four-doubleword structures from four vectors.
+  void st4d(const ZRegister& zt1,
+            const ZRegister& zt2,
+            const ZRegister& zt3,
+            const ZRegister& zt4,
+            const PRegister& pg,
+            const SVEMemOperand& addr);
+
+  // Contiguous store non-temporal bytes from vector.
+  void stnt1b(const ZRegister& zt,
+              const PRegister& pg,
+              const SVEMemOperand& addr);
+
+  // Contiguous store non-temporal halfwords from vector.
+  void stnt1h(const ZRegister& zt,
+              const PRegister& pg,
+              const SVEMemOperand& addr);
+
+  // Contiguous store non-temporal words from vector.
+  void stnt1w(const ZRegister& zt,
+              const PRegister& pg,
+              const SVEMemOperand& addr);
+
+  // Contiguous store non-temporal doublewords from vector.
+  void stnt1d(const ZRegister& zt,
+              const PRegister& pg,
+              const SVEMemOperand& addr);
+
+  // Store SVE predicate/vector register.
+  void str(const CPURegister& rt, const SVEMemOperand& addr);
+
+  // Subtract vectors (predicated).
+  void sub(const ZRegister& zd,
+           const PRegisterM& pg,
+           const ZRegister& zn,
+           const ZRegister& zm);
+
+  // Subtract vectors (unpredicated).
+  void sub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Subtract immediate (unpredicated).
+  void sub(const ZRegister& zd, const ZRegister& zn, int imm8, int shift = -1);
+
+  // Reversed subtract vectors (predicated).
+  void subr(const ZRegister& zd,
+            const PRegisterM& pg,
+            const ZRegister& zn,
+            const ZRegister& zm);
+
+  // Reversed subtract from immediate (unpredicated).
+ void subr(const ZRegister& zd, const ZRegister& zn, int imm8, int shift = -1); + + // Signed unpack and extend half of vector. + void sunpkhi(const ZRegister& zd, const ZRegister& zn); + + // Signed unpack and extend half of vector. + void sunpklo(const ZRegister& zd, const ZRegister& zn); + + // Signed byte extend (predicated). + void sxtb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Signed halfword extend (predicated). + void sxth(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Signed word extend (predicated). + void sxtw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Programmable table lookup/permute using vector of indices into a + // vector. + void tbl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Interleave even or odd elements from two predicates. + void trn1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Interleave even or odd elements from two vectors. + void trn1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Interleave even or odd elements from two predicates. + void trn2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Interleave even or odd elements from two vectors. + void trn2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned absolute difference (predicated). + void uabd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned add reduction to scalar. + void uaddv(const VRegister& dd, const PRegister& pg, const ZRegister& zn); + + // Unsigned integer convert to floating-point (predicated). + void ucvtf(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Unsigned divide (predicated). + void udiv(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned reversed divide (predicated). + void udivr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned dot product by indexed quadtuplet. + void udot(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned dot product. + void udot(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Unsigned maximum vectors (predicated). + void umax(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned maximum with immediate (unpredicated). + void umax(const ZRegister& zd, const ZRegister& zn, int imm8); + + // Unsigned maximum reduction to scalar. + void umaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Unsigned minimum vectors (predicated). + void umin(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned minimum with immediate (unpredicated). + void umin(const ZRegister& zd, const ZRegister& zn, int imm8); + + // Unsigned minimum reduction to scalar. + void uminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn); + + // Unsigned multiply returning high half (predicated). + void umulh(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating add vectors (unpredicated). + void uqadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned saturating add immediate (unpredicated). 
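+  // The shift defaults to -1, leaving the encoding to be inferred; an
+  // illustrative saturating add of 42 to every byte lane:
+  //   uqadd(z0.VnB(), z0.VnB(), 42);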
+ void uqadd(const ZRegister& zd, + const ZRegister& zn, + int imm8, + int shift = -1); + + // Unsigned saturating decrement scalar by multiple of 8-bit predicate + // constraint element count. + void uqdecb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement scalar by multiple of 64-bit predicate + // constraint element count. + void uqdecd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement vector by multiple of 64-bit predicate + // constraint element count. + void uqdecd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement scalar by multiple of 16-bit predicate + // constraint element count. + void uqdech(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement vector by multiple of 16-bit predicate + // constraint element count. + void uqdech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement scalar by active predicate element count. + void uqdecp(const Register& rdn, const PRegisterWithLaneSize& pg); + + // Unsigned saturating decrement vector by active predicate element count. + void uqdecp(const ZRegister& zdn, const PRegister& pg); + + // Unsigned saturating decrement scalar by multiple of 32-bit predicate + // constraint element count. + void uqdecw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating decrement vector by multiple of 32-bit predicate + // constraint element count. + void uqdecw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment scalar by multiple of 8-bit predicate + // constraint element count. + void uqincb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment scalar by multiple of 64-bit predicate + // constraint element count. + void uqincd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment vector by multiple of 64-bit predicate + // constraint element count. + void uqincd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment scalar by multiple of 16-bit predicate + // constraint element count. + void uqinch(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment vector by multiple of 16-bit predicate + // constraint element count. + void uqinch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment scalar by active predicate element count. + void uqincp(const Register& rdn, const PRegisterWithLaneSize& pg); + + // Unsigned saturating increment vector by active predicate element count. + void uqincp(const ZRegister& zdn, const PRegister& pg); + + // Unsigned saturating increment scalar by multiple of 32-bit predicate + // constraint element count. + void uqincw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating increment vector by multiple of 32-bit predicate + // constraint element count. + void uqincw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1); + + // Unsigned saturating subtract vectors (unpredicated). + void uqsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned saturating subtract immediate (unpredicated). 
+ void uqsub(const ZRegister& zd, + const ZRegister& zn, + int imm8, + int shift = -1); + + // Unsigned unpack and extend half of vector. + void uunpkhi(const ZRegister& zd, const ZRegister& zn); + + // Unsigned unpack and extend half of vector. + void uunpklo(const ZRegister& zd, const ZRegister& zn); + + // Unsigned byte extend (predicated). + void uxtb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Unsigned halfword extend (predicated). + void uxth(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Unsigned word extend (predicated). + void uxtw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Concatenate even or odd elements from two predicates. + void uzp1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Concatenate even or odd elements from two vectors. + void uzp1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Concatenate even or odd elements from two predicates. + void uzp2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Concatenate even or odd elements from two vectors. + void uzp2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // While incrementing signed scalar less than or equal to scalar. + void whilele(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While incrementing unsigned scalar lower than scalar. + void whilelo(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While incrementing unsigned scalar lower or same as scalar. + void whilels(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While incrementing signed scalar less than scalar. + void whilelt(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // Write the first-fault register. + void wrffr(const PRegisterWithLaneSize& pn); + + // Interleave elements from two half predicates. + void zip1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Interleave elements from two half vectors. + void zip1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Interleave elements from two half predicates. + void zip2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm); + + // Interleave elements from two half vectors. + void zip2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Add with carry long (bottom). + void adclb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Add with carry long (top). + void adclt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Add narrow high part (bottom). + void addhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Add narrow high part (top). + void addhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Add pairwise. + void addp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Bitwise clear and exclusive OR. + void bcax(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Scatter lower bits into positions selected by bitmask. + void bdep(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Gather lower bits from positions selected by bitmask. 
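+  // An illustrative call (arbitrary registers; requires the SVE2 bit
+  // permute extension):
+  //   bext(z0.VnD(), z1.VnD(), z2.VnD());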
+ void bext(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Group bits to right or left as selected by bitmask. + void bgrp(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise select. + void bsl(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Bitwise select with first input inverted. + void bsl1n(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Bitwise select with second input inverted. + void bsl2n(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Complex integer add with rotate. + void cadd(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Complex integer dot product (indexed). + void cdot(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + + // Complex integer dot product. + void cdot(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Complex integer multiply-add with rotate (indexed). + void cmla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + + // Complex integer multiply-add with rotate. + void cmla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Bitwise exclusive OR of three vectors. + void eor3(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Interleaving exclusive OR (bottom, top). + void eorbt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Interleaving exclusive OR (top, bottom). + void eortb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Floating-point add pairwise. + void faddp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point up convert long (top, predicated). + void fcvtlt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point down convert and narrow (top, predicated). + void fcvtnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point down convert, rounding to odd (predicated). + void fcvtx(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point down convert, rounding to odd (top, predicated). + void fcvtxnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point base 2 logarithm as integer. + void flogb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Floating-point maximum number pairwise. + void fmaxnmp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point maximum pairwise. + void fmaxp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point minimum number pairwise. + void fminnmp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point minimum pairwise. + void fminp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Half-precision floating-point multiply-add long to single-precision + // (bottom). + void fmlalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Half-precision floating-point multiply-add long to single-precision + // (top). 
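+  // The accumulator takes single-precision lanes while zn and zm hold
+  // half-precision lanes, e.g. (arbitrary registers):
+  //   fmlalt(z0.VnS(), z1.VnH(), z2.VnH());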
+ void fmlalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Half-precision floating-point multiply-subtract long from + // single-precision (bottom). + void fmlslb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Half-precision floating-point multiply-subtract long from + // single-precision (top, indexed). + void fmlslt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Half-precision floating-point multiply-add long to single-precision + // (bottom, indexed). + void fmlalb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Half-precision floating-point multiply-add long to single-precision + // (top, indexed). + void fmlalt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Half-precision floating-point multiply-subtract long from + // single-precision (bottom, indexed). + void fmlslb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Half-precision floating-point multiply-subtract long from + // single-precision (top). + void fmlslt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Count matching elements in vector. + void histcnt(const ZRegister& zd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Count matching elements in vector segments. + void histseg(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Gather load non-temporal signed bytes. + void ldnt1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Gather load non-temporal signed halfwords. + void ldnt1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Gather load non-temporal signed words. + void ldnt1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Detect any matching elements, setting the condition flags. + void match(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Multiply-add to accumulator (indexed). + void mla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Multiply-subtract from accumulator (indexed). + void mls(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Multiply (indexed). + void mul(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Multiply vectors (unpredicated). + void mul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Bitwise inverted select. + void nbsl(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + + // Detect no matching elements, setting the condition flags. + void nmatch(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Polynomial multiply vectors (unpredicated). + void pmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Polynomial multiply long (bottom). + void pmullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Polynomial multiply long (top). + void pmullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Rounding add narrow high part (bottom). + void raddhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Rounding add narrow high part (top). 
+ void raddhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Rounding shift right narrow by immediate (bottom). + void rshrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Rounding shift right narrow by immediate (top). + void rshrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Rounding subtract narrow high part (bottom). + void rsubhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Rounding subtract narrow high part (top). + void rsubhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed absolute difference and accumulate. + void saba(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed absolute difference and accumulate long (bottom). + void sabalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed absolute difference and accumulate long (top). + void sabalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed absolute difference long (bottom). + void sabdlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed absolute difference long (top). + void sabdlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed add and accumulate long pairwise. + void sadalp(const ZRegister& zda, const PRegisterM& pg, const ZRegister& zn); + + // Signed add long (bottom). + void saddlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed add long (bottom + top). + void saddlbt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed add long (top). + void saddlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed add wide (bottom). + void saddwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed add wide (top). + void saddwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Subtract with carry long (bottom). + void sbclb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Subtract with carry long (top). + void sbclt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed halving addition. + void shadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Shift right narrow by immediate (bottom). + void shrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Shift right narrow by immediate (top). + void shrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed halving subtract. + void shsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed halving subtract reversed vectors. + void shsubr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Shift left and insert (immediate). + void sli(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed maximum pairwise. + void smaxp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed minimum pairwise. + void sminp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed multiply-add long to accumulator (bottom, indexed). + void smlalb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply-add long to accumulator (bottom). 
+ void smlalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply-add long to accumulator (top, indexed). + void smlalt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply-add long to accumulator (top). + void smlalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply-subtract long from accumulator (bottom, indexed). + void smlslb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply-subtract long from accumulator (bottom). + void smlslb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply-subtract long from accumulator (top, indexed). + void smlslt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply-subtract long from accumulator (top). + void smlslt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply returning high half (unpredicated). + void smulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply long (bottom, indexed). + void smullb(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply long (bottom). + void smullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed multiply long (top, indexed). + void smullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed multiply long (top). + void smullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating absolute value. + void sqabs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Signed saturating addition (predicated). + void sqadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Saturating complex integer add with rotate. + void sqcadd(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Signed saturating doubling multiply-add long to accumulator (bottom, + // indexed). + void sqdmlalb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply-add long to accumulator (bottom). + void sqdmlalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply-add long to accumulator (bottom x + // top). + void sqdmlalbt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating doubling multiply-add long to accumulator (top, + // indexed). + void sqdmlalt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply-add long to accumulator (top). + void sqdmlalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply-subtract long from accumulator + // (bottom, indexed). + void sqdmlslb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply-subtract long from accumulator + // (bottom). + void sqdmlslb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply-subtract long from accumulator + // (bottom x top). + void sqdmlslbt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating doubling multiply-subtract long from accumulator + // (top, indexed). 
+ void sqdmlslt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply-subtract long from accumulator + // (top). + void sqdmlslt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply high (indexed). + void sqdmulh(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply high (unpredicated). + void sqdmulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply long (bottom, indexed). + void sqdmullb(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply long (bottom). + void sqdmullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating doubling multiply long (top, indexed). + void sqdmullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating doubling multiply long (top). + void sqdmullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating negate. + void sqneg(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Saturating rounding doubling complex integer multiply-add high with + // rotate (indexed). + void sqrdcmlah(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + + // Saturating rounding doubling complex integer multiply-add high with + // rotate. + void sqrdcmlah(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int rot); + + // Signed saturating rounding doubling multiply-add high to accumulator + // (indexed). + void sqrdmlah(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating rounding doubling multiply-add high to accumulator + // (unpredicated). + void sqrdmlah(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating rounding doubling multiply-subtract high from + // accumulator (indexed). + void sqrdmlsh(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating rounding doubling multiply-subtract high from + // accumulator (unpredicated). + void sqrdmlsh(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating rounding doubling multiply high (indexed). + void sqrdmulh(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Signed saturating rounding doubling multiply high (unpredicated). + void sqrdmulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Signed saturating rounding shift left by vector (predicated). + void sqrshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating rounding shift left reversed vectors (predicated). + void sqrshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating rounding shift right narrow by immediate (bottom). + void sqrshrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating rounding shift right narrow by immediate (top). + void sqrshrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating rounding shift right unsigned narrow by immediate + // (bottom). 
+ void sqrshrunb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating rounding shift right unsigned narrow by immediate + // (top). + void sqrshrunt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating shift left by immediate. + void sqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Signed saturating shift left by vector (predicated). + void sqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating shift left reversed vectors (predicated). + void sqshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating shift left unsigned by immediate. + void sqshlu(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Signed saturating shift right narrow by immediate (bottom). + void sqshrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating shift right narrow by immediate (top). + void sqshrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating shift right unsigned narrow by immediate (bottom). + void sqshrunb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating shift right unsigned narrow by immediate (top). + void sqshrunt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed saturating subtraction (predicated). + void sqsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating subtraction reversed vectors (predicated). + void sqsubr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed saturating extract narrow (bottom). + void sqxtnb(const ZRegister& zd, const ZRegister& zn); + + // Signed saturating extract narrow (top). + void sqxtnt(const ZRegister& zd, const ZRegister& zn); + + // Signed saturating unsigned extract narrow (bottom). + void sqxtunb(const ZRegister& zd, const ZRegister& zn); + + // Signed saturating unsigned extract narrow (top). + void sqxtunt(const ZRegister& zd, const ZRegister& zn); + + // Signed rounding halving addition. + void srhadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Shift right and insert (immediate). + void sri(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed rounding shift left by vector (predicated). + void srshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed rounding shift left reversed vectors (predicated). + void srshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Signed rounding shift right by immediate. + void srshr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Signed rounding shift right and accumulate (immediate). + void srsra(const ZRegister& zda, const ZRegister& zn, int shift); + + // Signed shift left long by immediate (bottom). + void sshllb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed shift left long by immediate (top). + void sshllt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Signed shift right and accumulate (immediate). + void ssra(const ZRegister& zda, const ZRegister& zn, int shift); + + // Signed subtract long (bottom). 
+  void ssublb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed subtract long (bottom - top).
+  void ssublbt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed subtract long (top).
+  void ssublt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed subtract long (top - bottom).
+  void ssubltb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed subtract wide (bottom).
+  void ssubwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed subtract wide (top).
+  void ssubwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Subtract narrow high part (bottom).
+  void subhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Subtract narrow high part (top).
+  void subhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed saturating addition of unsigned value.
+  void suqadd(const ZRegister& zd,
+              const PRegisterM& pg,
+              const ZRegister& zn,
+              const ZRegister& zm);
+
+  // Programmable table lookup in one or two vector tables (zeroing).
+  void tbl(const ZRegister& zd,
+           const ZRegister& zn1,
+           const ZRegister& zn2,
+           const ZRegister& zm);
+
+  // Programmable table lookup in a single vector table (merging).
+  void tbx(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned absolute difference and accumulate.
+  void uaba(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned absolute difference and accumulate long (bottom).
+  void uabalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned absolute difference and accumulate long (top).
+  void uabalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned absolute difference long (bottom).
+  void uabdlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned absolute difference long (top).
+  void uabdlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned add and accumulate long pairwise.
+  void uadalp(const ZRegister& zda, const PRegisterM& pg, const ZRegister& zn);
+
+  // Unsigned add long (bottom).
+  void uaddlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned add long (top).
+  void uaddlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned add wide (bottom).
+  void uaddwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned add wide (top).
+  void uaddwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned halving addition.
+  void uhadd(const ZRegister& zd,
+             const PRegisterM& pg,
+             const ZRegister& zn,
+             const ZRegister& zm);
+
+  // Unsigned halving subtract.
+  void uhsub(const ZRegister& zd,
+             const PRegisterM& pg,
+             const ZRegister& zn,
+             const ZRegister& zm);
+
+  // Unsigned halving subtract reversed vectors.
+  void uhsubr(const ZRegister& zd,
+              const PRegisterM& pg,
+              const ZRegister& zn,
+              const ZRegister& zm);
+
+  // Unsigned maximum pairwise.
+  void umaxp(const ZRegister& zd,
+             const PRegisterM& pg,
+             const ZRegister& zn,
+             const ZRegister& zm);
+
+  // Unsigned minimum pairwise.
+  void uminp(const ZRegister& zd,
+             const PRegisterM& pg,
+             const ZRegister& zn,
+             const ZRegister& zm);
+
+  // Unsigned multiply-add long to accumulator (bottom, indexed).
+ void umlalb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply-add long to accumulator (bottom). + void umlalb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply-add long to accumulator (top, indexed). + void umlalt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply-add long to accumulator (top). + void umlalt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply-subtract long from accumulator (bottom, indexed). + void umlslb(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply-subtract long from accumulator (bottom). + void umlslb(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply-subtract long from accumulator (top, indexed). + void umlslt(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply-subtract long from accumulator (top). + void umlslt(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply returning high half (unpredicated). + void umulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply long (bottom, indexed). + void umullb(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply long (bottom). + void umullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned multiply long (top, indexed). + void umullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // Unsigned multiply long (top). + void umullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned saturating addition (predicated). + void uqadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating rounding shift left by vector (predicated). + void uqrshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating rounding shift left reversed vectors (predicated). + void uqrshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating rounding shift right narrow by immediate (bottom). + void uqrshrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate (top). + void uqrshrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned saturating shift left by immediate. + void uqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Unsigned saturating shift left by vector (predicated). + void uqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating shift left reversed vectors (predicated). + void uqshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating shift right narrow by immediate (bottom). + void uqshrnb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned saturating shift right narrow by immediate (top). + void uqshrnt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned saturating subtraction (predicated). 
+ void uqsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating subtraction reversed vectors (predicated). + void uqsubr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned saturating extract narrow (bottom). + void uqxtnb(const ZRegister& zd, const ZRegister& zn); + + // Unsigned saturating extract narrow (top). + void uqxtnt(const ZRegister& zd, const ZRegister& zn); + + // Unsigned reciprocal estimate (predicated). + void urecpe(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Unsigned rounding halving addition. + void urhadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned rounding shift left by vector (predicated). + void urshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned rounding shift left reversed vectors (predicated). + void urshlr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned rounding shift right by immediate. + void urshr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift); + + // Unsigned reciprocal square root estimate (predicated). + void ursqrte(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn); + + // Unsigned rounding shift right and accumulate (immediate). + void ursra(const ZRegister& zda, const ZRegister& zn, int shift); + + // Unsigned shift left long by immediate (bottom). + void ushllb(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned shift left long by immediate (top). + void ushllt(const ZRegister& zd, const ZRegister& zn, int shift); + + // Unsigned saturating addition of signed value. + void usqadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Unsigned shift right and accumulate (immediate). + void usra(const ZRegister& zda, const ZRegister& zn, int shift); + + // Unsigned subtract long (bottom). + void usublb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned subtract long (top). + void usublt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned subtract wide (bottom). + void usubwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // Unsigned subtract wide (top). + void usubwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm); + + // While decrementing signed scalar greater than or equal to scalar. + void whilege(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While decrementing signed scalar greater than scalar. + void whilegt(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While decrementing unsigned scalar higher than scalar. + void whilehi(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While decrementing unsigned scalar higher or same as scalar. + void whilehs(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While free of read-after-write conflicts. + void whilerw(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // While free of write-after-read/write conflicts. + void whilewr(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm); + + // Bitwise exclusive OR and rotate right by immediate. 
+  void xar(const ZRegister& zd,
+           const ZRegister& zn,
+           const ZRegister& zm,
+           int shift);
+
+  // Floating-point matrix multiply-accumulate.
+  void fmmla(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Signed integer matrix multiply-accumulate.
+  void smmla(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned by signed integer matrix multiply-accumulate.
+  void usmmla(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned integer matrix multiply-accumulate.
+  void ummla(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned by signed integer dot product.
+  void usdot(const ZRegister& zda, const ZRegister& zn, const ZRegister& zm);
+
+  // Unsigned by signed integer indexed dot product.
+  void usdot(const ZRegister& zda,
+             const ZRegister& zn,
+             const ZRegister& zm,
+             int index);
+
+  // Signed by unsigned integer indexed dot product.
+  void sudot(const ZRegister& zda,
+             const ZRegister& zn,
+             const ZRegister& zm,
+             int index);
+
+  // Add with Tag.
+  void addg(const Register& xd, const Register& xn, int offset, int tag_offset);
+
+  // Tag Mask Insert.
+  void gmi(const Register& xd, const Register& xn, const Register& xm);
+
+  // Insert Random Tag.
+  void irg(const Register& xd, const Register& xn, const Register& xm = xzr);
+
+  // Load Allocation Tag.
+  void ldg(const Register& xt, const MemOperand& addr);
+
+  void StoreTagHelper(const Register& xt, const MemOperand& addr, Instr op);
+
+  // Store Allocation Tags.
+  void st2g(const Register& xt, const MemOperand& addr);
+
+  // Store Allocation Tag.
+  void stg(const Register& xt, const MemOperand& addr);
+
+  // Store Allocation Tag and Pair of registers.
+  void stgp(const Register& xt1, const Register& xt2, const MemOperand& addr);
+
+  // Store Allocation Tags, Zeroing.
+  void stz2g(const Register& xt, const MemOperand& addr);
+
+  // Store Allocation Tag, Zeroing.
+  void stzg(const Register& xt, const MemOperand& addr);
+
+  // Subtract with Tag.
+  void subg(const Register& xd, const Register& xn, int offset, int tag_offset);
+
+  // Subtract Pointer.
+  void subp(const Register& xd, const Register& xn, const Register& xm);
+
+  // Subtract Pointer, setting Flags.
+  void subps(const Register& xd, const Register& xn, const Register& xm);
+
+  // Compare with Tag.
+  void cmpp(const Register& xn, const Register& xm) { subps(xzr, xn, xm); }
+
+  // Memory Copy (epilogue). The prologue (cpyp), main (cpym) and epilogue
+  // (cpye) instructions are emitted as a sequence to perform the whole copy.
+  void cpye(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (epilogue), reads and writes non-temporal.
+  void cpyen(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (epilogue), reads non-temporal.
+  void cpyern(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (epilogue), writes non-temporal.
+  void cpyewn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (epilogue).
+  void cpyfe(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (epilogue), reads and writes non-temporal.
+  void cpyfen(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (epilogue), reads non-temporal.
+  void cpyfern(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (epilogue), writes non-temporal.
+  void cpyfewn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (main).
+  void cpyfm(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (main), reads and writes non-temporal.
+  void cpyfmn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (main), reads non-temporal.
+  void cpyfmrn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (main), writes non-temporal.
+  void cpyfmwn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (prologue).
+  void cpyfp(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (prologue), reads and writes non-temporal.
+  void cpyfpn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (prologue), reads non-temporal.
+  void cpyfprn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy Forward-only (prologue), writes non-temporal.
+  void cpyfpwn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (main).
+  void cpym(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (main), reads and writes non-temporal.
+  void cpymn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (main), reads non-temporal.
+  void cpymrn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (main), writes non-temporal.
+  void cpymwn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (prologue).
+  void cpyp(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (prologue), reads and writes non-temporal.
+  void cpypn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (prologue), reads non-temporal.
+  void cpyprn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Copy (prologue), writes non-temporal.
+  void cpypwn(const Register& rd, const Register& rs, const Register& rn);
+
+  // Memory Set (epilogue). As with the copies, the prologue (setp), main
+  // (setm) and epilogue (sete) instructions are emitted as a sequence.
+  void sete(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set (epilogue), non-temporal.
+  void seten(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (epilogue).
+  void setge(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (epilogue), non-temporal.
+  void setgen(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (main).
+  void setgm(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (main), non-temporal.
+  void setgmn(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (prologue).
+  void setgp(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set with tag setting (prologue), non-temporal.
+  void setgpn(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set (main).
+  void setm(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set (main), non-temporal.
+  void setmn(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set (prologue).
+  void setp(const Register& rd, const Register& rn, const Register& rs);
+
+  // Memory Set (prologue), non-temporal.
+  void setpn(const Register& rd, const Register& rn, const Register& rs);
+
+  // Absolute value.
+  void abs(const Register& rd, const Register& rn);
+
+  // Count bits.
+  void cnt(const Register& rd, const Register& rn);
+
+  // Count Trailing Zeros.
+  void ctz(const Register& rd, const Register& rn);
+
+  // Signed Maximum.
+  void smax(const Register& rd, const Register& rn, const Operand& op);
+
+  // Signed Minimum.
+  void smin(const Register& rd, const Register& rn, const Operand& op);
+
+  // Unsigned Maximum.
+  void umax(const Register& rd, const Register& rn, const Operand& op);
+
+  // Unsigned Minimum.
+  void umin(const Register& rd, const Register& rn, const Operand& op);
+
   // Emit generic instructions.
+
   // Emit raw instructions into the instruction stream.
   void dci(Instr raw_inst) { Emit(raw_inst); }
@@ -3529,22 +7097,25 @@ class Assembler : public vixl::internal::AssemblerBase {
   }

   // Code generation helpers.
+  static bool OneInstrMoveImmediateHelper(Assembler* assm,
+                                          const Register& dst,
+                                          uint64_t imm);

   // Register encoding.
-  static Instr Rd(CPURegister rd) {
-    VIXL_ASSERT(rd.GetCode() != kSPRegInternalCode);
-    return rd.GetCode() << Rd_offset;
+  template <int hibit, int lobit>
+  static Instr Rx(CPURegister rx) {
+    VIXL_ASSERT(rx.GetCode() != kSPRegInternalCode);
+    return ImmUnsignedField<hibit, lobit>(rx.GetCode());
   }

-  static Instr Rn(CPURegister rn) {
-    VIXL_ASSERT(rn.GetCode() != kSPRegInternalCode);
-    return rn.GetCode() << Rn_offset;
-  }
-
-  static Instr Rm(CPURegister rm) {
-    VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
-    return rm.GetCode() << Rm_offset;
+#define CPU_REGISTER_FIELD_NAMES(V) V(d) V(n) V(m) V(a) V(t) V(t2) V(s)
+#define REGISTER_ENCODER(N)                                           \
+  static Instr R##N(CPURegister r##N) {                               \
+    return Rx<R##N##_offset + R##N##_width - 1, R##N##_offset>(r##N); \
   }
+  CPU_REGISTER_FIELD_NAMES(REGISTER_ENCODER)
+#undef REGISTER_ENCODER
+#undef CPU_REGISTER_FIELD_NAMES

   static Instr RmNot31(CPURegister rm) {
     VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
@@ -3552,26 +7123,6 @@ class Assembler : public vixl::internal::AssemblerBase {
     return Rm(rm);
   }

-  static Instr Ra(CPURegister ra) {
-    VIXL_ASSERT(ra.GetCode() != kSPRegInternalCode);
-    return ra.GetCode() << Ra_offset;
-  }
-
-  static Instr Rt(CPURegister rt) {
-    VIXL_ASSERT(rt.GetCode() != kSPRegInternalCode);
-    return rt.GetCode() << Rt_offset;
-  }
-
-  static Instr Rt2(CPURegister rt2) {
-    VIXL_ASSERT(rt2.GetCode() != kSPRegInternalCode);
-    return rt2.GetCode() << Rt2_offset;
-  }
-
-  static Instr Rs(CPURegister rs) {
-    VIXL_ASSERT(rs.GetCode() != kSPRegInternalCode);
-    return rs.GetCode() << Rs_offset;
-  }
-
   // These encoding functions allow the stack pointer to be encoded, and
   // disallow the zero register.
   static Instr RdSP(Register rd) {
@@ -3589,6 +7140,33 @@ class Assembler : public vixl::internal::AssemblerBase {
     return (rm.GetCode() & kRegCodeMask) << Rm_offset;
   }

+  static Instr Pd(PRegister pd) {
+    return Rx<Pd_offset + Pd_width - 1, Pd_offset>(pd);
+  }
+
+  static Instr Pm(PRegister pm) {
+    return Rx<Pm_offset + Pm_width - 1, Pm_offset>(pm);
+  }
+
+  static Instr Pn(PRegister pn) {
+    return Rx<Pn_offset + Pn_width - 1, Pn_offset>(pn);
+  }
+
+  static Instr PgLow8(PRegister pg) {
+    // Governing predicates can be merging, zeroing, or unqualified. They should
+    // never have a lane size.
+    VIXL_ASSERT(!pg.HasLaneSize());
+    return Rx<PgLow8_offset + PgLow8_width - 1, PgLow8_offset>(pg);
+  }
+
+  template <int hibit, int lobit>
+  static Instr Pg(PRegister pg) {
+    // Governing predicates can be merging, zeroing, or unqualified. They should
+    // never have a lane size.
+    VIXL_ASSERT(!pg.HasLaneSize());
+    return Rx<hibit, lobit>(pg);
+  }
+
   // Flags encoding.
   static Instr Flags(FlagsUpdate S) {
     if (S == SetFlags) {
@@ -3602,6 +7180,26 @@
   static Instr Cond(Condition cond) { return cond << Condition_offset; }

+  // Generic immediate encoding.
+  template <int hibit, int lobit>
+  static Instr ImmField(int64_t imm) {
+    VIXL_STATIC_ASSERT((hibit >= lobit) && (lobit >= 0));
+    VIXL_STATIC_ASSERT(hibit < (sizeof(Instr) * kBitsPerByte));
+    int fieldsize = hibit - lobit + 1;
+    VIXL_ASSERT(IsIntN(fieldsize, imm));
+    return static_cast<Instr>(TruncateToUintN(fieldsize, imm) << lobit);
+  }
+
+  // For unsigned immediate encoding.
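+  // As a worked example (illustrative values only): ImmField<20, 16>(-3)
+  // checks that -3 fits in the signed 5-bit field [20:16], truncates it to
+  // 0b11101 and returns that value shifted into place, while the unsigned
+  // ImmUnsignedField<20, 16>(5) below simply range-checks and returns 5 << 16.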
+  // TODO: Handle signed and unsigned immediates in a satisfactory way.
+  template <int hibit, int lobit>
+  static Instr ImmUnsignedField(uint64_t imm) {
+    VIXL_STATIC_ASSERT((hibit >= lobit) && (lobit >= 0));
+    VIXL_STATIC_ASSERT(hibit < (sizeof(Instr) * kBitsPerByte));
+    VIXL_ASSERT(IsUintN(hibit - lobit + 1, imm));
+    return static_cast<Instr>(imm << lobit);
+  }
+
   // PC-relative address encoding.
   static Instr ImmPCRelAddress(int64_t imm21) {
     VIXL_ASSERT(IsInt21(imm21));
@@ -3635,11 +7233,11 @@ class Assembler : public vixl::internal::AssemblerBase {
   static Instr ImmTestBranchBit(unsigned bit_pos) {
     VIXL_ASSERT(IsUint6(bit_pos));
     // Subtract five from the shift offset, as we need bit 5 from bit_pos.
-    unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
-    unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
-    b5 &= ImmTestBranchBit5_mask;
-    b40 &= ImmTestBranchBit40_mask;
-    return b5 | b40;
+    unsigned bit5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+    unsigned bit40 = bit_pos << ImmTestBranchBit40_offset;
+    bit5 &= ImmTestBranchBit5_mask;
+    bit40 &= ImmTestBranchBit40_mask;
+    return bit5 | bit40;
   }

   // Data Processing encoding.
@@ -3652,11 +7250,60 @@ class Assembler : public vixl::internal::AssemblerBase {
     if (IsUint12(imm)) {  // No shift required.
       imm <<= ImmAddSub_offset;
     } else {
-      imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+      imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ImmAddSubShift_offset);
     }
     return imm;
   }

+  static Instr SVEImmSetBits(unsigned imms, unsigned lane_size) {
+    VIXL_ASSERT(IsUint6(imms));
+    VIXL_ASSERT((lane_size == kDRegSize) || IsUint6(imms + 3));
+    USE(lane_size);
+    return imms << SVEImmSetBits_offset;
+  }
+
+  static Instr SVEImmRotate(unsigned immr, unsigned lane_size) {
+    VIXL_ASSERT(IsUintN(WhichPowerOf2(lane_size), immr));
+    USE(lane_size);
+    return immr << SVEImmRotate_offset;
+  }
+
+  static Instr SVEBitN(unsigned bitn) {
+    VIXL_ASSERT(IsUint1(bitn));
+    return bitn << SVEBitN_offset;
+  }
+
+  static Instr SVEDtype(unsigned msize_in_bytes_log2,
+                        unsigned esize_in_bytes_log2,
+                        bool is_signed,
+                        int dtype_h_lsb = 23,
+                        int dtype_l_lsb = 21) {
+    VIXL_ASSERT(msize_in_bytes_log2 <= kDRegSizeInBytesLog2);
+    VIXL_ASSERT(esize_in_bytes_log2 <= kDRegSizeInBytesLog2);
+    Instr dtype_h = msize_in_bytes_log2;
+    Instr dtype_l = esize_in_bytes_log2;
+    // Signed forms use the encodings where msize would be greater than esize.
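+    // For example, ld1sh into S-sized lanes has msize_in_bytes_log2 == 1 and
+    // esize_in_bytes_log2 == 2; the signed form encodes dtype_h = 1 ^ 0x3 = 2
+    // and dtype_l = 2 ^ 0x3 = 1, so dtype_h > dtype_l holds, as asserted
+    // below.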
+ if (is_signed) { + dtype_h = dtype_h ^ 0x3; + dtype_l = dtype_l ^ 0x3; + } + VIXL_ASSERT(IsUint2(dtype_h)); + VIXL_ASSERT(IsUint2(dtype_l)); + VIXL_ASSERT((dtype_h > dtype_l) == is_signed); + + return (dtype_h << dtype_h_lsb) | (dtype_l << dtype_l_lsb); + } + + static Instr SVEDtypeSplit(unsigned msize_in_bytes_log2, + unsigned esize_in_bytes_log2, + bool is_signed) { + return SVEDtype(msize_in_bytes_log2, + esize_in_bytes_log2, + is_signed, + 23, + 13); + } + static Instr ImmS(unsigned imms, unsigned reg_size) { VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) || ((reg_size == kWRegSize) && IsUint5(imms))); @@ -3737,9 +7384,10 @@ class Assembler : public vixl::internal::AssemblerBase { return TruncateToUint9(imm9) << ImmLS_offset; } - static Instr ImmLSPair(int64_t imm7, unsigned access_size) { - VIXL_ASSERT(IsMultiple(imm7, 1 << access_size)); - int64_t scaled_imm7 = imm7 / (1 << access_size); + static Instr ImmLSPair(int64_t imm7, unsigned access_size_in_bytes_log2) { + const auto access_size_in_bytes = 1U << access_size_in_bytes_log2; + VIXL_ASSERT(IsMultiple(imm7, access_size_in_bytes)); + int64_t scaled_imm7 = imm7 / access_size_in_bytes; VIXL_ASSERT(IsInt7(scaled_imm7)); return TruncateToUint7(scaled_imm7) << ImmLSPair_offset; } @@ -3749,6 +7397,15 @@ class Assembler : public vixl::internal::AssemblerBase { return shift_amount << ImmShiftLS_offset; } + static Instr ImmLSPAC(int64_t imm10) { + VIXL_ASSERT(IsMultiple(imm10, 1 << 3)); + int64_t scaled_imm10 = imm10 / (1 << 3); + VIXL_ASSERT(IsInt10(scaled_imm10)); + uint32_t s_bit = (scaled_imm10 >> 9) & 1; + return (s_bit << ImmLSPACHi_offset) | + (TruncateToUint9(scaled_imm10) << ImmLSPACLo_offset); + } + static Instr ImmPrefetchOperation(int imm5) { VIXL_ASSERT(IsUint5(imm5)); return imm5 << ImmPrefetchOperation_offset; @@ -3759,11 +7416,21 @@ class Assembler : public vixl::internal::AssemblerBase { return imm16 << ImmException_offset; } + static Instr ImmUdf(int imm16) { + VIXL_ASSERT(IsUint16(imm16)); + return imm16 << ImmUdf_offset; + } + static Instr ImmSystemRegister(int imm16) { VIXL_ASSERT(IsUint16(imm16)); return imm16 << ImmSystemRegister_offset; } + static Instr ImmRMIFRotation(int imm6) { + VIXL_ASSERT(IsUint6(imm6)); + return imm6 << ImmRMIFRotation_offset; + } + static Instr ImmHint(int imm7) { VIXL_ASSERT(IsUint7(imm7)); return imm7 << ImmHint_offset; @@ -3821,7 +7488,8 @@ class Assembler : public vixl::internal::AssemblerBase { static Instr ImmFP64(double imm); // FP register type. 
-  static Instr FPType(FPRegister fd) {
+  static Instr FPType(VRegister fd) {
+    VIXL_ASSERT(fd.IsScalar());
     switch (fd.GetSizeInBits()) {
       case 16:
         return FP16;
@@ -3844,15 +7512,21 @@ class Assembler : public vixl::internal::AssemblerBase {
   static bool IsImmAddSub(int64_t immediate);
   static bool IsImmConditionalCompare(int64_t immediate);
   static bool IsImmFP16(Float16 imm);
-  static bool IsImmFP32(float imm);
-  static bool IsImmFP64(double imm);
+
+  static bool IsImmFP32(float imm) { return IsImmFP32(FloatToRawbits(imm)); }
+
+  static bool IsImmFP32(uint32_t bits);
+
+  static bool IsImmFP64(double imm) { return IsImmFP64(DoubleToRawbits(imm)); }
+
+  static bool IsImmFP64(uint64_t bits);
   static bool IsImmLogical(uint64_t value,
                            unsigned width,
                            unsigned* n = NULL,
                            unsigned* imm_s = NULL,
                            unsigned* imm_r = NULL);
-  static bool IsImmLSPair(int64_t offset, unsigned access_size);
-  static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+  static bool IsImmLSPair(int64_t offset, unsigned access_size_in_bytes_log2);
+  static bool IsImmLSScaled(int64_t offset, unsigned access_size_in_bytes_log2);
   static bool IsImmLSUnscaled(int64_t offset);
   static bool IsImmMovn(uint64_t imm, unsigned reg_size);
   static bool IsImmMovz(uint64_t imm, unsigned reg_size);
@@ -3861,6 +7535,8 @@ class Assembler : public vixl::internal::AssemblerBase {
   static Instr VFormat(VRegister vd) {
     if (vd.Is64Bits()) {
       switch (vd.GetLanes()) {
+        case 1:
+          return NEON_1D;
         case 2:
           return NEON_2S;
         case 4:
@@ -3987,6 +7663,30 @@ class Assembler : public vixl::internal::AssemblerBase {
     }
   }

+  template <typename T>
+  static Instr SVESize(const T& rd) {
+    VIXL_ASSERT(rd.IsZRegister() || rd.IsPRegister());
+    VIXL_ASSERT(rd.HasLaneSize());
+    switch (rd.GetLaneSizeInBytes()) {
+      case 1:
+        return SVE_B;
+      case 2:
+        return SVE_H;
+      case 4:
+        return SVE_S;
+      case 8:
+        return SVE_D;
+      default:
+        return 0xffffffff;
+    }
+  }
+
+  static Instr ImmSVEPredicateConstraint(int pattern) {
+    VIXL_ASSERT(IsUint5(pattern));
+    return (pattern << ImmSVEPredicateConstraint_offset) &
+           ImmSVEPredicateConstraint_mask;
+  }
+
   static Instr ImmNEONHLM(int index, int num_bits) {
     int h, l, m;
     if (num_bits == 3) {
@@ -4113,6 +7813,10 @@ class Assembler : public vixl::internal::AssemblerBase {
                  LoadStoreOp op,
                  LoadStoreScalingOption option = PreferScaledOffset);

+  void LoadStorePAC(const Register& xt,
+                    const MemOperand& addr,
+                    LoadStorePACOp op);
+
   void LoadStorePair(const CPURegister& rt,
                      const CPURegister& rt2,
                      const MemOperand& addr,
@@ -4134,9 +7838,103 @@ class Assembler : public vixl::internal::AssemblerBase {
                          const MemOperand& addr,
                          Instr op);

+  // `is_load` defaults to false, as it is only used in the
+  // scalar-plus-vector form.
+  Instr SVEMemOperandHelper(unsigned msize_in_bytes_log2,
+                            int num_regs,
+                            const SVEMemOperand& addr,
+                            bool is_load = false);
+
+  // E.g. st1b, st1h, ...
+  // This supports both contiguous and scatter stores.
+  void SVESt1Helper(unsigned msize_in_bytes_log2,
+                    const ZRegister& zt,
+                    const PRegister& pg,
+                    const SVEMemOperand& addr);
+
+  // E.g. ld1b, ld1h, ...
+  // This supports both contiguous and gather loads.
+  void SVELd1Helper(unsigned msize_in_bytes_log2,
+                    const ZRegister& zt,
+                    const PRegisterZ& pg,
+                    const SVEMemOperand& addr,
+                    bool is_signed);
+
+  // E.g. ld1rb, ld1rh, ...
+  void SVELd1BroadcastHelper(unsigned msize_in_bytes_log2,
+                             const ZRegister& zt,
+                             const PRegisterZ& pg,
+                             const SVEMemOperand& addr,
+                             bool is_signed);
+
+  // E.g. ldff1b, ldff1h, ...
+  // This supports both contiguous and gather loads.
+ void SVELdff1Helper(unsigned msize_in_bytes_log2, + const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr, + bool is_signed); + + // Common code for the helpers above. + void SVELdSt1Helper(unsigned msize_in_bytes_log2, + const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr, + bool is_signed, + Instr op); + + // Common code for the helpers above. + void SVEScatterGatherHelper(unsigned msize_in_bytes_log2, + const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr, + bool is_load, + bool is_signed, + bool is_first_fault); + + // E.g. st2b, st3h, ... + void SVESt234Helper(int num_regs, + const ZRegister& zt1, + const PRegister& pg, + const SVEMemOperand& addr); + + // E.g. ld2b, ld3h, ... + void SVELd234Helper(int num_regs, + const ZRegister& zt1, + const PRegisterZ& pg, + const SVEMemOperand& addr); + + // Common code for the helpers above. + void SVELdSt234Helper(int num_regs, + const ZRegister& zt1, + const PRegister& pg, + const SVEMemOperand& addr, + Instr op); + + // E.g. ld1qb, ld1qh, ldnt1b, ... + void SVELd1St1ScaImmHelper(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr, + Instr regoffset_op, + Instr immoffset_op, + int imm_divisor = 1); + + void SVELd1VecScaHelper(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr, + uint32_t msize, + bool is_signed); + void SVESt1VecScaHelper(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr, + uint32_t msize); + void Prefetch(PrefetchOperation op, const MemOperand& addr, LoadStoreScalingOption option = PreferScaledOffset); + void Prefetch(int op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 // reports a bogus uninitialised warning then. 
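The mnemonic-level SVE loads and stores funnel through the small set of helpers declared above. As a minimal sketch of that pattern (not the library's actual definition, which also validates lane sizes and feature support), a two-register load such as ld2b could plausibly forward to SVELd234Helper along these lines:

  void Assembler::ld2b(const ZRegister& zt1,
                       const ZRegister& zt2,
                       const PRegisterZ& pg,
                       const SVEMemOperand& addr) {
    // Only zt1 is encoded; the second register is implicitly zt1 + 1 (mod 32),
    // so it is only checked here, not encoded.
    VIXL_ASSERT(AreConsecutive(zt1, zt2));
    USE(zt2);
    SVELd234Helper(2, zt1, pg, addr);
  }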
@@ -4144,6 +7942,9 @@ class Assembler : public vixl::internal::AssemblerBase { const Register& rn, const Operand operand, LogicalOp op); + + void SVELogicalImmediate(const ZRegister& zd, uint64_t imm, Instr op); + void LogicalImmediate(const Register& rd, const Register& rn, unsigned n, @@ -4163,6 +7964,95 @@ class Assembler : public vixl::internal::AssemblerBase { FlagsUpdate S, AddSubWithCarryOp op); + void CompareVectors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm, + SVEIntCompareVectorsOp op); + + void CompareVectors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + int imm, + SVEIntCompareSignedImmOp op); + + void CompareVectors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + unsigned imm, + SVEIntCompareUnsignedImmOp op); + + void SVEIntAddSubtractImmUnpredicatedHelper( + SVEIntAddSubtractImm_UnpredicatedOp op, + const ZRegister& zd, + int imm8, + int shift); + + void SVEElementCountToRegisterHelper(Instr op, + const Register& rd, + int pattern, + int multiplier); + + Instr EncodeSVEShiftLeftImmediate(int shift, int lane_size_in_bits); + + Instr EncodeSVEShiftRightImmediate(int shift, int lane_size_in_bits); + + void SVEBitwiseShiftImmediate(const ZRegister& zd, + const ZRegister& zn, + Instr encoded_imm, + Instr op); + + void SVEBitwiseShiftImmediatePred(const ZRegister& zdn, + const PRegisterM& pg, + Instr encoded_imm, + Instr op); + + Instr SVEMulIndexHelper(unsigned lane_size_in_bytes_log2, + const ZRegister& zm, + int index, + Instr op_h, + Instr op_s, + Instr op_d); + + Instr SVEMulLongIndexHelper(const ZRegister& zm, int index); + + Instr SVEMulComplexIndexHelper(const ZRegister& zm, int index); + + void SVEContiguousPrefetchScalarPlusScalarHelper(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr, + int prefetch_size); + + void SVEContiguousPrefetchScalarPlusVectorHelper(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr, + int prefetch_size); + + void SVEGatherPrefetchVectorPlusImmediateHelper(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr, + int prefetch_size); + + void SVEGatherPrefetchScalarPlusImmediateHelper(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr, + int prefetch_size); + + void SVEPrefetchHelper(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand& addr, + int prefetch_size); + + static Instr SVEImmPrefetchOperation(PrefetchOperation prfop) { + // SVE only supports PLD and PST, not PLI. + VIXL_ASSERT(((prfop >= PLDL1KEEP) && (prfop <= PLDL3STRM)) || + ((prfop >= PSTL1KEEP) && (prfop <= PSTL3STRM))); + // Check that we can simply map bits. + VIXL_STATIC_ASSERT(PLDL1KEEP == 0b00000); + VIXL_STATIC_ASSERT(PSTL1KEEP == 0b10000); + // Remaining operations map directly. + return ((prfop & 0b10000) >> 1) | (prfop & 0b00111); + } // Functions for emulating operands not directly supported by the instruction // set. 
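As a concrete check of the SVEImmPrefetchOperation mapping declared above (a worked example only, using values from the PrefetchOperation enum in constants-aarch64.h):

  // PSTL2KEEP = 0b10010: bit 4 moves down one position, bits 2:0 are kept.
  static_assert((((0x12 & 0b10000) >> 1) | (0x12 & 0b00111)) == 0b01010,
                "PSTL2KEEP maps to SVE prefetch encoding 0b01010");
  // PLD operations, e.g. PLDL1STRM = 0b00001, are unchanged by the mapping.
  static_assert((((0x01 & 0b10000) >> 1) | (0x01 & 0b00111)) == 0b00001,
                "PLDL1STRM maps to itself");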
@@ -4219,6 +8109,8 @@ class Assembler : public vixl::internal::AssemblerBase { bool CPUHas(const CPURegister& rt) const; bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const; + bool CPUHas(SystemRegister sysreg) const; + private: static uint32_t FP16ToImm8(Float16 imm); static uint32_t FP32ToImm8(float imm); @@ -4362,12 +8254,16 @@ class Assembler : public vixl::internal::AssemblerBase { NEONShiftImmediateOp op); void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop); + // If *shift is -1, find values of *imm8 and *shift such that IsInt8(*imm8) + // and *shift is either 0 or 8. Otherwise, leave the values unchanged. + void ResolveSVEImm8Shift(int* imm8, int* shift); + Instr LoadStoreStructAddrModeField(const MemOperand& addr); // Encode the specified MemOperand for the specified access size and scaling // preference. Instr LoadStoreMemOperand(const MemOperand& addr, - unsigned access_size, + unsigned access_size_in_bytes_log2, LoadStoreScalingOption option); // Link the current (not-yet-emitted) instruction to the specified label, then diff --git a/dep/vixl/include/vixl/aarch64/constants-aarch64.h b/dep/vixl/include/vixl/aarch64/constants-aarch64.h index b474211df..e667f81b2 100644 --- a/dep/vixl/include/vixl/aarch64/constants-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/constants-aarch64.h @@ -29,35 +29,68 @@ #include "../globals-vixl.h" +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-enum-enum-conversion" +#endif + namespace vixl { namespace aarch64 { const unsigned kNumberOfRegisters = 32; const unsigned kNumberOfVRegisters = 32; -const unsigned kNumberOfFPRegisters = kNumberOfVRegisters; +const unsigned kNumberOfZRegisters = kNumberOfVRegisters; +const unsigned kNumberOfPRegisters = 16; // Callee saved registers are x21-x30(lr). const int kNumberOfCalleeSavedRegisters = 10; const int kFirstCalleeSavedRegisterIndex = 21; -// Callee saved FP registers are d8-d15. +// Callee saved FP registers are d8-d15. Note that the high parts of v8-v15 are +// still caller-saved. const int kNumberOfCalleeSavedFPRegisters = 8; const int kFirstCalleeSavedFPRegisterIndex = 8; +// All predicated instructions accept at least p0-p7 as the governing predicate. +const unsigned kNumberOfGoverningPRegisters = 8; // clang-format off +#define AARCH64_P_REGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) + #define AARCH64_REGISTER_CODE_LIST(R) \ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) +// SVE loads and stores use "w" instead of "s" for word-sized accesses, so the +// mapping from the load/store variant to constants like k*RegSize is irregular. +#define VIXL_SVE_LOAD_STORE_VARIANT_LIST(V) \ + V(b, B) \ + V(h, H) \ + V(w, S) \ + V(d, D) + +// Sign-extending loads don't have double-word variants. +#define VIXL_SVE_LOAD_STORE_SIGNED_VARIANT_LIST(V) \ + V(b, B) \ + V(h, H) \ + V(w, S) + #define INSTRUCTION_FIELDS_LIST(V_) \ /* Register fields */ \ -V_(Rd, 4, 0, ExtractBits) /* Destination register. */ \ -V_(Rn, 9, 5, ExtractBits) /* First source register. */ \ -V_(Rm, 20, 16, ExtractBits) /* Second source register. */ \ -V_(Ra, 14, 10, ExtractBits) /* Third source register. */ \ -V_(Rt, 4, 0, ExtractBits) /* Load/store register. */ \ -V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. 
*/ \ -V_(Rs, 20, 16, ExtractBits) /* Exclusive access status. */ \ +V_(Rd, 4, 0, ExtractBits) /* Destination register. */ \ +V_(Rn, 9, 5, ExtractBits) /* First source register. */ \ +V_(Rm, 20, 16, ExtractBits) /* Second source register. */ \ +V_(RmLow16, 19, 16, ExtractBits) /* Second source register (code 0-15). */ \ +V_(Ra, 14, 10, ExtractBits) /* Third source register. */ \ +V_(Rt, 4, 0, ExtractBits) /* Load/store register. */ \ +V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. */ \ +V_(Rs, 20, 16, ExtractBits) /* Exclusive access status. */ \ +V_(Pt, 3, 0, ExtractBits) /* Load/store register (p0-p7). */ \ +V_(Pd, 3, 0, ExtractBits) /* SVE destination predicate register. */ \ +V_(Pn, 8, 5, ExtractBits) /* SVE first source predicate register. */ \ +V_(Pm, 19, 16, ExtractBits) /* SVE second source predicate register.*/ \ +V_(PgLow8, 12, 10, ExtractBits) /* Governing predicate (p0-p7). */ \ \ /* Common bits */ \ V_(SixtyFourBits, 31, 31, ExtractBits) \ @@ -73,7 +106,7 @@ V_(ImmDPShift, 15, 10, ExtractBits) \ \ /* Add/subtract immediate */ \ V_(ImmAddSub, 21, 10, ExtractBits) \ -V_(ShiftAddSub, 23, 22, ExtractBits) \ +V_(ImmAddSubShift, 22, 22, ExtractBits) \ \ /* Add/substract extend */ \ V_(ImmExtendShift, 12, 10, ExtractBits) \ @@ -119,6 +152,8 @@ V_(ImmPrefetchOperation, 4, 0, ExtractBits) \ V_(PrefetchHint, 4, 3, ExtractBits) \ V_(PrefetchTarget, 2, 1, ExtractBits) \ V_(PrefetchStream, 0, 0, ExtractBits) \ +V_(ImmLSPACHi, 22, 22, ExtractSignedBits) \ +V_(ImmLSPACLo, 20, 12, ExtractBits) \ \ /* Other immediates */ \ V_(ImmUncondBranch, 25, 0, ExtractSignedBits) \ @@ -128,6 +163,7 @@ V_(ImmException, 20, 5, ExtractBits) \ V_(ImmHint, 11, 5, ExtractBits) \ V_(ImmBarrierDomain, 11, 10, ExtractBits) \ V_(ImmBarrierType, 9, 8, ExtractBits) \ +V_(ImmUdf, 15, 0, ExtractBits) \ \ /* System (MRS, MSR, SYS) */ \ V_(ImmSystemRegister, 20, 5, ExtractBits) \ @@ -138,6 +174,7 @@ V_(SysOp1, 18, 16, ExtractBits) \ V_(SysOp2, 7, 5, ExtractBits) \ V_(CRn, 15, 12, ExtractBits) \ V_(CRm, 11, 8, ExtractBits) \ +V_(ImmRMIFRotation, 20, 15, ExtractBits) \ \ /* Load-/store-exclusive */ \ V_(LdStXLoad, 22, 22, ExtractBits) \ @@ -172,7 +209,23 @@ V_(NEONCmode, 15, 12, ExtractBits) \ /* NEON Shift Immediate fields */ \ V_(ImmNEONImmhImmb, 22, 16, ExtractBits) \ V_(ImmNEONImmh, 22, 19, ExtractBits) \ -V_(ImmNEONImmb, 18, 16, ExtractBits) +V_(ImmNEONImmb, 18, 16, ExtractBits) \ + \ +/* SVE generic fields */ \ +V_(SVESize, 23, 22, ExtractBits) \ +V_(ImmSVEVLScale, 10, 5, ExtractSignedBits) \ +V_(ImmSVEIntWideSigned, 12, 5, ExtractSignedBits) \ +V_(ImmSVEIntWideUnsigned, 12, 5, ExtractBits) \ +V_(ImmSVEPredicateConstraint, 9, 5, ExtractBits) \ + \ +/* SVE Bitwise Immediate bitfield */ \ +V_(SVEBitN, 17, 17, ExtractBits) \ +V_(SVEImmRotate, 16, 11, ExtractBits) \ +V_(SVEImmSetBits, 10, 5, ExtractBits) \ + \ +V_(SVEImmPrefetchOperation, 3, 0, ExtractBits) \ +V_(SVEPrefetchHint, 3, 3, ExtractBits) + // clang-format on #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ @@ -230,7 +283,22 @@ enum Condition { // Aliases. hs = cs, // C set Unsigned higher or same. - lo = cc // C clear Unsigned lower. + lo = cc, // C clear Unsigned lower. + + // Floating-point additional condition code. + uo, // Unordered comparison. + + // SVE predicate condition aliases. + sve_none = eq, // No active elements were true. + sve_any = ne, // An active element was true. + sve_nlast = cs, // The last element was not true. + sve_last = cc, // The last element was true. + sve_first = mi, // The first element was true. 
+  sve_nfrst = pl,   // The first element was not true.
+  sve_pmore = hi,   // An active element was true but not the last element.
+  sve_plast = ls,   // The last active element was true or no active elements were true.
+  sve_tcont = ge,   // CTERM termination condition not detected.
+  sve_tstop = lt    // CTERM termination condition detected.
 };

 inline Condition InvertCondition(Condition cond) {
@@ -274,7 +342,12 @@ enum StatusFlags {
   FPEqualFlag = ZCFlag,
   FPLessThanFlag = NFlag,
   FPGreaterThanFlag = CFlag,
-  FPUnorderedFlag = CVFlag
+  FPUnorderedFlag = CVFlag,
+
+  // SVE condition flags.
+  SVEFirstFlag = NFlag,
+  SVENoneFlag = ZFlag,
+  SVENotLastFlag = CFlag
 };

 enum Shift {
@@ -298,15 +371,43 @@ enum Extend {
   SXTX = 7
 };

+enum SVEOffsetModifier {
+  NO_SVE_OFFSET_MODIFIER,
+  // Multiply (each element of) the offset by either the vector or predicate
+  // length, according to the context.
+  SVE_MUL_VL,
+  // Shift or extend modifiers (as in `Shift` or `Extend`).
+  SVE_LSL,
+  SVE_UXTW,
+  SVE_SXTW
+};
+
 enum SystemHint {
-  NOP = 0,
-  YIELD = 1,
-  WFE = 2,
-  WFI = 3,
-  SEV = 4,
-  SEVL = 5,
-  ESB = 16,
-  CSDB = 20
+  NOP = 0,
+  YIELD = 1,
+  WFE = 2,
+  WFI = 3,
+  SEV = 4,
+  SEVL = 5,
+  ESB = 16,
+  CSDB = 20,
+  BTI = 32,
+  BTI_c = 34,
+  BTI_j = 36,
+  BTI_jc = 38
+};
+
+enum BranchTargetIdentifier {
+  EmitBTI_none = NOP,
+  EmitBTI = BTI,
+  EmitBTI_c = BTI_c,
+  EmitBTI_j = BTI_j,
+  EmitBTI_jc = BTI_jc,
+
+  // These correspond to the values of the CRm:op2 fields in the equivalent HINT
+  // instruction.
+  EmitPACIASP = 25,
+  EmitPACIBSP = 27
 };

 enum BarrierDomain {
@@ -331,6 +432,9 @@ enum PrefetchOperation {
   PLDL3KEEP = 0x04,
   PLDL3STRM = 0x05,

+  PrfUnallocated06 = 0x06,
+  PrfUnallocated07 = 0x07,
+
   PLIL1KEEP = 0x08,
   PLIL1STRM = 0x09,
   PLIL2KEEP = 0x0a,
@@ -338,12 +442,49 @@ enum PrefetchOperation {
   PLIL3KEEP = 0x0c,
   PLIL3STRM = 0x0d,

+  PrfUnallocated0e = 0x0e,
+  PrfUnallocated0f = 0x0f,
+
   PSTL1KEEP = 0x10,
   PSTL1STRM = 0x11,
   PSTL2KEEP = 0x12,
   PSTL2STRM = 0x13,
   PSTL3KEEP = 0x14,
-  PSTL3STRM = 0x15
+  PSTL3STRM = 0x15,
+
+  PrfUnallocated16 = 0x16,
+  PrfUnallocated17 = 0x17,
+  PrfUnallocated18 = 0x18,
+  PrfUnallocated19 = 0x19,
+  PrfUnallocated1a = 0x1a,
+  PrfUnallocated1b = 0x1b,
+  PrfUnallocated1c = 0x1c,
+  PrfUnallocated1d = 0x1d,
+  PrfUnallocated1e = 0x1e,
+  PrfUnallocated1f = 0x1f,
+};
+
+constexpr bool IsNamedPrefetchOperation(int op) {
+  return ((op >= PLDL1KEEP) && (op <= PLDL3STRM)) ||
+         ((op >= PLIL1KEEP) && (op <= PLIL3STRM)) ||
+         ((op >= PSTL1KEEP) && (op <= PSTL3STRM));
+}
+
+enum BType {
+  // Set when executing any instruction on a guarded page, except those cases
+  // listed below.
+  DefaultBType = 0,
+
+  // Set when an indirect branch is taken from an unguarded page to a guarded
+  // page, or from a guarded page to ip0 or ip1 (x16 or x17), e.g. "br ip0".
+  BranchFromUnguardedOrToIP = 1,
+
+  // Set when an indirect branch and link (call) is taken, e.g. "blr x0".
+  BranchAndLink = 2,
+
+  // Set when an indirect branch is taken from a guarded page to a register
+  // that is not ip0 or ip1 (x16 or x17), e.g. "br x0".
+  BranchFromGuardedNotToIP = 3
 };

 template <int op0, int op1, int crn, int crm, int op2>
@@ -359,10 +500,12 @@ class SystemRegisterEncoder {

 // System/special register names.
 // This information is not encoded as one field but as the concatenation of
-// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+// multiple fields (Op0, Op1, Crn, Crm, Op2).
enum SystemRegister { NZCV = SystemRegisterEncoder<3, 3, 4, 2, 0>::value, - FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value + FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value, + RNDR = SystemRegisterEncoder<3, 3, 2, 4, 0>::value, // Random number. + RNDRRS = SystemRegisterEncoder<3, 3, 2, 4, 1>::value // Reseeded random number. }; template <int op1, int crn, int crm, int op2> @@ -382,8 +525,48 @@ enum InstructionCacheOp { enum DataCacheOp { CVAC = CacheOpEncoder<3, 7, 10, 1>::value, CVAU = CacheOpEncoder<3, 7, 11, 1>::value, + CVAP = CacheOpEncoder<3, 7, 12, 1>::value, + CVADP = CacheOpEncoder<3, 7, 13, 1>::value, CIVAC = CacheOpEncoder<3, 7, 14, 1>::value, - ZVA = CacheOpEncoder<3, 7, 4, 1>::value + ZVA = CacheOpEncoder<3, 7, 4, 1>::value, + GVA = CacheOpEncoder<3, 7, 4, 3>::value, + GZVA = CacheOpEncoder<3, 7, 4, 4>::value, + CGVAC = CacheOpEncoder<3, 7, 10, 3>::value, + CGDVAC = CacheOpEncoder<3, 7, 10, 5>::value, + CGVAP = CacheOpEncoder<3, 7, 12, 3>::value, + CGDVAP = CacheOpEncoder<3, 7, 12, 5>::value, + CIGVAC = CacheOpEncoder<3, 7, 14, 3>::value, + CIGDVAC = CacheOpEncoder<3, 7, 14, 5>::value +}; + +// Some SVE instructions support a predicate constraint pattern. This is +// interpreted as a VL-dependent value, and is typically used to initialise +// predicates, or to otherwise limit the number of processed elements. +enum SVEPredicateConstraint { + // Select 2^N elements, for the largest possible N. + SVE_POW2 = 0x0, + // Each VL<N> selects exactly N elements if possible, or zero if N is greater + // than the number of elements. Note that the encoding values for VL<N> are + // not linearly related to N. + SVE_VL1 = 0x1, + SVE_VL2 = 0x2, + SVE_VL3 = 0x3, + SVE_VL4 = 0x4, + SVE_VL5 = 0x5, + SVE_VL6 = 0x6, + SVE_VL7 = 0x7, + SVE_VL8 = 0x8, + SVE_VL16 = 0x9, + SVE_VL32 = 0xa, + SVE_VL64 = 0xb, + SVE_VL128 = 0xc, + SVE_VL256 = 0xd, + // Each MUL<N> selects the largest multiple of N elements that the vector + // length supports. Note that for D-sized lanes, this can be zero (with a + // 128-bit vector there are only two D-sized lanes, so SVE_MUL3 and SVE_MUL4 + // both select zero elements). + SVE_MUL4 = 0x1d, + SVE_MUL3 = 0x1e, + // Select all elements. + SVE_ALL = 0x1f }; // Instruction enumerations. @@ -460,6 +643,14 @@ enum NEONScalarFormatField : uint32_t { NEON_D = 0x00C00000u }; +enum SVESizeField { + SVESizeFieldMask = 0x00C00000, + SVE_B = 0x00000000, + SVE_H = 0x00400000, + SVE_S = 0x00800000, + SVE_D = 0x00C00000 +}; + // PC relative addressing. enum PCRelAddressingOp : uint32_t { PCRelAddressingFixed = 0x10000000u, @@ -488,8 +679,8 @@ enum AddSubOp : uint32_t { enum AddSubImmediateOp : uint32_t { AddSubImmediateFixed = 0x11000000u, - AddSubImmediateFMask = 0x1F000000u, - AddSubImmediateMask = 0xFF000000u, + AddSubImmediateFMask = 0x1F800000u, + AddSubImmediateMask = 0xFF800000u, #define ADD_SUB_IMMEDIATE(A) \ A##_w_imm = AddSubImmediateFixed | A, \ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits @@ -536,6 +727,23 @@ enum AddSubWithCarryOp : uint32_t { SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits }; +// Rotate right into flags. +enum RotateRightIntoFlagsOp : uint32_t { + RotateRightIntoFlagsFixed = 0x1A000400u, + RotateRightIntoFlagsFMask = 0x1FE07C00u, + RotateRightIntoFlagsMask = 0xFFE07C10u, + RMIF = RotateRightIntoFlagsFixed | 0xA0000000u +}; + +// Evaluate into flags. +enum EvaluateIntoFlagsOp : uint32_t { + EvaluateIntoFlagsFixed = 0x1A000800u, + EvaluateIntoFlagsFMask = 0x1FE03C00u, + EvaluateIntoFlagsMask = 0xFFE07C1Fu, + SETF8 = EvaluateIntoFlagsFixed | 0x2000000Du, + SETF16 = EvaluateIntoFlagsFixed | 0x2000400Du +}; + // Logical (immediate and shifted register).
enum LogicalOp : uint32_t { @@ -719,6 +927,15 @@ enum SystemSysRegOp : uint32_t { MSR = SystemSysRegFixed | 0x00000000u }; +enum SystemPStateOp : uint32_t { + SystemPStateFixed = 0xD5004000u, + SystemPStateFMask = 0xFFF8F000u, + SystemPStateMask = 0xFFFFF0FFu, + CFINV = SystemPStateFixed | 0x0000001Fu, + XAFLAG = SystemPStateFixed | 0x0000003Fu, + AXFLAG = SystemPStateFixed | 0x0000005Fu +}; + enum SystemHintOp : uint32_t { SystemHintFixed = 0xD503201Fu, SystemHintFMask = 0xFFFFF01Fu, @@ -868,6 +1085,18 @@ enum LoadStorePairNonTemporalOp : uint32_t { LDNP_q = LoadStorePairNonTemporalFixed | LDP_q }; +// Load with pointer authentication. +enum LoadStorePACOp { + LoadStorePACFixed = 0xF8200400u, + LoadStorePACFMask = 0xFF200400u, + LoadStorePACMask = 0xFFA00C00u, + LoadStorePACPreBit = 0x00000800u, + LDRAA = LoadStorePACFixed | 0x00000000u, + LDRAA_pre = LoadStorePACPreBit | LDRAA, + LDRAB = LoadStorePACFixed | 0x00800000u, + LDRAB_pre = LoadStorePACPreBit | LDRAB +}; + // Load literal. enum LoadLiteralOp : uint32_t { LoadLiteralFixed = 0x18000000u, @@ -1057,6 +1286,26 @@ enum LoadStoreExclusive : uint32_t { CASPAL_x = CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz }; +// Load/store RCpc unscaled offset. +enum LoadStoreRCpcUnscaledOffsetOp : uint32_t { + LoadStoreRCpcUnscaledOffsetFixed = 0x19000000u, + LoadStoreRCpcUnscaledOffsetFMask = 0x3F200C00u, + LoadStoreRCpcUnscaledOffsetMask = 0xFFE00C00u, + STLURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00000000u, + LDAPURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00400000u, + LDAPURSB_x = LoadStoreRCpcUnscaledOffsetFixed | 0x00800000u, + LDAPURSB_w = LoadStoreRCpcUnscaledOffsetFixed | 0x00C00000u, + STLURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40000000u, + LDAPURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40400000u, + LDAPURSH_x = LoadStoreRCpcUnscaledOffsetFixed | 0x40800000u, + LDAPURSH_w = LoadStoreRCpcUnscaledOffsetFixed | 0x40C00000u, + STLUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80000000u, + LDAPUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80400000u, + LDAPURSW = LoadStoreRCpcUnscaledOffsetFixed | 0x80800000u, + STLUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0000000u, + LDAPUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0400000u +}; + #define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \ V(LDADD, 0x00000000u), \ V(LDCLR, 0x00001000u), \ @@ -1342,12 +1591,24 @@ enum FPDataProcessing1SourceOp : uint32_t { FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000u, FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000u, FSQRT = FSQRT_s, - FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000u, - FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000u, - FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000u, - FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000u, - FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000u, - FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000u, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, + FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, + FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, + FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, + FRINT32X_s = FPDataProcessing1SourceFixed | 0x00088000u, + FRINT32X_d = FPDataProcessing1SourceFixed | FP64 | 0x00088000u, + FRINT32X = FRINT32X_s, + FRINT32Z_s = FPDataProcessing1SourceFixed | 0x00080000u, + FRINT32Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00080000u, + FRINT32Z = FRINT32Z_s, + FRINT64X_s = FPDataProcessing1SourceFixed | 
0x00098000u, + FRINT64X_d = FPDataProcessing1SourceFixed | FP64 | 0x00098000u, + FRINT64X = FRINT64X_s, + FRINT64Z_s = FPDataProcessing1SourceFixed | 0x00090000u, + FRINT64Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00090000u, + FRINT64Z = FRINT64Z_s, FRINTN_h = FPDataProcessing1SourceFixed | FP16 | 0x00040000u, FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000u, FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000u, @@ -1643,6 +1904,10 @@ enum NEON2RegMiscOp : uint32_t { NEON_FCVTN = NEON2RegMiscFixed | 0x00016000u, NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000u, NEON_FCVTL = NEON2RegMiscFixed | 0x00017000u, + NEON_FRINT32X = NEON2RegMiscFixed | 0x2001E000u, + NEON_FRINT32Z = NEON2RegMiscFixed | 0x0001E000u, + NEON_FRINT64X = NEON2RegMiscFixed | 0x2001F000u, + NEON_FRINT64Z = NEON2RegMiscFixed | 0x0001F000u, NEON_FRINTN = NEON2RegMiscFixed | 0x00018000u, NEON_FRINTA = NEON2RegMiscFixed | 0x20018000u, NEON_FRINTP = NEON2RegMiscFixed | 0x00818000u, @@ -1806,7 +2071,14 @@ enum NEON3SameOp : uint32_t { NEON_BIC = NEON3SameLogicalFixed | 0x00400000u, NEON_BIF = NEON3SameLogicalFixed | 0x20C00000u, NEON_BIT = NEON3SameLogicalFixed | 0x20800000u, - NEON_BSL = NEON3SameLogicalFixed | 0x20400000u + NEON_BSL = NEON3SameLogicalFixed | 0x20400000u, + + // FHM (FMLAL-like) instructions have an oddball encoding scheme under 3Same. + NEON3SameFHMMask = 0xBFE0FC00u, // U size opcode + NEON_FMLAL = NEON3SameFixed | 0x0000E800u, // 0 00 11101 + NEON_FMLAL2 = NEON3SameFixed | 0x2000C800u, // 1 00 11001 + NEON_FMLSL = NEON3SameFixed | 0x0080E800u, // 0 10 11101 + NEON_FMLSL2 = NEON3SameFixed | 0x2080C800u // 1 10 11001 }; @@ -1978,6 +2250,7 @@ enum NEONByIndexedElementOp : uint32_t { NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000u, NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000u, NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000u, + NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000u, NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000u, NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000u, @@ -1990,10 +2263,22 @@ enum NEONByIndexedElementOp : uint32_t { NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000u, NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000u, NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000u, - NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000u, - // Complex instruction(s) this is necessary because 'rot' encoding moves into the NEONByIndex..Mask space - NEONByIndexedElementFPComplexMask = 0xBF009400u + // FMLAL-like instructions. + // For all cases: U = x, size = 10, opcode = xx00 + NEONByIndexedElementFPLongFixed = NEONByIndexedElementFixed | 0x00800000u, + NEONByIndexedElementFPLongFMask = NEONByIndexedElementFMask | 0x00C03000u, + NEONByIndexedElementFPLongMask = 0xBFC0F400u, + NEON_FMLAL_H_byelement = NEONByIndexedElementFixed | 0x00800000u, + NEON_FMLAL2_H_byelement = NEONByIndexedElementFixed | 0x20808000u, + NEON_FMLSL_H_byelement = NEONByIndexedElementFixed | 0x00804000u, + NEON_FMLSL2_H_byelement = NEONByIndexedElementFixed | 0x2080C000u, + + // Complex instruction(s). + // This is necessary because the 'rot' encoding moves into the + // NEONByIndex..Mask space. + NEONByIndexedElementFPComplexMask = 0xBF009400u, + NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000u }; // NEON register copy. 
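+
+// Note on the encoding groups in this file: each group defines a Fixed value
+// holding the bits common to every instruction in the group, an FMask
+// selecting those bits, and a Mask selecting the bits that distinguish one
+// operation within the group. A group-membership test therefore looks like
+// the sketch below (illustrative only; the helper name is hypothetical):
+//
+//   bool InNEONByIndexedElementGroup(uint32_t instr) {
+//     return (instr & NEONByIndexedElementFMask) == NEONByIndexedElementFixed;
+//   }
+//   // Within the group, (instr & NEONByIndexedElementMask) identifies the
+//   // operation, e.g. NEON_SQRDMLAH_byelement.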
@@ -2306,7 +2591,7 @@ enum NEONScalar2RegMiscOp : uint32_t { NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, - NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, + NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000u, NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, @@ -2316,7 +2601,7 @@ enum NEONScalar2RegMiscOp : uint32_t { NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, - NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, + NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000u, NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, @@ -2523,6 +2808,1629 @@ enum NEONScalarShiftImmediateOp : uint32_t { NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm }; +enum SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsOp : uint32_t { + SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFixed = 0x84A00000u, + SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFMask = 0xFFA08000u, + SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsMask = 0xFFA0E000u, + LD1SH_z_p_bz_s_x32_scaled = SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFixed, + LDFF1SH_z_p_bz_s_x32_scaled = SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFixed | 0x00002000u, + LD1H_z_p_bz_s_x32_scaled = SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFixed | 0x00004000u, + LDFF1H_z_p_bz_s_x32_scaled = SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsFixed | 0x00006000u +}; + +enum SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsOp : uint32_t { + SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsFixed = 0x85200000u, + SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsFMask = 0xFFA08000u, + SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsMask = 0xFFA0E000u, + LD1W_z_p_bz_s_x32_scaled = SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsFixed | 0x00004000u, + LDFF1W_z_p_bz_s_x32_scaled = SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsFixed | 0x00006000u +}; + +enum SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsOp : uint32_t { + SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed = 0x84000000u, + SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFMask = 0xFE208000u, + SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsMask = 0xFFA0E000u, + LD1SB_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed, + LDFF1SB_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00002000u, + LD1B_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00004000u, + LDFF1B_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00006000u, + LD1SH_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00800000u, + LDFF1SH_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00802000u, + LD1H_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00804000u, + LDFF1H_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x00806000u, + LD1W_z_p_bz_s_x32_unscaled = 
SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x01004000u, + LDFF1W_z_p_bz_s_x32_unscaled = SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsFixed | 0x01006000u +}; + +enum SVE32BitGatherLoad_VectorPlusImmOp : uint32_t { + SVE32BitGatherLoad_VectorPlusImmFixed = 0x84208000u, + SVE32BitGatherLoad_VectorPlusImmFMask = 0xFE608000u, + SVE32BitGatherLoad_VectorPlusImmMask = 0xFFE0E000u, + LD1SB_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed, + LDFF1SB_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00002000u, + LD1B_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00004000u, + LDFF1B_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00006000u, + LD1SH_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00800000u, + LDFF1SH_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00802000u, + LD1H_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00804000u, + LDFF1H_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x00806000u, + LD1W_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x01004000u, + LDFF1W_z_p_ai_s = SVE32BitGatherLoad_VectorPlusImmFixed | 0x01006000u +}; + +enum SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsOp : uint32_t { + SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFixed = 0x84200000u, + SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFMask = 0xFFA08010u, + SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsMask = 0xFFA0E010u, + PRFB_i_p_bz_s_x32_scaled = SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFixed, + PRFH_i_p_bz_s_x32_scaled = SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFixed | 0x00002000u, + PRFW_i_p_bz_s_x32_scaled = SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFixed | 0x00004000u, + PRFD_i_p_bz_s_x32_scaled = SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsFixed | 0x00006000u +}; + +enum SVE32BitGatherPrefetch_VectorPlusImmOp : uint32_t { + SVE32BitGatherPrefetch_VectorPlusImmFixed = 0x8400E000u, + SVE32BitGatherPrefetch_VectorPlusImmFMask = 0xFE60E010u, + SVE32BitGatherPrefetch_VectorPlusImmMask = 0xFFE0E010u, + PRFB_i_p_ai_s = SVE32BitGatherPrefetch_VectorPlusImmFixed, + PRFH_i_p_ai_s = SVE32BitGatherPrefetch_VectorPlusImmFixed | 0x00800000u, + PRFW_i_p_ai_s = SVE32BitGatherPrefetch_VectorPlusImmFixed | 0x01000000u, + PRFD_i_p_ai_s = SVE32BitGatherPrefetch_VectorPlusImmFixed | 0x01800000u +}; + +enum SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsOp : uint32_t { + SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsFixed = 0xE4608000u, + SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsFMask = 0xFE60A000u, + SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsMask = 0xFFE0A000u, + ST1H_z_p_bz_s_x32_scaled = SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_s_x32_scaled = SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsFixed | 0x01000000u +}; + +enum SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsOp : uint32_t { + SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsFixed = 0xE4408000u, + SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsFMask = 0xFE60A000u, + SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsMask = 0xFFE0A000u, + ST1B_z_p_bz_s_x32_unscaled = SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsFixed, + ST1H_z_p_bz_s_x32_unscaled = SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_s_x32_unscaled = SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsFixed | 0x01000000u +}; + +enum SVE32BitScatterStore_VectorPlusImmOp : uint32_t { + SVE32BitScatterStore_VectorPlusImmFixed = 0xE460A000u, + 
SVE32BitScatterStore_VectorPlusImmFMask = 0xFE60E000u, + SVE32BitScatterStore_VectorPlusImmMask = 0xFFE0E000u, + ST1B_z_p_ai_s = SVE32BitScatterStore_VectorPlusImmFixed, + ST1H_z_p_ai_s = SVE32BitScatterStore_VectorPlusImmFixed | 0x00800000u, + ST1W_z_p_ai_s = SVE32BitScatterStore_VectorPlusImmFixed | 0x01000000u +}; + +enum SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsOp : uint32_t { + SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed = 0xC4200000u, + SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFMask = 0xFE208000u, + SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsMask = 0xFFA0E000u, + LD1SH_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x00800000u, + LDFF1SH_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x00802000u, + LD1H_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x00804000u, + LDFF1H_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x00806000u, + LD1SW_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01000000u, + LDFF1SW_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01002000u, + LD1W_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01004000u, + LDFF1W_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01006000u, + LD1D_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01804000u, + LDFF1D_z_p_bz_d_x32_scaled = SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsFixed | 0x01806000u +}; + +enum SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsOp : uint32_t { + SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed = 0xC4608000u, + SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFMask = 0xFE608000u, + SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsMask = 0xFFE0E000u, + LD1SH_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x00800000u, + LDFF1SH_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x00802000u, + LD1H_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x00804000u, + LDFF1H_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x00806000u, + LD1SW_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01000000u, + LDFF1SW_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01002000u, + LD1W_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01004000u, + LDFF1W_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01006000u, + LD1D_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01804000u, + LDFF1D_z_p_bz_d_64_scaled = SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsFixed | 0x01806000u +}; + +enum SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsOp : uint32_t { + SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed = 0xC4408000u, + SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFMask = 0xFE608000u, + SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsMask = 0xFFE0E000u, + LD1SB_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed, + LDFF1SB_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00002000u, + LD1B_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00004000u, + 
LDFF1B_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00006000u, + LD1SH_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00800000u, + LDFF1SH_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00802000u, + LD1H_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00804000u, + LDFF1H_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x00806000u, + LD1SW_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01000000u, + LDFF1SW_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01002000u, + LD1W_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01004000u, + LDFF1W_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01006000u, + LD1D_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01804000u, + LDFF1D_z_p_bz_d_64_unscaled = SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsFixed | 0x01806000u +}; + +enum SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsOp : uint32_t { + SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed = 0xC4000000u, + SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFMask = 0xFE208000u, + SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsMask = 0xFFA0E000u, + LD1SB_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed, + LDFF1SB_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00002000u, + LD1B_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00004000u, + LDFF1B_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00006000u, + LD1SH_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00800000u, + LDFF1SH_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00802000u, + LD1H_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00804000u, + LDFF1H_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00806000u, + LD1SW_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01000000u, + LDFF1SW_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01002000u, + LD1W_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01004000u, + LDFF1W_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01006000u, + LD1D_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01804000u, + LDFF1D_z_p_bz_d_x32_unscaled = SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01806000u +}; + +enum SVE64BitGatherLoad_VectorPlusImmOp : uint32_t { + SVE64BitGatherLoad_VectorPlusImmFixed = 0xC4208000u, + SVE64BitGatherLoad_VectorPlusImmFMask = 0xFE608000u, + SVE64BitGatherLoad_VectorPlusImmMask = 0xFFE0E000u, + LD1SB_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed, + LDFF1SB_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00002000u, + LD1B_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00004000u, + LDFF1B_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00006000u, + LD1SH_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00800000u, + 
LDFF1SH_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00802000u, + LD1H_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00804000u, + LDFF1H_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x00806000u, + LD1SW_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01000000u, + LDFF1SW_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01002000u, + LD1W_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01004000u, + LDFF1W_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01006000u, + LD1D_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01804000u, + LDFF1D_z_p_ai_d = SVE64BitGatherLoad_VectorPlusImmFixed | 0x01806000u +}; + +enum SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsOp : uint32_t { + SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFixed = 0xC4608000u, + SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFMask = 0xFFE08010u, + SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsMask = 0xFFE0E010u, + PRFB_i_p_bz_d_64_scaled = SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFixed, + PRFH_i_p_bz_d_64_scaled = SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFixed | 0x00002000u, + PRFW_i_p_bz_d_64_scaled = SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFixed | 0x00004000u, + PRFD_i_p_bz_d_64_scaled = SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsFixed | 0x00006000u +}; + +enum SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsOp : uint32_t { + SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFixed = 0xC4200000u, + SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFMask = 0xFFA08010u, + SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsMask = 0xFFA0E010u, + PRFB_i_p_bz_d_x32_scaled = SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFixed, + PRFH_i_p_bz_d_x32_scaled = SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x00002000u, + PRFW_i_p_bz_d_x32_scaled = SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x00004000u, + PRFD_i_p_bz_d_x32_scaled = SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x00006000u +}; + +enum SVE64BitGatherPrefetch_VectorPlusImmOp : uint32_t { + SVE64BitGatherPrefetch_VectorPlusImmFixed = 0xC400E000u, + SVE64BitGatherPrefetch_VectorPlusImmFMask = 0xFE60E010u, + SVE64BitGatherPrefetch_VectorPlusImmMask = 0xFFE0E010u, + PRFB_i_p_ai_d = SVE64BitGatherPrefetch_VectorPlusImmFixed, + PRFH_i_p_ai_d = SVE64BitGatherPrefetch_VectorPlusImmFixed | 0x00800000u, + PRFW_i_p_ai_d = SVE64BitGatherPrefetch_VectorPlusImmFixed | 0x01000000u, + PRFD_i_p_ai_d = SVE64BitGatherPrefetch_VectorPlusImmFixed | 0x01800000u +}; + +enum SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsOp : uint32_t { + SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsFixed = 0xE420A000u, + SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsFMask = 0xFE60E000u, + SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsMask = 0xFFE0E000u, + ST1H_z_p_bz_d_64_scaled = SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_d_64_scaled = SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsFixed | 0x01000000u, + ST1D_z_p_bz_d_64_scaled = SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsFixed | 0x01800000u +}; + +enum SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsOp : uint32_t { + SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFixed = 0xE400A000u, + SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFMask = 0xFE60E000u, + SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsMask = 0xFFE0E000u, + ST1B_z_p_bz_d_64_unscaled = 
SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFixed, + ST1H_z_p_bz_d_64_unscaled = SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_d_64_unscaled = SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFixed | 0x01000000u, + ST1D_z_p_bz_d_64_unscaled = SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsFixed | 0x01800000u +}; + +enum SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsOp : uint32_t { + SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsFixed = 0xE4208000u, + SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsFMask = 0xFE60A000u, + SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsMask = 0xFFE0A000u, + ST1H_z_p_bz_d_x32_scaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_d_x32_scaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x01000000u, + ST1D_z_p_bz_d_x32_scaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsFixed | 0x01800000u +}; + +enum SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsOp : uint32_t { + SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFixed = 0xE4008000u, + SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFMask = 0xFE60A000u, + SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsMask = 0xFFE0A000u, + ST1B_z_p_bz_d_x32_unscaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFixed, + ST1H_z_p_bz_d_x32_unscaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x00800000u, + ST1W_z_p_bz_d_x32_unscaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01000000u, + ST1D_z_p_bz_d_x32_unscaled = SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsFixed | 0x01800000u +}; + +enum SVE64BitScatterStore_VectorPlusImmOp : uint32_t { + SVE64BitScatterStore_VectorPlusImmFixed = 0xE440A000u, + SVE64BitScatterStore_VectorPlusImmFMask = 0xFE60E000u, + SVE64BitScatterStore_VectorPlusImmMask = 0xFFE0E000u, + ST1B_z_p_ai_d = SVE64BitScatterStore_VectorPlusImmFixed, + ST1H_z_p_ai_d = SVE64BitScatterStore_VectorPlusImmFixed | 0x00800000u, + ST1W_z_p_ai_d = SVE64BitScatterStore_VectorPlusImmFixed | 0x01000000u, + ST1D_z_p_ai_d = SVE64BitScatterStore_VectorPlusImmFixed | 0x01800000u +}; + +enum SVEAddressGenerationOp : uint32_t { + SVEAddressGenerationFixed = 0x0420A000u, + SVEAddressGenerationFMask = 0xFF20F000u, + SVEAddressGenerationMask = 0xFFE0F000u, + ADR_z_az_d_s32_scaled = SVEAddressGenerationFixed, + ADR_z_az_d_u32_scaled = SVEAddressGenerationFixed | 0x00400000u, + ADR_z_az_s_same_scaled = SVEAddressGenerationFixed | 0x00800000u, + ADR_z_az_d_same_scaled = SVEAddressGenerationFixed | 0x00C00000u +}; + +enum SVEBitwiseLogicalUnpredicatedOp : uint32_t { + SVEBitwiseLogicalUnpredicatedFixed = 0x04202000u, + SVEBitwiseLogicalUnpredicatedFMask = 0xFF20E000u, + SVEBitwiseLogicalUnpredicatedMask = 0xFFE0FC00u, + AND_z_zz = SVEBitwiseLogicalUnpredicatedFixed | 0x00001000u, + ORR_z_zz = SVEBitwiseLogicalUnpredicatedFixed | 0x00401000u, + EOR_z_zz = SVEBitwiseLogicalUnpredicatedFixed | 0x00801000u, + BIC_z_zz = SVEBitwiseLogicalUnpredicatedFixed | 0x00C01000u +}; + +enum SVEBitwiseLogicalWithImm_UnpredicatedOp : uint32_t { + SVEBitwiseLogicalWithImm_UnpredicatedFixed = 0x05000000u, + SVEBitwiseLogicalWithImm_UnpredicatedFMask = 0xFF3C0000u, + SVEBitwiseLogicalWithImm_UnpredicatedMask = 0xFFFC0000u, + ORR_z_zi = SVEBitwiseLogicalWithImm_UnpredicatedFixed, + EOR_z_zi = SVEBitwiseLogicalWithImm_UnpredicatedFixed | 0x00400000u, + AND_z_zi 
= SVEBitwiseLogicalWithImm_UnpredicatedFixed | 0x00800000u +}; + +enum SVEBitwiseLogical_PredicatedOp : uint32_t { + SVEBitwiseLogical_PredicatedFixed = 0x04180000u, + SVEBitwiseLogical_PredicatedFMask = 0xFF38E000u, + SVEBitwiseLogical_PredicatedMask = 0xFF3FE000u, + ORR_z_p_zz = SVEBitwiseLogical_PredicatedFixed, + EOR_z_p_zz = SVEBitwiseLogical_PredicatedFixed | 0x00010000u, + AND_z_p_zz = SVEBitwiseLogical_PredicatedFixed | 0x00020000u, + BIC_z_p_zz = SVEBitwiseLogical_PredicatedFixed | 0x00030000u +}; + +enum SVEBitwiseShiftByImm_PredicatedOp : uint32_t { + SVEBitwiseShiftByImm_PredicatedFixed = 0x04008000u, + SVEBitwiseShiftByImm_PredicatedFMask = 0xFF30E000u, + SVEBitwiseShiftByImm_PredicatedMask = 0xFF3FE000u, + ASR_z_p_zi = SVEBitwiseShiftByImm_PredicatedFixed, + LSR_z_p_zi = SVEBitwiseShiftByImm_PredicatedFixed | 0x00010000u, + LSL_z_p_zi = SVEBitwiseShiftByImm_PredicatedFixed | 0x00030000u, + ASRD_z_p_zi = SVEBitwiseShiftByImm_PredicatedFixed | 0x00040000u +}; + +enum SVEBitwiseShiftByVector_PredicatedOp : uint32_t { + SVEBitwiseShiftByVector_PredicatedFixed = 0x04108000u, + SVEBitwiseShiftByVector_PredicatedFMask = 0xFF38E000u, + SVEBitwiseShiftByVector_PredicatedMask = 0xFF3FE000u, + ASR_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed, + LSR_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed | 0x00010000u, + LSL_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed | 0x00030000u, + ASRR_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed | 0x00040000u, + LSRR_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed | 0x00050000u, + LSLR_z_p_zz = SVEBitwiseShiftByVector_PredicatedFixed | 0x00070000u +}; + +enum SVEBitwiseShiftByWideElements_PredicatedOp : uint32_t { + SVEBitwiseShiftByWideElements_PredicatedFixed = 0x04188000u, + SVEBitwiseShiftByWideElements_PredicatedFMask = 0xFF38E000u, + SVEBitwiseShiftByWideElements_PredicatedMask = 0xFF3FE000u, + ASR_z_p_zw = SVEBitwiseShiftByWideElements_PredicatedFixed, + LSR_z_p_zw = SVEBitwiseShiftByWideElements_PredicatedFixed | 0x00010000u, + LSL_z_p_zw = SVEBitwiseShiftByWideElements_PredicatedFixed | 0x00030000u +}; + +enum SVEBitwiseShiftUnpredicatedOp : uint32_t { + SVEBitwiseShiftUnpredicatedFixed = 0x04208000u, + SVEBitwiseShiftUnpredicatedFMask = 0xFF20E000u, + SVEBitwiseShiftUnpredicatedMask = 0xFF20FC00u, + ASR_z_zw = SVEBitwiseShiftUnpredicatedFixed, + LSR_z_zw = SVEBitwiseShiftUnpredicatedFixed | 0x00000400u, + LSL_z_zw = SVEBitwiseShiftUnpredicatedFixed | 0x00000C00u, + ASR_z_zi = SVEBitwiseShiftUnpredicatedFixed | 0x00001000u, + LSR_z_zi = SVEBitwiseShiftUnpredicatedFixed | 0x00001400u, + LSL_z_zi = SVEBitwiseShiftUnpredicatedFixed | 0x00001C00u +}; + +enum SVEBroadcastBitmaskImmOp : uint32_t { + SVEBroadcastBitmaskImmFixed = 0x05C00000u, + SVEBroadcastBitmaskImmFMask = 0xFFFC0000u, + SVEBroadcastBitmaskImmMask = 0xFFFC0000u, + DUPM_z_i = SVEBroadcastBitmaskImmFixed +}; + +enum SVEBroadcastFPImm_UnpredicatedOp : uint32_t { + SVEBroadcastFPImm_UnpredicatedFixed = 0x2539C000u, + SVEBroadcastFPImm_UnpredicatedFMask = 0xFF39C000u, + SVEBroadcastFPImm_UnpredicatedMask = 0xFF3FE000u, + FDUP_z_i = SVEBroadcastFPImm_UnpredicatedFixed +}; + +enum SVEBroadcastGeneralRegisterOp : uint32_t { + SVEBroadcastGeneralRegisterFixed = 0x05203800u, + SVEBroadcastGeneralRegisterFMask = 0xFF3FFC00u, + SVEBroadcastGeneralRegisterMask = 0xFF3FFC00u, + DUP_z_r = SVEBroadcastGeneralRegisterFixed +}; + +enum SVEBroadcastIndexElementOp : uint32_t { + SVEBroadcastIndexElementFixed = 0x05202000u, + SVEBroadcastIndexElementFMask = 0xFF20FC00u, + 
SVEBroadcastIndexElementMask = 0xFF20FC00u, + DUP_z_zi = SVEBroadcastIndexElementFixed +}; + +enum SVEBroadcastIntImm_UnpredicatedOp : uint32_t { + SVEBroadcastIntImm_UnpredicatedFixed = 0x2538C000u, + SVEBroadcastIntImm_UnpredicatedFMask = 0xFF39C000u, + SVEBroadcastIntImm_UnpredicatedMask = 0xFF3FC000u, + DUP_z_i = SVEBroadcastIntImm_UnpredicatedFixed +}; + +enum SVECompressActiveElementsOp : uint32_t { + SVECompressActiveElementsFixed = 0x05A18000u, + SVECompressActiveElementsFMask = 0xFFBFE000u, + SVECompressActiveElementsMask = 0xFFBFE000u, + COMPACT_z_p_z = SVECompressActiveElementsFixed +}; + +enum SVEConditionallyBroadcastElementToVectorOp : uint32_t { + SVEConditionallyBroadcastElementToVectorFixed = 0x05288000u, + SVEConditionallyBroadcastElementToVectorFMask = 0xFF3EE000u, + SVEConditionallyBroadcastElementToVectorMask = 0xFF3FE000u, + CLASTA_z_p_zz = SVEConditionallyBroadcastElementToVectorFixed, + CLASTB_z_p_zz = SVEConditionallyBroadcastElementToVectorFixed | 0x00010000u +}; + +enum SVEConditionallyExtractElementToGeneralRegisterOp : uint32_t { + SVEConditionallyExtractElementToGeneralRegisterFixed = 0x0530A000u, + SVEConditionallyExtractElementToGeneralRegisterFMask = 0xFF3EE000u, + SVEConditionallyExtractElementToGeneralRegisterMask = 0xFF3FE000u, + CLASTA_r_p_z = SVEConditionallyExtractElementToGeneralRegisterFixed, + CLASTB_r_p_z = SVEConditionallyExtractElementToGeneralRegisterFixed | 0x00010000u +}; + +enum SVEConditionallyExtractElementToSIMDFPScalarOp : uint32_t { + SVEConditionallyExtractElementToSIMDFPScalarFixed = 0x052A8000u, + SVEConditionallyExtractElementToSIMDFPScalarFMask = 0xFF3EE000u, + SVEConditionallyExtractElementToSIMDFPScalarMask = 0xFF3FE000u, + CLASTA_v_p_z = SVEConditionallyExtractElementToSIMDFPScalarFixed, + CLASTB_v_p_z = SVEConditionallyExtractElementToSIMDFPScalarFixed | 0x00010000u +}; + +enum SVEConditionallyTerminateScalarsOp : uint32_t { + SVEConditionallyTerminateScalarsFixed = 0x25202000u, + SVEConditionallyTerminateScalarsFMask = 0xFF20FC0Fu, + SVEConditionallyTerminateScalarsMask = 0xFFA0FC1Fu, + CTERMEQ_rr = SVEConditionallyTerminateScalarsFixed | 0x00800000u, + CTERMNE_rr = SVEConditionallyTerminateScalarsFixed | 0x00800010u +}; + +enum SVEConstructivePrefix_UnpredicatedOp : uint32_t { + SVEConstructivePrefix_UnpredicatedFixed = 0x0420BC00u, + SVEConstructivePrefix_UnpredicatedFMask = 0xFF20FC00u, + SVEConstructivePrefix_UnpredicatedMask = 0xFFFFFC00u, + MOVPRFX_z_z = SVEConstructivePrefix_UnpredicatedFixed +}; + +enum SVEContiguousFirstFaultLoad_ScalarPlusScalarOp : uint32_t { + SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed = 0xA4006000u, + SVEContiguousFirstFaultLoad_ScalarPlusScalarFMask = 0xFE00E000u, + SVEContiguousFirstFaultLoad_ScalarPlusScalarMask = 0xFFE0E000u, + LDFF1B_z_p_br_u8 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed, + LDFF1B_z_p_br_u16 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00200000u, + LDFF1B_z_p_br_u32 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00400000u, + LDFF1B_z_p_br_u64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00600000u, + LDFF1SW_z_p_br_s64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00800000u, + LDFF1H_z_p_br_u16 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00A00000u, + LDFF1H_z_p_br_u32 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00C00000u, + LDFF1H_z_p_br_u64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x00E00000u, + LDFF1SH_z_p_br_s64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01000000u, + 
LDFF1SH_z_p_br_s32 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01200000u, + LDFF1W_z_p_br_u32 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01400000u, + LDFF1W_z_p_br_u64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01600000u, + LDFF1SB_z_p_br_s64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01800000u, + LDFF1SB_z_p_br_s32 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01A00000u, + LDFF1SB_z_p_br_s16 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01C00000u, + LDFF1D_z_p_br_u64 = SVEContiguousFirstFaultLoad_ScalarPlusScalarFixed | 0x01E00000u +}; + +enum SVEContiguousLoad_ScalarPlusImmOp : uint32_t { + SVEContiguousLoad_ScalarPlusImmFixed = 0xA400A000u, + SVEContiguousLoad_ScalarPlusImmFMask = 0xFE10E000u, + SVEContiguousLoad_ScalarPlusImmMask = 0xFFF0E000u, + LD1B_z_p_bi_u8 = SVEContiguousLoad_ScalarPlusImmFixed, + LD1B_z_p_bi_u16 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00200000u, + LD1B_z_p_bi_u32 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00400000u, + LD1B_z_p_bi_u64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00600000u, + LD1SW_z_p_bi_s64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00800000u, + LD1H_z_p_bi_u16 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00A00000u, + LD1H_z_p_bi_u32 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00C00000u, + LD1H_z_p_bi_u64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x00E00000u, + LD1SH_z_p_bi_s64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01000000u, + LD1SH_z_p_bi_s32 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01200000u, + LD1W_z_p_bi_u32 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01400000u, + LD1W_z_p_bi_u64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01600000u, + LD1SB_z_p_bi_s64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01800000u, + LD1SB_z_p_bi_s32 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01A00000u, + LD1SB_z_p_bi_s16 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01C00000u, + LD1D_z_p_bi_u64 = SVEContiguousLoad_ScalarPlusImmFixed | 0x01E00000u +}; + +enum SVEContiguousLoad_ScalarPlusScalarOp : uint32_t { + SVEContiguousLoad_ScalarPlusScalarFixed = 0xA4004000u, + SVEContiguousLoad_ScalarPlusScalarFMask = 0xFE00E000u, + SVEContiguousLoad_ScalarPlusScalarMask = 0xFFE0E000u, + LD1B_z_p_br_u8 = SVEContiguousLoad_ScalarPlusScalarFixed, + LD1B_z_p_br_u16 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00200000u, + LD1B_z_p_br_u32 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00400000u, + LD1B_z_p_br_u64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00600000u, + LD1SW_z_p_br_s64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00800000u, + LD1H_z_p_br_u16 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00A00000u, + LD1H_z_p_br_u32 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00C00000u, + LD1H_z_p_br_u64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x00E00000u, + LD1SH_z_p_br_s64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01000000u, + LD1SH_z_p_br_s32 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01200000u, + LD1W_z_p_br_u32 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01400000u, + LD1W_z_p_br_u64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01600000u, + LD1SB_z_p_br_s64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01800000u, + LD1SB_z_p_br_s32 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01A00000u, + LD1SB_z_p_br_s16 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01C00000u, + LD1D_z_p_br_u64 = SVEContiguousLoad_ScalarPlusScalarFixed | 0x01E00000u +}; + +enum SVEContiguousNonFaultLoad_ScalarPlusImmOp : uint32_t { + SVEContiguousNonFaultLoad_ScalarPlusImmFixed = 0xA410A000u, + 
SVEContiguousNonFaultLoad_ScalarPlusImmFMask = 0xFE10E000u, + SVEContiguousNonFaultLoad_ScalarPlusImmMask = 0xFFF0E000u, + LDNF1B_z_p_bi_u8 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed, + LDNF1B_z_p_bi_u16 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00200000u, + LDNF1B_z_p_bi_u32 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00400000u, + LDNF1B_z_p_bi_u64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00600000u, + LDNF1SW_z_p_bi_s64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00800000u, + LDNF1H_z_p_bi_u16 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00A00000u, + LDNF1H_z_p_bi_u32 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00C00000u, + LDNF1H_z_p_bi_u64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x00E00000u, + LDNF1SH_z_p_bi_s64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01000000u, + LDNF1SH_z_p_bi_s32 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01200000u, + LDNF1W_z_p_bi_u32 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01400000u, + LDNF1W_z_p_bi_u64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01600000u, + LDNF1SB_z_p_bi_s64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01800000u, + LDNF1SB_z_p_bi_s32 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01A00000u, + LDNF1SB_z_p_bi_s16 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01C00000u, + LDNF1D_z_p_bi_u64 = SVEContiguousNonFaultLoad_ScalarPlusImmFixed | 0x01E00000u +}; + +enum SVEContiguousNonTemporalLoad_ScalarPlusImmOp : uint32_t { + SVEContiguousNonTemporalLoad_ScalarPlusImmFixed = 0xA400E000u, + SVEContiguousNonTemporalLoad_ScalarPlusImmFMask = 0xFE70E000u, + SVEContiguousNonTemporalLoad_ScalarPlusImmMask = 0xFFF0E000u, + LDNT1B_z_p_bi_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusImmFixed, + LDNT1H_z_p_bi_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusImmFixed | 0x00800000u, + LDNT1W_z_p_bi_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusImmFixed | 0x01000000u, + LDNT1D_z_p_bi_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusImmFixed | 0x01800000u +}; + +enum SVEContiguousNonTemporalLoad_ScalarPlusScalarOp : uint32_t { + SVEContiguousNonTemporalLoad_ScalarPlusScalarFixed = 0xA400C000u, + SVEContiguousNonTemporalLoad_ScalarPlusScalarFMask = 0xFE60E000u, + SVEContiguousNonTemporalLoad_ScalarPlusScalarMask = 0xFFE0E000u, + LDNT1B_z_p_br_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusScalarFixed, + LDNT1H_z_p_br_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusScalarFixed | 0x00800000u, + LDNT1W_z_p_br_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusScalarFixed | 0x01000000u, + LDNT1D_z_p_br_contiguous = SVEContiguousNonTemporalLoad_ScalarPlusScalarFixed | 0x01800000u +}; + +enum SVEContiguousNonTemporalStore_ScalarPlusImmOp : uint32_t { + SVEContiguousNonTemporalStore_ScalarPlusImmFixed = 0xE410E000u, + SVEContiguousNonTemporalStore_ScalarPlusImmFMask = 0xFE70E000u, + SVEContiguousNonTemporalStore_ScalarPlusImmMask = 0xFFF0E000u, + STNT1B_z_p_bi_contiguous = SVEContiguousNonTemporalStore_ScalarPlusImmFixed, + STNT1H_z_p_bi_contiguous = SVEContiguousNonTemporalStore_ScalarPlusImmFixed | 0x00800000u, + STNT1W_z_p_bi_contiguous = SVEContiguousNonTemporalStore_ScalarPlusImmFixed | 0x01000000u, + STNT1D_z_p_bi_contiguous = SVEContiguousNonTemporalStore_ScalarPlusImmFixed | 0x01800000u +}; + +enum SVEContiguousNonTemporalStore_ScalarPlusScalarOp : uint32_t { + SVEContiguousNonTemporalStore_ScalarPlusScalarFixed = 0xE4006000u, + SVEContiguousNonTemporalStore_ScalarPlusScalarFMask = 0xFE60E000u, + 
SVEContiguousNonTemporalStore_ScalarPlusScalarMask = 0xFFE0E000u, + STNT1B_z_p_br_contiguous = SVEContiguousNonTemporalStore_ScalarPlusScalarFixed, + STNT1H_z_p_br_contiguous = SVEContiguousNonTemporalStore_ScalarPlusScalarFixed | 0x00800000u, + STNT1W_z_p_br_contiguous = SVEContiguousNonTemporalStore_ScalarPlusScalarFixed | 0x01000000u, + STNT1D_z_p_br_contiguous = SVEContiguousNonTemporalStore_ScalarPlusScalarFixed | 0x01800000u +}; + +enum SVEContiguousPrefetch_ScalarPlusImmOp : uint32_t { + SVEContiguousPrefetch_ScalarPlusImmFixed = 0x85C00000u, + SVEContiguousPrefetch_ScalarPlusImmFMask = 0xFFC08010u, + SVEContiguousPrefetch_ScalarPlusImmMask = 0xFFC0E010u, + PRFB_i_p_bi_s = SVEContiguousPrefetch_ScalarPlusImmFixed, + PRFH_i_p_bi_s = SVEContiguousPrefetch_ScalarPlusImmFixed | 0x00002000u, + PRFW_i_p_bi_s = SVEContiguousPrefetch_ScalarPlusImmFixed | 0x00004000u, + PRFD_i_p_bi_s = SVEContiguousPrefetch_ScalarPlusImmFixed | 0x00006000u +}; + +enum SVEContiguousPrefetch_ScalarPlusScalarOp : uint32_t { + SVEContiguousPrefetch_ScalarPlusScalarFixed = 0x8400C000u, + SVEContiguousPrefetch_ScalarPlusScalarFMask = 0xFE60E010u, + SVEContiguousPrefetch_ScalarPlusScalarMask = 0xFFE0E010u, + PRFB_i_p_br_s = SVEContiguousPrefetch_ScalarPlusScalarFixed, + PRFH_i_p_br_s = SVEContiguousPrefetch_ScalarPlusScalarFixed | 0x00800000u, + PRFW_i_p_br_s = SVEContiguousPrefetch_ScalarPlusScalarFixed | 0x01000000u, + PRFD_i_p_br_s = SVEContiguousPrefetch_ScalarPlusScalarFixed | 0x01800000u +}; + +enum SVEContiguousStore_ScalarPlusImmOp : uint32_t { + SVEContiguousStore_ScalarPlusImmFixed = 0xE400E000u, + SVEContiguousStore_ScalarPlusImmFMask = 0xFE10E000u, + SVEContiguousStore_ScalarPlusImmMask = 0xFF90E000u, + ST1B_z_p_bi = SVEContiguousStore_ScalarPlusImmFixed, + ST1H_z_p_bi = SVEContiguousStore_ScalarPlusImmFixed | 0x00800000u, + ST1W_z_p_bi = SVEContiguousStore_ScalarPlusImmFixed | 0x01000000u, + ST1D_z_p_bi = SVEContiguousStore_ScalarPlusImmFixed | 0x01800000u +}; + +enum SVEContiguousStore_ScalarPlusScalarOp : uint32_t { + SVEContiguousStore_ScalarPlusScalarFixed = 0xE4004000u, + SVEContiguousStore_ScalarPlusScalarFMask = 0xFE00E000u, + SVEContiguousStore_ScalarPlusScalarMask = 0xFF80E000u, + ST1B_z_p_br = SVEContiguousStore_ScalarPlusScalarFixed, + ST1H_z_p_br = SVEContiguousStore_ScalarPlusScalarFixed | 0x00800000u, + ST1W_z_p_br = SVEContiguousStore_ScalarPlusScalarFixed | 0x01000000u, + ST1D_z_p_br = SVEContiguousStore_ScalarPlusScalarFixed | 0x01800000u +}; + +enum SVECopyFPImm_PredicatedOp : uint32_t { + SVECopyFPImm_PredicatedFixed = 0x0510C000u, + SVECopyFPImm_PredicatedFMask = 0xFF30E000u, + SVECopyFPImm_PredicatedMask = 0xFF30E000u, + FCPY_z_p_i = SVECopyFPImm_PredicatedFixed +}; + +enum SVECopyGeneralRegisterToVector_PredicatedOp : uint32_t { + SVECopyGeneralRegisterToVector_PredicatedFixed = 0x0528A000u, + SVECopyGeneralRegisterToVector_PredicatedFMask = 0xFF3FE000u, + SVECopyGeneralRegisterToVector_PredicatedMask = 0xFF3FE000u, + CPY_z_p_r = SVECopyGeneralRegisterToVector_PredicatedFixed +}; + +enum SVECopyIntImm_PredicatedOp : uint32_t { + SVECopyIntImm_PredicatedFixed = 0x05100000u, + SVECopyIntImm_PredicatedFMask = 0xFF308000u, + SVECopyIntImm_PredicatedMask = 0xFF308000u, + CPY_z_p_i = SVECopyIntImm_PredicatedFixed +}; + +enum SVECopySIMDFPScalarRegisterToVector_PredicatedOp : uint32_t { + SVECopySIMDFPScalarRegisterToVector_PredicatedFixed = 0x05208000u, + SVECopySIMDFPScalarRegisterToVector_PredicatedFMask = 0xFF3FE000u, + SVECopySIMDFPScalarRegisterToVector_PredicatedMask = 
0xFF3FE000u, + CPY_z_p_v = SVECopySIMDFPScalarRegisterToVector_PredicatedFixed +}; + +enum SVEElementCountOp : uint32_t { + SVEElementCountFixed = 0x0420E000u, + SVEElementCountFMask = 0xFF30F800u, + SVEElementCountMask = 0xFFF0FC00u, + CNTB_r_s = SVEElementCountFixed, + CNTH_r_s = SVEElementCountFixed | 0x00400000u, + CNTW_r_s = SVEElementCountFixed | 0x00800000u, + CNTD_r_s = SVEElementCountFixed | 0x00C00000u +}; + +enum SVEExtractElementToGeneralRegisterOp : uint32_t { + SVEExtractElementToGeneralRegisterFixed = 0x0520A000u, + SVEExtractElementToGeneralRegisterFMask = 0xFF3EE000u, + SVEExtractElementToGeneralRegisterMask = 0xFF3FE000u, + LASTA_r_p_z = SVEExtractElementToGeneralRegisterFixed, + LASTB_r_p_z = SVEExtractElementToGeneralRegisterFixed | 0x00010000u +}; + +enum SVEExtractElementToSIMDFPScalarRegisterOp : uint32_t { + SVEExtractElementToSIMDFPScalarRegisterFixed = 0x05228000u, + SVEExtractElementToSIMDFPScalarRegisterFMask = 0xFF3EE000u, + SVEExtractElementToSIMDFPScalarRegisterMask = 0xFF3FE000u, + LASTA_v_p_z = SVEExtractElementToSIMDFPScalarRegisterFixed, + LASTB_v_p_z = SVEExtractElementToSIMDFPScalarRegisterFixed | 0x00010000u +}; + +enum SVEFFRInitialiseOp : uint32_t { + SVEFFRInitialiseFixed = 0x252C9000u, + SVEFFRInitialiseFMask = 0xFF3FFFFFu, + SVEFFRInitialiseMask = 0xFFFFFFFFu, + SETFFR_f = SVEFFRInitialiseFixed +}; + +enum SVEFFRWriteFromPredicateOp : uint32_t { + SVEFFRWriteFromPredicateFixed = 0x25289000u, + SVEFFRWriteFromPredicateFMask = 0xFF3FFE1Fu, + SVEFFRWriteFromPredicateMask = 0xFFFFFE1Fu, + WRFFR_f_p = SVEFFRWriteFromPredicateFixed +}; + +enum SVEFPAccumulatingReductionOp : uint32_t { + SVEFPAccumulatingReductionFixed = 0x65182000u, + SVEFPAccumulatingReductionFMask = 0xFF38E000u, + SVEFPAccumulatingReductionMask = 0xFF3FE000u, + FADDA_v_p_z = SVEFPAccumulatingReductionFixed +}; + +enum SVEFPArithmeticUnpredicatedOp : uint32_t { + SVEFPArithmeticUnpredicatedFixed = 0x65000000u, + SVEFPArithmeticUnpredicatedFMask = 0xFF20E000u, + SVEFPArithmeticUnpredicatedMask = 0xFF20FC00u, + FADD_z_zz = SVEFPArithmeticUnpredicatedFixed, + FSUB_z_zz = SVEFPArithmeticUnpredicatedFixed | 0x00000400u, + FMUL_z_zz = SVEFPArithmeticUnpredicatedFixed | 0x00000800u, + FTSMUL_z_zz = SVEFPArithmeticUnpredicatedFixed | 0x00000C00u, + FRECPS_z_zz = SVEFPArithmeticUnpredicatedFixed | 0x00001800u, + FRSQRTS_z_zz = SVEFPArithmeticUnpredicatedFixed | 0x00001C00u +}; + +enum SVEFPArithmeticWithImm_PredicatedOp : uint32_t { + SVEFPArithmeticWithImm_PredicatedFixed = 0x65188000u, + SVEFPArithmeticWithImm_PredicatedFMask = 0xFF38E3C0u, + SVEFPArithmeticWithImm_PredicatedMask = 0xFF3FE3C0u, + FADD_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed, + FSUB_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00010000u, + FMUL_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00020000u, + FSUBR_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00030000u, + FMAXNM_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00040000u, + FMINNM_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00050000u, + FMAX_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00060000u, + FMIN_z_p_zs = SVEFPArithmeticWithImm_PredicatedFixed | 0x00070000u +}; + +enum SVEFPArithmetic_PredicatedOp : uint32_t { + SVEFPArithmetic_PredicatedFixed = 0x65008000u, + SVEFPArithmetic_PredicatedFMask = 0xFF30E000u, + SVEFPArithmetic_PredicatedMask = 0xFF3FE000u, + FADD_z_p_zz = SVEFPArithmetic_PredicatedFixed, + FSUB_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00010000u, + FMUL_z_p_zz = SVEFPArithmetic_PredicatedFixed | 
0x00020000u, + FSUBR_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00030000u, + FMAXNM_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00040000u, + FMINNM_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00050000u, + FMAX_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00060000u, + FMIN_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00070000u, + FABD_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00080000u, + FSCALE_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x00090000u, + FMULX_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x000A0000u, + FDIVR_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x000C0000u, + FDIV_z_p_zz = SVEFPArithmetic_PredicatedFixed | 0x000D0000u +}; + +enum SVEFPCompareVectorsOp : uint32_t { + SVEFPCompareVectorsFixed = 0x65004000u, + SVEFPCompareVectorsFMask = 0xFF204000u, + SVEFPCompareVectorsMask = 0xFF20E010u, + FCMGE_p_p_zz = SVEFPCompareVectorsFixed, + FCMGT_p_p_zz = SVEFPCompareVectorsFixed | 0x00000010u, + FCMEQ_p_p_zz = SVEFPCompareVectorsFixed | 0x00002000u, + FCMNE_p_p_zz = SVEFPCompareVectorsFixed | 0x00002010u, + FCMUO_p_p_zz = SVEFPCompareVectorsFixed | 0x00008000u, + FACGE_p_p_zz = SVEFPCompareVectorsFixed | 0x00008010u, + FACGT_p_p_zz = SVEFPCompareVectorsFixed | 0x0000A010u +}; + +enum SVEFPCompareWithZeroOp : uint32_t { + SVEFPCompareWithZeroFixed = 0x65102000u, + SVEFPCompareWithZeroFMask = 0xFF38E000u, + SVEFPCompareWithZeroMask = 0xFF3FE010u, + FCMGE_p_p_z0 = SVEFPCompareWithZeroFixed, + FCMGT_p_p_z0 = SVEFPCompareWithZeroFixed | 0x00000010u, + FCMLT_p_p_z0 = SVEFPCompareWithZeroFixed | 0x00010000u, + FCMLE_p_p_z0 = SVEFPCompareWithZeroFixed | 0x00010010u, + FCMEQ_p_p_z0 = SVEFPCompareWithZeroFixed | 0x00020000u, + FCMNE_p_p_z0 = SVEFPCompareWithZeroFixed | 0x00030000u +}; + +enum SVEFPComplexAdditionOp : uint32_t { + SVEFPComplexAdditionFixed = 0x64008000u, + SVEFPComplexAdditionFMask = 0xFF3EE000u, + SVEFPComplexAdditionMask = 0xFF3EE000u, + FCADD_z_p_zz = SVEFPComplexAdditionFixed +}; + +enum SVEFPComplexMulAddOp : uint32_t { + SVEFPComplexMulAddFixed = 0x64000000u, + SVEFPComplexMulAddFMask = 0xFF208000u, + SVEFPComplexMulAddMask = 0xFF208000u, + FCMLA_z_p_zzz = SVEFPComplexMulAddFixed +}; + +enum SVEFPComplexMulAddIndexOp : uint32_t { + SVEFPComplexMulAddIndexFixed = 0x64201000u, + SVEFPComplexMulAddIndexFMask = 0xFF20F000u, + SVEFPComplexMulAddIndexMask = 0xFFE0F000u, + FCMLA_z_zzzi_h = SVEFPComplexMulAddIndexFixed | 0x00800000u, + FCMLA_z_zzzi_s = SVEFPComplexMulAddIndexFixed | 0x00C00000u +}; + +enum SVEFPConvertPrecisionOp : uint32_t { + SVEFPConvertPrecisionFixed = 0x6508A000u, + SVEFPConvertPrecisionFMask = 0xFF3CE000u, + SVEFPConvertPrecisionMask = 0xFFFFE000u, + FCVT_z_p_z_s2h = SVEFPConvertPrecisionFixed | 0x00800000u, + FCVT_z_p_z_h2s = SVEFPConvertPrecisionFixed | 0x00810000u, + FCVT_z_p_z_d2h = SVEFPConvertPrecisionFixed | 0x00C00000u, + FCVT_z_p_z_h2d = SVEFPConvertPrecisionFixed | 0x00C10000u, + FCVT_z_p_z_d2s = SVEFPConvertPrecisionFixed | 0x00C20000u, + FCVT_z_p_z_s2d = SVEFPConvertPrecisionFixed | 0x00C30000u +}; + +enum SVEFPConvertToIntOp : uint32_t { + SVEFPConvertToIntFixed = 0x6518A000u, + SVEFPConvertToIntFMask = 0xFF38E000u, + SVEFPConvertToIntMask = 0xFFFFE000u, + FCVTZS_z_p_z_fp162h = SVEFPConvertToIntFixed | 0x00420000u, + FCVTZU_z_p_z_fp162h = SVEFPConvertToIntFixed | 0x00430000u, + FCVTZS_z_p_z_fp162w = SVEFPConvertToIntFixed | 0x00440000u, + FCVTZU_z_p_z_fp162w = SVEFPConvertToIntFixed | 0x00450000u, + FCVTZS_z_p_z_fp162x = SVEFPConvertToIntFixed | 0x00460000u, + FCVTZU_z_p_z_fp162x = SVEFPConvertToIntFixed | 0x00470000u, + 
FCVTZS_z_p_z_s2w = SVEFPConvertToIntFixed | 0x00840000u, + FCVTZU_z_p_z_s2w = SVEFPConvertToIntFixed | 0x00850000u, + FCVTZS_z_p_z_d2w = SVEFPConvertToIntFixed | 0x00C00000u, + FCVTZU_z_p_z_d2w = SVEFPConvertToIntFixed | 0x00C10000u, + FCVTZS_z_p_z_s2x = SVEFPConvertToIntFixed | 0x00C40000u, + FCVTZU_z_p_z_s2x = SVEFPConvertToIntFixed | 0x00C50000u, + FCVTZS_z_p_z_d2x = SVEFPConvertToIntFixed | 0x00C60000u, + FCVTZU_z_p_z_d2x = SVEFPConvertToIntFixed | 0x00C70000u +}; + +enum SVEFPExponentialAcceleratorOp : uint32_t { + SVEFPExponentialAcceleratorFixed = 0x0420B800u, + SVEFPExponentialAcceleratorFMask = 0xFF20FC00u, + SVEFPExponentialAcceleratorMask = 0xFF3FFC00u, + FEXPA_z_z = SVEFPExponentialAcceleratorFixed +}; + +enum SVEFPFastReductionOp : uint32_t { + SVEFPFastReductionFixed = 0x65002000u, + SVEFPFastReductionFMask = 0xFF38E000u, + SVEFPFastReductionMask = 0xFF3FE000u, + FADDV_v_p_z = SVEFPFastReductionFixed, + FMAXNMV_v_p_z = SVEFPFastReductionFixed | 0x00040000u, + FMINNMV_v_p_z = SVEFPFastReductionFixed | 0x00050000u, + FMAXV_v_p_z = SVEFPFastReductionFixed | 0x00060000u, + FMINV_v_p_z = SVEFPFastReductionFixed | 0x00070000u +}; + +enum SVEFPMulAddOp : uint32_t { + SVEFPMulAddFixed = 0x65200000u, + SVEFPMulAddFMask = 0xFF200000u, + SVEFPMulAddMask = 0xFF20E000u, + FMLA_z_p_zzz = SVEFPMulAddFixed, + FMLS_z_p_zzz = SVEFPMulAddFixed | 0x00002000u, + FNMLA_z_p_zzz = SVEFPMulAddFixed | 0x00004000u, + FNMLS_z_p_zzz = SVEFPMulAddFixed | 0x00006000u, + FMAD_z_p_zzz = SVEFPMulAddFixed | 0x00008000u, + FMSB_z_p_zzz = SVEFPMulAddFixed | 0x0000A000u, + FNMAD_z_p_zzz = SVEFPMulAddFixed | 0x0000C000u, + FNMSB_z_p_zzz = SVEFPMulAddFixed | 0x0000E000u +}; + +enum SVEFPMulAddIndexOp : uint32_t { + SVEFPMulAddIndexFixed = 0x64200000u, + SVEFPMulAddIndexFMask = 0xFF20F800u, + SVEFPMulAddIndexMask = 0xFFE0FC00u, + FMLA_z_zzzi_h = SVEFPMulAddIndexFixed, + FMLA_z_zzzi_h_i3h = FMLA_z_zzzi_h | 0x00400000u, + FMLS_z_zzzi_h = SVEFPMulAddIndexFixed | 0x00000400u, + FMLS_z_zzzi_h_i3h = FMLS_z_zzzi_h | 0x00400000u, + FMLA_z_zzzi_s = SVEFPMulAddIndexFixed | 0x00800000u, + FMLS_z_zzzi_s = SVEFPMulAddIndexFixed | 0x00800400u, + FMLA_z_zzzi_d = SVEFPMulAddIndexFixed | 0x00C00000u, + FMLS_z_zzzi_d = SVEFPMulAddIndexFixed | 0x00C00400u +}; + +enum SVEFPMulIndexOp : uint32_t { + SVEFPMulIndexFixed = 0x64202000u, + SVEFPMulIndexFMask = 0xFF20FC00u, + SVEFPMulIndexMask = 0xFFE0FC00u, + FMUL_z_zzi_h = SVEFPMulIndexFixed, + FMUL_z_zzi_h_i3h = FMUL_z_zzi_h | 0x00400000u, + FMUL_z_zzi_s = SVEFPMulIndexFixed | 0x00800000u, + FMUL_z_zzi_d = SVEFPMulIndexFixed | 0x00C00000u +}; + +enum SVEFPRoundToIntegralValueOp : uint32_t { + SVEFPRoundToIntegralValueFixed = 0x6500A000u, + SVEFPRoundToIntegralValueFMask = 0xFF38E000u, + SVEFPRoundToIntegralValueMask = 0xFF3FE000u, + FRINTN_z_p_z = SVEFPRoundToIntegralValueFixed, + FRINTP_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00010000u, + FRINTM_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00020000u, + FRINTZ_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00030000u, + FRINTA_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00040000u, + FRINTX_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00060000u, + FRINTI_z_p_z = SVEFPRoundToIntegralValueFixed | 0x00070000u +}; + +enum SVEFPTrigMulAddCoefficientOp : uint32_t { + SVEFPTrigMulAddCoefficientFixed = 0x65108000u, + SVEFPTrigMulAddCoefficientFMask = 0xFF38FC00u, + SVEFPTrigMulAddCoefficientMask = 0xFF38FC00u, + FTMAD_z_zzi = SVEFPTrigMulAddCoefficientFixed +}; + +enum SVEFPTrigSelectCoefficientOp : uint32_t { + SVEFPTrigSelectCoefficientFixed = 
0x0420B000u, + SVEFPTrigSelectCoefficientFMask = 0xFF20F800u, + SVEFPTrigSelectCoefficientMask = 0xFF20FC00u, + FTSSEL_z_zz = SVEFPTrigSelectCoefficientFixed +}; + +enum SVEFPUnaryOpOp : uint32_t { + SVEFPUnaryOpFixed = 0x650CA000u, + SVEFPUnaryOpFMask = 0xFF3CE000u, + SVEFPUnaryOpMask = 0xFF3FE000u, + FRECPX_z_p_z = SVEFPUnaryOpFixed, + FSQRT_z_p_z = SVEFPUnaryOpFixed | 0x00010000u +}; + +enum SVEFPUnaryOpUnpredicatedOp : uint32_t { + SVEFPUnaryOpUnpredicatedFixed = 0x65083000u, + SVEFPUnaryOpUnpredicatedFMask = 0xFF38F000u, + SVEFPUnaryOpUnpredicatedMask = 0xFF3FFC00u, + FRECPE_z_z = SVEFPUnaryOpUnpredicatedFixed | 0x00060000u, + FRSQRTE_z_z = SVEFPUnaryOpUnpredicatedFixed | 0x00070000u +}; + +enum SVEIncDecByPredicateCountOp : uint32_t { + SVEIncDecByPredicateCountFixed = 0x25288000u, + SVEIncDecByPredicateCountFMask = 0xFF38F000u, + SVEIncDecByPredicateCountMask = 0xFF3FFE00u, + SQINCP_z_p_z = SVEIncDecByPredicateCountFixed, + SQINCP_r_p_r_sx = SVEIncDecByPredicateCountFixed | 0x00000800u, + SQINCP_r_p_r_x = SVEIncDecByPredicateCountFixed | 0x00000C00u, + UQINCP_z_p_z = SVEIncDecByPredicateCountFixed | 0x00010000u, + UQINCP_r_p_r_uw = SVEIncDecByPredicateCountFixed | 0x00010800u, + UQINCP_r_p_r_x = SVEIncDecByPredicateCountFixed | 0x00010C00u, + SQDECP_z_p_z = SVEIncDecByPredicateCountFixed | 0x00020000u, + SQDECP_r_p_r_sx = SVEIncDecByPredicateCountFixed | 0x00020800u, + SQDECP_r_p_r_x = SVEIncDecByPredicateCountFixed | 0x00020C00u, + UQDECP_z_p_z = SVEIncDecByPredicateCountFixed | 0x00030000u, + UQDECP_r_p_r_uw = SVEIncDecByPredicateCountFixed | 0x00030800u, + UQDECP_r_p_r_x = SVEIncDecByPredicateCountFixed | 0x00030C00u, + INCP_z_p_z = SVEIncDecByPredicateCountFixed | 0x00040000u, + INCP_r_p_r = SVEIncDecByPredicateCountFixed | 0x00040800u, + DECP_z_p_z = SVEIncDecByPredicateCountFixed | 0x00050000u, + DECP_r_p_r = SVEIncDecByPredicateCountFixed | 0x00050800u +}; + +enum SVEIncDecRegisterByElementCountOp : uint32_t { + SVEIncDecRegisterByElementCountFixed = 0x0430E000u, + SVEIncDecRegisterByElementCountFMask = 0xFF30F800u, + SVEIncDecRegisterByElementCountMask = 0xFFF0FC00u, + INCB_r_rs = SVEIncDecRegisterByElementCountFixed, + DECB_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00000400u, + INCH_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00400000u, + DECH_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00400400u, + INCW_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00800000u, + DECW_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00800400u, + INCD_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00C00000u, + DECD_r_rs = SVEIncDecRegisterByElementCountFixed | 0x00C00400u +}; + +enum SVEIncDecVectorByElementCountOp : uint32_t { + SVEIncDecVectorByElementCountFixed = 0x0430C000u, + SVEIncDecVectorByElementCountFMask = 0xFF30F800u, + SVEIncDecVectorByElementCountMask = 0xFFF0FC00u, + INCH_z_zs = SVEIncDecVectorByElementCountFixed | 0x00400000u, + DECH_z_zs = SVEIncDecVectorByElementCountFixed | 0x00400400u, + INCW_z_zs = SVEIncDecVectorByElementCountFixed | 0x00800000u, + DECW_z_zs = SVEIncDecVectorByElementCountFixed | 0x00800400u, + INCD_z_zs = SVEIncDecVectorByElementCountFixed | 0x00C00000u, + DECD_z_zs = SVEIncDecVectorByElementCountFixed | 0x00C00400u +}; + +enum SVEIndexGenerationOp : uint32_t { + SVEIndexGenerationFixed = 0x04204000u, + SVEIndexGenerationFMask = 0xFF20F000u, + SVEIndexGenerationMask = 0xFF20FC00u, + INDEX_z_ii = SVEIndexGenerationFixed, + INDEX_z_ri = SVEIndexGenerationFixed | 0x00000400u, + INDEX_z_ir = SVEIndexGenerationFixed | 0x00000800u, + 
INDEX_z_rr = SVEIndexGenerationFixed | 0x00000C00u +}; + +enum SVEInsertGeneralRegisterOp : uint32_t { + SVEInsertGeneralRegisterFixed = 0x05243800u, + SVEInsertGeneralRegisterFMask = 0xFF3FFC00u, + SVEInsertGeneralRegisterMask = 0xFF3FFC00u, + INSR_z_r = SVEInsertGeneralRegisterFixed +}; + +enum SVEInsertSIMDFPScalarRegisterOp : uint32_t { + SVEInsertSIMDFPScalarRegisterFixed = 0x05343800u, + SVEInsertSIMDFPScalarRegisterFMask = 0xFF3FFC00u, + SVEInsertSIMDFPScalarRegisterMask = 0xFF3FFC00u, + INSR_z_v = SVEInsertSIMDFPScalarRegisterFixed +}; + +enum SVEIntAddSubtractImm_UnpredicatedOp : uint32_t { + SVEIntAddSubtractImm_UnpredicatedFixed = 0x2520C000u, + SVEIntAddSubtractImm_UnpredicatedFMask = 0xFF38C000u, + SVEIntAddSubtractImm_UnpredicatedMask = 0xFF3FC000u, + ADD_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed, + SUB_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00010000u, + SUBR_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00030000u, + SQADD_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00040000u, + UQADD_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00050000u, + SQSUB_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00060000u, + UQSUB_z_zi = SVEIntAddSubtractImm_UnpredicatedFixed | 0x00070000u +}; + +enum SVEIntAddSubtractVectors_PredicatedOp : uint32_t { + SVEIntAddSubtractVectors_PredicatedFixed = 0x04000000u, + SVEIntAddSubtractVectors_PredicatedFMask = 0xFF38E000u, + SVEIntAddSubtractVectors_PredicatedMask = 0xFF3FE000u, + ADD_z_p_zz = SVEIntAddSubtractVectors_PredicatedFixed, + SUB_z_p_zz = SVEIntAddSubtractVectors_PredicatedFixed | 0x00010000u, + SUBR_z_p_zz = SVEIntAddSubtractVectors_PredicatedFixed | 0x00030000u +}; + +enum SVEIntArithmeticUnpredicatedOp : uint32_t { + SVEIntArithmeticUnpredicatedFixed = 0x04200000u, + SVEIntArithmeticUnpredicatedFMask = 0xFF20E000u, + SVEIntArithmeticUnpredicatedMask = 0xFF20FC00u, + ADD_z_zz = SVEIntArithmeticUnpredicatedFixed, + SUB_z_zz = SVEIntArithmeticUnpredicatedFixed | 0x00000400u, + SQADD_z_zz = SVEIntArithmeticUnpredicatedFixed | 0x00001000u, + UQADD_z_zz = SVEIntArithmeticUnpredicatedFixed | 0x00001400u, + SQSUB_z_zz = SVEIntArithmeticUnpredicatedFixed | 0x00001800u, + UQSUB_z_zz = SVEIntArithmeticUnpredicatedFixed | 0x00001C00u +}; + +enum SVEIntCompareScalarCountAndLimitOp : uint32_t { + SVEIntCompareScalarCountAndLimitFixed = 0x25200000u, + SVEIntCompareScalarCountAndLimitFMask = 0xFF20E000u, + SVEIntCompareScalarCountAndLimitMask = 0xFF20EC10u, + WHILELT_p_p_rr = SVEIntCompareScalarCountAndLimitFixed | 0x00000400u, + WHILELE_p_p_rr = SVEIntCompareScalarCountAndLimitFixed | 0x00000410u, + WHILELO_p_p_rr = SVEIntCompareScalarCountAndLimitFixed | 0x00000C00u, + WHILELS_p_p_rr = SVEIntCompareScalarCountAndLimitFixed | 0x00000C10u +}; + +enum SVEIntCompareSignedImmOp : uint32_t { + SVEIntCompareSignedImmFixed = 0x25000000u, + SVEIntCompareSignedImmFMask = 0xFF204000u, + SVEIntCompareSignedImmMask = 0xFF20E010u, + CMPGE_p_p_zi = SVEIntCompareSignedImmFixed, + CMPGT_p_p_zi = SVEIntCompareSignedImmFixed | 0x00000010u, + CMPLT_p_p_zi = SVEIntCompareSignedImmFixed | 0x00002000u, + CMPLE_p_p_zi = SVEIntCompareSignedImmFixed | 0x00002010u, + CMPEQ_p_p_zi = SVEIntCompareSignedImmFixed | 0x00008000u, + CMPNE_p_p_zi = SVEIntCompareSignedImmFixed | 0x00008010u +}; + +enum SVEIntCompareUnsignedImmOp : uint32_t { + SVEIntCompareUnsignedImmFixed = 0x24200000u, + SVEIntCompareUnsignedImmFMask = 0xFF200000u, + SVEIntCompareUnsignedImmMask = 0xFF202010u, + CMPHS_p_p_zi = SVEIntCompareUnsignedImmFixed, + CMPHI_p_p_zi = 
SVEIntCompareUnsignedImmFixed | 0x00000010u, + CMPLO_p_p_zi = SVEIntCompareUnsignedImmFixed | 0x00002000u, + CMPLS_p_p_zi = SVEIntCompareUnsignedImmFixed | 0x00002010u +}; + +enum SVEIntCompareVectorsOp : uint32_t { + SVEIntCompareVectorsFixed = 0x24000000u, + SVEIntCompareVectorsFMask = 0xFF200000u, + SVEIntCompareVectorsMask = 0xFF20E010u, + CMPHS_p_p_zz = SVEIntCompareVectorsFixed, + CMPHI_p_p_zz = SVEIntCompareVectorsFixed | 0x00000010u, + CMPEQ_p_p_zw = SVEIntCompareVectorsFixed | 0x00002000u, + CMPNE_p_p_zw = SVEIntCompareVectorsFixed | 0x00002010u, + CMPGE_p_p_zw = SVEIntCompareVectorsFixed | 0x00004000u, + CMPGT_p_p_zw = SVEIntCompareVectorsFixed | 0x00004010u, + CMPLT_p_p_zw = SVEIntCompareVectorsFixed | 0x00006000u, + CMPLE_p_p_zw = SVEIntCompareVectorsFixed | 0x00006010u, + CMPGE_p_p_zz = SVEIntCompareVectorsFixed | 0x00008000u, + CMPGT_p_p_zz = SVEIntCompareVectorsFixed | 0x00008010u, + CMPEQ_p_p_zz = SVEIntCompareVectorsFixed | 0x0000A000u, + CMPNE_p_p_zz = SVEIntCompareVectorsFixed | 0x0000A010u, + CMPHS_p_p_zw = SVEIntCompareVectorsFixed | 0x0000C000u, + CMPHI_p_p_zw = SVEIntCompareVectorsFixed | 0x0000C010u, + CMPLO_p_p_zw = SVEIntCompareVectorsFixed | 0x0000E000u, + CMPLS_p_p_zw = SVEIntCompareVectorsFixed | 0x0000E010u +}; + +enum SVEIntConvertToFPOp : uint32_t { + SVEIntConvertToFPFixed = 0x6510A000u, + SVEIntConvertToFPFMask = 0xFF38E000u, + SVEIntConvertToFPMask = 0xFFFFE000u, + SCVTF_z_p_z_h2fp16 = SVEIntConvertToFPFixed | 0x00420000u, + UCVTF_z_p_z_h2fp16 = SVEIntConvertToFPFixed | 0x00430000u, + SCVTF_z_p_z_w2fp16 = SVEIntConvertToFPFixed | 0x00440000u, + UCVTF_z_p_z_w2fp16 = SVEIntConvertToFPFixed | 0x00450000u, + SCVTF_z_p_z_x2fp16 = SVEIntConvertToFPFixed | 0x00460000u, + UCVTF_z_p_z_x2fp16 = SVEIntConvertToFPFixed | 0x00470000u, + SCVTF_z_p_z_w2s = SVEIntConvertToFPFixed | 0x00840000u, + UCVTF_z_p_z_w2s = SVEIntConvertToFPFixed | 0x00850000u, + SCVTF_z_p_z_w2d = SVEIntConvertToFPFixed | 0x00C00000u, + UCVTF_z_p_z_w2d = SVEIntConvertToFPFixed | 0x00C10000u, + SCVTF_z_p_z_x2s = SVEIntConvertToFPFixed | 0x00C40000u, + UCVTF_z_p_z_x2s = SVEIntConvertToFPFixed | 0x00C50000u, + SCVTF_z_p_z_x2d = SVEIntConvertToFPFixed | 0x00C60000u, + UCVTF_z_p_z_x2d = SVEIntConvertToFPFixed | 0x00C70000u +}; + +enum SVEIntDivideVectors_PredicatedOp : uint32_t { + SVEIntDivideVectors_PredicatedFixed = 0x04140000u, + SVEIntDivideVectors_PredicatedFMask = 0xFF3CE000u, + SVEIntDivideVectors_PredicatedMask = 0xFF3FE000u, + SDIV_z_p_zz = SVEIntDivideVectors_PredicatedFixed, + UDIV_z_p_zz = SVEIntDivideVectors_PredicatedFixed | 0x00010000u, + SDIVR_z_p_zz = SVEIntDivideVectors_PredicatedFixed | 0x00020000u, + UDIVR_z_p_zz = SVEIntDivideVectors_PredicatedFixed | 0x00030000u +}; + +enum SVEIntMinMaxDifference_PredicatedOp : uint32_t { + SVEIntMinMaxDifference_PredicatedFixed = 0x04080000u, + SVEIntMinMaxDifference_PredicatedFMask = 0xFF38E000u, + SVEIntMinMaxDifference_PredicatedMask = 0xFF3FE000u, + SMAX_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed, + UMAX_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed | 0x00010000u, + SMIN_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed | 0x00020000u, + UMIN_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed | 0x00030000u, + SABD_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed | 0x00040000u, + UABD_z_p_zz = SVEIntMinMaxDifference_PredicatedFixed | 0x00050000u +}; + +enum SVEIntMinMaxImm_UnpredicatedOp : uint32_t { + SVEIntMinMaxImm_UnpredicatedFixed = 0x2528C000u, + SVEIntMinMaxImm_UnpredicatedFMask = 0xFF38C000u, + SVEIntMinMaxImm_UnpredicatedMask = 
0xFF3FE000u, + SMAX_z_zi = SVEIntMinMaxImm_UnpredicatedFixed, + UMAX_z_zi = SVEIntMinMaxImm_UnpredicatedFixed | 0x00010000u, + SMIN_z_zi = SVEIntMinMaxImm_UnpredicatedFixed | 0x00020000u, + UMIN_z_zi = SVEIntMinMaxImm_UnpredicatedFixed | 0x00030000u +}; + +enum SVEIntMulAddPredicatedOp : uint32_t { + SVEIntMulAddPredicatedFixed = 0x04004000u, + SVEIntMulAddPredicatedFMask = 0xFF204000u, + SVEIntMulAddPredicatedMask = 0xFF20E000u, + MLA_z_p_zzz = SVEIntMulAddPredicatedFixed, + MLS_z_p_zzz = SVEIntMulAddPredicatedFixed | 0x00002000u, + MAD_z_p_zzz = SVEIntMulAddPredicatedFixed | 0x00008000u, + MSB_z_p_zzz = SVEIntMulAddPredicatedFixed | 0x0000A000u +}; + +enum SVEIntMulAddUnpredicatedOp : uint32_t { + SVEIntMulAddUnpredicatedFixed = 0x44000000u, + SVEIntMulAddUnpredicatedFMask = 0xFF208000u, + SVEIntMulAddUnpredicatedMask = 0xFF20FC00u, + SDOT_z_zzz = SVEIntMulAddUnpredicatedFixed, + UDOT_z_zzz = SVEIntMulAddUnpredicatedFixed | 0x00000400u +}; + +enum SVEIntMulImm_UnpredicatedOp : uint32_t { + SVEIntMulImm_UnpredicatedFixed = 0x2530C000u, + SVEIntMulImm_UnpredicatedFMask = 0xFF38C000u, + SVEIntMulImm_UnpredicatedMask = 0xFF3FE000u, + MUL_z_zi = SVEIntMulImm_UnpredicatedFixed +}; + +enum SVEIntMulVectors_PredicatedOp : uint32_t { + SVEIntMulVectors_PredicatedFixed = 0x04100000u, + SVEIntMulVectors_PredicatedFMask = 0xFF3CE000u, + SVEIntMulVectors_PredicatedMask = 0xFF3FE000u, + MUL_z_p_zz = SVEIntMulVectors_PredicatedFixed, + SMULH_z_p_zz = SVEIntMulVectors_PredicatedFixed | 0x00020000u, + UMULH_z_p_zz = SVEIntMulVectors_PredicatedFixed | 0x00030000u +}; + +enum SVEMovprfxOp : uint32_t { + SVEMovprfxFixed = 0x04002000u, + SVEMovprfxFMask = 0xFF20E000u, + SVEMovprfxMask = 0xFF3EE000u, + MOVPRFX_z_p_z = SVEMovprfxFixed | 0x00100000u +}; + +enum SVEIntReductionOp : uint32_t { + SVEIntReductionFixed = 0x04002000u, + SVEIntReductionFMask = 0xFF20E000u, + SVEIntReductionMask = 0xFF3FE000u, + SADDV_r_p_z = SVEIntReductionFixed, + UADDV_r_p_z = SVEIntReductionFixed | 0x00010000u, + SMAXV_r_p_z = SVEIntReductionFixed | 0x00080000u, + UMAXV_r_p_z = SVEIntReductionFixed | 0x00090000u, + SMINV_r_p_z = SVEIntReductionFixed | 0x000A0000u, + UMINV_r_p_z = SVEIntReductionFixed | 0x000B0000u +}; + +enum SVEIntReductionLogicalOp : uint32_t { + SVEIntReductionLogicalFixed = 0x04182000u, + SVEIntReductionLogicalFMask = 0xFF38E000u, + SVEIntReductionLogicalMask = 0xFF3FE000u, + ORV_r_p_z = SVEIntReductionLogicalFixed | 0x00180000u, + EORV_r_p_z = SVEIntReductionLogicalFixed | 0x00190000u, + ANDV_r_p_z = SVEIntReductionLogicalFixed | 0x001A0000u +}; + +enum SVEIntUnaryArithmeticPredicatedOp : uint32_t { + SVEIntUnaryArithmeticPredicatedFixed = 0x0400A000u, + SVEIntUnaryArithmeticPredicatedFMask = 0xFF20E000u, + SVEIntUnaryArithmeticPredicatedMask = 0xFF3FE000u, + SXTB_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00100000u, + UXTB_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00110000u, + SXTH_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00120000u, + UXTH_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00130000u, + SXTW_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00140000u, + UXTW_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00150000u, + ABS_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00160000u, + NEG_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00170000u, + CLS_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00180000u, + CLZ_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x00190000u, + CNT_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x001A0000u, + CNOT_z_p_z = 
SVEIntUnaryArithmeticPredicatedFixed | 0x001B0000u, + FABS_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x001C0000u, + FNEG_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x001D0000u, + NOT_z_p_z = SVEIntUnaryArithmeticPredicatedFixed | 0x001E0000u +}; + +enum SVELoadAndBroadcastElementOp : uint32_t { + SVELoadAndBroadcastElementFixed = 0x84408000u, + SVELoadAndBroadcastElementFMask = 0xFE408000u, + SVELoadAndBroadcastElementMask = 0xFFC0E000u, + LD1RB_z_p_bi_u8 = SVELoadAndBroadcastElementFixed, + LD1RB_z_p_bi_u16 = SVELoadAndBroadcastElementFixed | 0x00002000u, + LD1RB_z_p_bi_u32 = SVELoadAndBroadcastElementFixed | 0x00004000u, + LD1RB_z_p_bi_u64 = SVELoadAndBroadcastElementFixed | 0x00006000u, + LD1RSW_z_p_bi_s64 = SVELoadAndBroadcastElementFixed | 0x00800000u, + LD1RH_z_p_bi_u16 = SVELoadAndBroadcastElementFixed | 0x00802000u, + LD1RH_z_p_bi_u32 = SVELoadAndBroadcastElementFixed | 0x00804000u, + LD1RH_z_p_bi_u64 = SVELoadAndBroadcastElementFixed | 0x00806000u, + LD1RSH_z_p_bi_s64 = SVELoadAndBroadcastElementFixed | 0x01000000u, + LD1RSH_z_p_bi_s32 = SVELoadAndBroadcastElementFixed | 0x01002000u, + LD1RW_z_p_bi_u32 = SVELoadAndBroadcastElementFixed | 0x01004000u, + LD1RW_z_p_bi_u64 = SVELoadAndBroadcastElementFixed | 0x01006000u, + LD1RSB_z_p_bi_s64 = SVELoadAndBroadcastElementFixed | 0x01800000u, + LD1RSB_z_p_bi_s32 = SVELoadAndBroadcastElementFixed | 0x01802000u, + LD1RSB_z_p_bi_s16 = SVELoadAndBroadcastElementFixed | 0x01804000u, + LD1RD_z_p_bi_u64 = SVELoadAndBroadcastElementFixed | 0x01806000u +}; + +enum SVELoadAndBroadcastQuadword_ScalarPlusImmOp : uint32_t { + SVELoadAndBroadcastQuadword_ScalarPlusImmFixed = 0xA4002000u, + SVELoadAndBroadcastQuadword_ScalarPlusImmFMask = 0xFE10E000u, + SVELoadAndBroadcastQuadword_ScalarPlusImmMask = 0xFFF0E000u, + LD1RQB_z_p_bi_u8 = SVELoadAndBroadcastQuadword_ScalarPlusImmFixed, + LD1RQH_z_p_bi_u16 = SVELoadAndBroadcastQuadword_ScalarPlusImmFixed | 0x00800000u, + LD1RQW_z_p_bi_u32 = SVELoadAndBroadcastQuadword_ScalarPlusImmFixed | 0x01000000u, + LD1RQD_z_p_bi_u64 = SVELoadAndBroadcastQuadword_ScalarPlusImmFixed | 0x01800000u +}; + +enum SVELoadAndBroadcastQuadword_ScalarPlusScalarOp : uint32_t { + SVELoadAndBroadcastQuadword_ScalarPlusScalarFixed = 0xA4000000u, + SVELoadAndBroadcastQuadword_ScalarPlusScalarFMask = 0xFE00E000u, + SVELoadAndBroadcastQuadword_ScalarPlusScalarMask = 0xFFE0E000u, + LD1RQB_z_p_br_contiguous = SVELoadAndBroadcastQuadword_ScalarPlusScalarFixed, + LD1RQH_z_p_br_contiguous = SVELoadAndBroadcastQuadword_ScalarPlusScalarFixed | 0x00800000u, + LD1RQW_z_p_br_contiguous = SVELoadAndBroadcastQuadword_ScalarPlusScalarFixed | 0x01000000u, + LD1RQD_z_p_br_contiguous = SVELoadAndBroadcastQuadword_ScalarPlusScalarFixed | 0x01800000u +}; + +enum SVELoadMultipleStructures_ScalarPlusImmOp : uint32_t { + SVELoadMultipleStructures_ScalarPlusImmFixed = 0xA400E000u, + SVELoadMultipleStructures_ScalarPlusImmFMask = 0xFE10E000u, + SVELoadMultipleStructures_ScalarPlusImmMask = 0xFFF0E000u, + LD2B_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00200000u, + LD3B_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00400000u, + LD4B_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00600000u, + LD2H_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00A00000u, + LD3H_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00C00000u, + LD4H_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x00E00000u, + LD2W_z_p_bi_contiguous = 
SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01200000u, + LD3W_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01400000u, + LD4W_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01600000u, + LD2D_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01A00000u, + LD3D_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01C00000u, + LD4D_z_p_bi_contiguous = SVELoadMultipleStructures_ScalarPlusImmFixed | 0x01E00000u +}; + +enum SVELoadMultipleStructures_ScalarPlusScalarOp : uint32_t { + SVELoadMultipleStructures_ScalarPlusScalarFixed = 0xA400C000u, + SVELoadMultipleStructures_ScalarPlusScalarFMask = 0xFE00E000u, + SVELoadMultipleStructures_ScalarPlusScalarMask = 0xFFE0E000u, + LD2B_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00200000u, + LD3B_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00400000u, + LD4B_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00600000u, + LD2H_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00A00000u, + LD3H_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00C00000u, + LD4H_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x00E00000u, + LD2W_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01200000u, + LD3W_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01400000u, + LD4W_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01600000u, + LD2D_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01A00000u, + LD3D_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01C00000u, + LD4D_z_p_br_contiguous = SVELoadMultipleStructures_ScalarPlusScalarFixed | 0x01E00000u +}; + +enum SVELoadPredicateRegisterOp : uint32_t { + SVELoadPredicateRegisterFixed = 0x85800000u, + SVELoadPredicateRegisterFMask = 0xFFC0E010u, + SVELoadPredicateRegisterMask = 0xFFC0E010u, + LDR_p_bi = SVELoadPredicateRegisterFixed +}; + +enum SVELoadVectorRegisterOp : uint32_t { + SVELoadVectorRegisterFixed = 0x85804000u, + SVELoadVectorRegisterFMask = 0xFFC0E000u, + SVELoadVectorRegisterMask = 0xFFC0E000u, + LDR_z_bi = SVELoadVectorRegisterFixed +}; + +enum SVEMulIndexOp : uint32_t { + SVEMulIndexFixed = 0x44200000u, + SVEMulIndexFMask = 0xFF200000u, + SVEMulIndexMask = 0xFFE0FC00u, + SDOT_z_zzzi_s = SVEMulIndexFixed | 0x00800000u, + UDOT_z_zzzi_s = SVEMulIndexFixed | 0x00800400u, + SDOT_z_zzzi_d = SVEMulIndexFixed | 0x00C00000u, + UDOT_z_zzzi_d = SVEMulIndexFixed | 0x00C00400u +}; + +enum SVEPartitionBreakConditionOp : uint32_t { + SVEPartitionBreakConditionFixed = 0x25104000u, + SVEPartitionBreakConditionFMask = 0xFF3FC200u, + SVEPartitionBreakConditionMask = 0xFFFFC200u, + BRKA_p_p_p = SVEPartitionBreakConditionFixed, + BRKAS_p_p_p_z = SVEPartitionBreakConditionFixed | 0x00400000u, + BRKB_p_p_p = SVEPartitionBreakConditionFixed | 0x00800000u, + BRKBS_p_p_p_z = SVEPartitionBreakConditionFixed | 0x00C00000u +}; + +enum SVEPermutePredicateElementsOp : uint32_t { + SVEPermutePredicateElementsFixed = 0x05204000u, + SVEPermutePredicateElementsFMask = 0xFF30E210u, + SVEPermutePredicateElementsMask = 0xFF30FE10u, + ZIP1_p_pp = SVEPermutePredicateElementsFixed, + ZIP2_p_pp = SVEPermutePredicateElementsFixed | 0x00000400u, + UZP1_p_pp = SVEPermutePredicateElementsFixed | 0x00000800u, + UZP2_p_pp = SVEPermutePredicateElementsFixed | 0x00000C00u, + TRN1_p_pp = SVEPermutePredicateElementsFixed 
| 0x00001000u, + TRN2_p_pp = SVEPermutePredicateElementsFixed | 0x00001400u +}; + +enum SVEPermuteVectorExtractOp : uint32_t { + SVEPermuteVectorExtractFixed = 0x05200000u, + SVEPermuteVectorExtractFMask = 0xFF20E000u, + SVEPermuteVectorExtractMask = 0xFFE0E000u, + EXT_z_zi_des = SVEPermuteVectorExtractFixed +}; + +enum SVEPermuteVectorInterleavingOp : uint32_t { + SVEPermuteVectorInterleavingFixed = 0x05206000u, + SVEPermuteVectorInterleavingFMask = 0xFF20E000u, + SVEPermuteVectorInterleavingMask = 0xFF20FC00u, + ZIP1_z_zz = SVEPermuteVectorInterleavingFixed, + ZIP2_z_zz = SVEPermuteVectorInterleavingFixed | 0x00000400u, + UZP1_z_zz = SVEPermuteVectorInterleavingFixed | 0x00000800u, + UZP2_z_zz = SVEPermuteVectorInterleavingFixed | 0x00000C00u, + TRN1_z_zz = SVEPermuteVectorInterleavingFixed | 0x00001000u, + TRN2_z_zz = SVEPermuteVectorInterleavingFixed | 0x00001400u +}; + +enum SVEPredicateCountOp : uint32_t { + SVEPredicateCountFixed = 0x25208000u, + SVEPredicateCountFMask = 0xFF38C000u, + SVEPredicateCountMask = 0xFF3FC200u, + CNTP_r_p_p = SVEPredicateCountFixed +}; + +enum SVEPredicateFirstActiveOp : uint32_t { + SVEPredicateFirstActiveFixed = 0x2518C000u, + SVEPredicateFirstActiveFMask = 0xFF3FFE10u, + SVEPredicateFirstActiveMask = 0xFFFFFE10u, + PFIRST_p_p_p = SVEPredicateFirstActiveFixed | 0x00400000u +}; + +enum SVEPredicateInitializeOp : uint32_t { + SVEPredicateInitializeFixed = 0x2518E000u, + SVEPredicateInitializeFMask = 0xFF3EFC10u, + SVEPredicateInitializeMask = 0xFF3FFC10u, + SVEPredicateInitializeSetFlagsBit = 0x00010000u, + PTRUE_p_s = SVEPredicateInitializeFixed | 0x00000000u, + PTRUES_p_s = SVEPredicateInitializeFixed | SVEPredicateInitializeSetFlagsBit +}; + +enum SVEPredicateLogicalOp : uint32_t { + SVEPredicateLogicalFixed = 0x25004000u, + SVEPredicateLogicalFMask = 0xFF30C000u, + SVEPredicateLogicalMask = 0xFFF0C210u, + SVEPredicateLogicalSetFlagsBit = 0x00400000u, + AND_p_p_pp_z = SVEPredicateLogicalFixed, + ANDS_p_p_pp_z = AND_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + BIC_p_p_pp_z = SVEPredicateLogicalFixed | 0x00000010u, + BICS_p_p_pp_z = BIC_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + EOR_p_p_pp_z = SVEPredicateLogicalFixed | 0x00000200u, + EORS_p_p_pp_z = EOR_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + ORR_p_p_pp_z = SVEPredicateLogicalFixed | 0x00800000u, + ORRS_p_p_pp_z = ORR_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + ORN_p_p_pp_z = SVEPredicateLogicalFixed | 0x00800010u, + ORNS_p_p_pp_z = ORN_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + NAND_p_p_pp_z = SVEPredicateLogicalFixed | 0x00800210u, + NANDS_p_p_pp_z = NAND_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + NOR_p_p_pp_z = SVEPredicateLogicalFixed | 0x00800200u, + NORS_p_p_pp_z = NOR_p_p_pp_z | SVEPredicateLogicalSetFlagsBit, + SEL_p_p_pp = SVEPredicateLogicalFixed | 0x00000210u +}; + +enum SVEPredicateNextActiveOp : uint32_t { + SVEPredicateNextActiveFixed = 0x2519C400u, + SVEPredicateNextActiveFMask = 0xFF3FFE10u, + SVEPredicateNextActiveMask = 0xFF3FFE10u, + PNEXT_p_p_p = SVEPredicateNextActiveFixed +}; + +enum SVEPredicateReadFromFFR_PredicatedOp : uint32_t { + SVEPredicateReadFromFFR_PredicatedFixed = 0x2518F000u, + SVEPredicateReadFromFFR_PredicatedFMask = 0xFF3FFE10u, + SVEPredicateReadFromFFR_PredicatedMask = 0xFFFFFE10u, + RDFFR_p_p_f = SVEPredicateReadFromFFR_PredicatedFixed, + RDFFRS_p_p_f = SVEPredicateReadFromFFR_PredicatedFixed | 0x00400000u +}; + +enum SVEPredicateReadFromFFR_UnpredicatedOp : uint32_t { + SVEPredicateReadFromFFR_UnpredicatedFixed = 0x2519F000u, + 
SVEPredicateReadFromFFR_UnpredicatedFMask = 0xFF3FFFF0u, + SVEPredicateReadFromFFR_UnpredicatedMask = 0xFFFFFFF0u, + RDFFR_p_f = SVEPredicateReadFromFFR_UnpredicatedFixed +}; + +enum SVEPredicateTestOp : uint32_t { + SVEPredicateTestFixed = 0x2510C000u, + SVEPredicateTestFMask = 0xFF3FC210u, + SVEPredicateTestMask = 0xFFFFC21Fu, + PTEST_p_p = SVEPredicateTestFixed | 0x00400000u +}; + +enum SVEPredicateZeroOp : uint32_t { + SVEPredicateZeroFixed = 0x2518E400u, + SVEPredicateZeroFMask = 0xFF3FFFF0u, + SVEPredicateZeroMask = 0xFFFFFFF0u, + PFALSE_p = SVEPredicateZeroFixed +}; + +enum SVEPropagateBreakOp : uint32_t { + SVEPropagateBreakFixed = 0x2500C000u, + SVEPropagateBreakFMask = 0xFF30C000u, + SVEPropagateBreakMask = 0xFFF0C210u, + BRKPA_p_p_pp = SVEPropagateBreakFixed, + BRKPB_p_p_pp = SVEPropagateBreakFixed | 0x00000010u, + BRKPAS_p_p_pp = SVEPropagateBreakFixed | 0x00400000u, + BRKPBS_p_p_pp = SVEPropagateBreakFixed | 0x00400010u +}; + +enum SVEPropagateBreakToNextPartitionOp : uint32_t { + SVEPropagateBreakToNextPartitionFixed = 0x25184000u, + SVEPropagateBreakToNextPartitionFMask = 0xFFBFC210u, + SVEPropagateBreakToNextPartitionMask = 0xFFFFC210u, + BRKN_p_p_pp = SVEPropagateBreakToNextPartitionFixed, + BRKNS_p_p_pp = SVEPropagateBreakToNextPartitionFixed | 0x00400000u +}; + +enum SVEReversePredicateElementsOp : uint32_t { + SVEReversePredicateElementsFixed = 0x05344000u, + SVEReversePredicateElementsFMask = 0xFF3FFE10u, + SVEReversePredicateElementsMask = 0xFF3FFE10u, + REV_p_p = SVEReversePredicateElementsFixed +}; + +enum SVEReverseVectorElementsOp : uint32_t { + SVEReverseVectorElementsFixed = 0x05383800u, + SVEReverseVectorElementsFMask = 0xFF3FFC00u, + SVEReverseVectorElementsMask = 0xFF3FFC00u, + REV_z_z = SVEReverseVectorElementsFixed +}; + +enum SVEReverseWithinElementsOp : uint32_t { + SVEReverseWithinElementsFixed = 0x05248000u, + SVEReverseWithinElementsFMask = 0xFF3CE000u, + SVEReverseWithinElementsMask = 0xFF3FE000u, + REVB_z_z = SVEReverseWithinElementsFixed, + REVH_z_z = SVEReverseWithinElementsFixed | 0x00010000u, + REVW_z_z = SVEReverseWithinElementsFixed | 0x00020000u, + RBIT_z_p_z = SVEReverseWithinElementsFixed | 0x00030000u +}; + +enum SVESaturatingIncDecRegisterByElementCountOp : uint32_t { + SVESaturatingIncDecRegisterByElementCountFixed = 0x0420F000u, + SVESaturatingIncDecRegisterByElementCountFMask = 0xFF20F000u, + SVESaturatingIncDecRegisterByElementCountMask = 0xFFF0FC00u, + SQINCB_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed, + UQINCB_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00000400u, + SQDECB_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00000800u, + UQDECB_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00000C00u, + SQINCB_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00100000u, + UQINCB_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00100400u, + SQDECB_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00100800u, + UQDECB_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00100C00u, + SQINCH_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00400000u, + UQINCH_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00400400u, + SQDECH_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00400800u, + UQDECH_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00400C00u, + SQINCH_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00500000u, + UQINCH_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 
0x00500400u, + SQDECH_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00500800u, + UQDECH_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00500C00u, + SQINCW_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00800000u, + UQINCW_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00800400u, + SQDECW_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00800800u, + UQDECW_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00800C00u, + SQINCW_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00900000u, + UQINCW_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00900400u, + SQDECW_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00900800u, + UQDECW_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00900C00u, + SQINCD_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00C00000u, + UQINCD_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00C00400u, + SQDECD_r_rs_sx = SVESaturatingIncDecRegisterByElementCountFixed | 0x00C00800u, + UQDECD_r_rs_uw = SVESaturatingIncDecRegisterByElementCountFixed | 0x00C00C00u, + SQINCD_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00D00000u, + UQINCD_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00D00400u, + SQDECD_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00D00800u, + UQDECD_r_rs_x = SVESaturatingIncDecRegisterByElementCountFixed | 0x00D00C00u +}; + +enum SVESaturatingIncDecVectorByElementCountOp : uint32_t { + SVESaturatingIncDecVectorByElementCountFixed = 0x0420C000u, + SVESaturatingIncDecVectorByElementCountFMask = 0xFF30F000u, + SVESaturatingIncDecVectorByElementCountMask = 0xFFF0FC00u, + SQINCH_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00400000u, + UQINCH_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00400400u, + SQDECH_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00400800u, + UQDECH_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00400C00u, + SQINCW_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00800000u, + UQINCW_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00800400u, + SQDECW_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00800800u, + UQDECW_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00800C00u, + SQINCD_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00C00000u, + UQINCD_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00C00400u, + SQDECD_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00C00800u, + UQDECD_z_zs = SVESaturatingIncDecVectorByElementCountFixed | 0x00C00C00u +}; + +enum SVEStackFrameAdjustmentOp : uint32_t { + SVEStackFrameAdjustmentFixed = 0x04205000u, + SVEStackFrameAdjustmentFMask = 0xFFA0F800u, + SVEStackFrameAdjustmentMask = 0xFFE0F800u, + ADDVL_r_ri = SVEStackFrameAdjustmentFixed, + ADDPL_r_ri = SVEStackFrameAdjustmentFixed | 0x00400000u +}; + +enum SVEStackFrameSizeOp : uint32_t { + SVEStackFrameSizeFixed = 0x04BF5000u, + SVEStackFrameSizeFMask = 0xFFFFF800u, + SVEStackFrameSizeMask = 0xFFFFF800u, + RDVL_r_i = SVEStackFrameSizeFixed +}; + +enum SVEStoreMultipleStructures_ScalarPlusImmOp : uint32_t { + SVEStoreMultipleStructures_ScalarPlusImmFixed = 0xE410E000u, + SVEStoreMultipleStructures_ScalarPlusImmFMask = 0xFE10E000u, + SVEStoreMultipleStructures_ScalarPlusImmMask = 0xFFF0E000u, + ST2B_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00200000u, + ST3B_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00400000u, +
ST4B_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00600000u, + ST2H_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00A00000u, + ST3H_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00C00000u, + ST4H_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x00E00000u, + ST2W_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01200000u, + ST3W_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01400000u, + ST4W_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01600000u, + ST2D_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01A00000u, + ST3D_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01C00000u, + ST4D_z_p_bi_contiguous = SVEStoreMultipleStructures_ScalarPlusImmFixed | 0x01E00000u +}; + +enum SVEStoreMultipleStructures_ScalarPlusScalarOp : uint32_t { + SVEStoreMultipleStructures_ScalarPlusScalarFixed = 0xE4006000u, + SVEStoreMultipleStructures_ScalarPlusScalarFMask = 0xFE00E000u, + SVEStoreMultipleStructures_ScalarPlusScalarMask = 0xFFE0E000u, + ST2B_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00200000u, + ST3B_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00400000u, + ST4B_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00600000u, + ST2H_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00A00000u, + ST3H_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00C00000u, + ST4H_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x00E00000u, + ST2W_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01200000u, + ST3W_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01400000u, + ST4W_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01600000u, + ST2D_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01A00000u, + ST3D_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01C00000u, + ST4D_z_p_br_contiguous = SVEStoreMultipleStructures_ScalarPlusScalarFixed | 0x01E00000u +}; + +enum SVEStorePredicateRegisterOp : uint32_t { + SVEStorePredicateRegisterFixed = 0xE5800000u, + SVEStorePredicateRegisterFMask = 0xFFC0E010u, + SVEStorePredicateRegisterMask = 0xFFC0E010u, + STR_p_bi = SVEStorePredicateRegisterFixed +}; + +enum SVEStoreVectorRegisterOp : uint32_t { + SVEStoreVectorRegisterFixed = 0xE5804000u, + SVEStoreVectorRegisterFMask = 0xFFC0E000u, + SVEStoreVectorRegisterMask = 0xFFC0E000u, + STR_z_bi = SVEStoreVectorRegisterFixed +}; + +enum SVETableLookupOp : uint32_t { + SVETableLookupFixed = 0x05203000u, + SVETableLookupFMask = 0xFF20FC00u, + SVETableLookupMask = 0xFF20FC00u, + TBL_z_zz_1 = SVETableLookupFixed +}; + +enum SVEUnpackPredicateElementsOp : uint32_t { + SVEUnpackPredicateElementsFixed = 0x05304000u, + SVEUnpackPredicateElementsFMask = 0xFFFEFE10u, + SVEUnpackPredicateElementsMask = 0xFFFFFE10u, + PUNPKLO_p_p = SVEUnpackPredicateElementsFixed, + PUNPKHI_p_p = SVEUnpackPredicateElementsFixed | 0x00010000u +}; + +enum SVEUnpackVectorElementsOp : uint32_t { + SVEUnpackVectorElementsFixed = 0x05303800u, + SVEUnpackVectorElementsFMask = 0xFF3CFC00u, + SVEUnpackVectorElementsMask = 0xFF3FFC00u, + SUNPKLO_z_z = SVEUnpackVectorElementsFixed, + SUNPKHI_z_z = SVEUnpackVectorElementsFixed | 0x00010000u, + UUNPKLO_z_z = SVEUnpackVectorElementsFixed | 0x00020000u, + 
UUNPKHI_z_z = SVEUnpackVectorElementsFixed | 0x00030000u +}; + +enum SVEVectorSelectOp : uint32_t { + SVEVectorSelectFixed = 0x0520C000u, + SVEVectorSelectFMask = 0xFF20C000u, + SVEVectorSelectMask = 0xFF20C000u, + SEL_z_p_zz = SVEVectorSelectFixed +}; + +enum SVEVectorSpliceOp : uint32_t { + SVEVectorSpliceFixed = 0x052C8000u, + SVEVectorSpliceFMask = 0xFF3FE000u, + SVEVectorSpliceMask = 0xFF3FE000u, + SPLICE_z_p_zz_des = SVEVectorSpliceFixed +}; + +enum ReservedOp : uint32_t { + ReservedFixed = 0x00000000u, + ReservedFMask = 0x1E000000u, + ReservedMask = 0xFFFF0000u, + UDF = ReservedFixed | 0x00000000u +}; + // Unimplemented and unallocated instructions. These are defined to make fixed // bit assertion easier. enum UnimplementedOp : uint32_t { @@ -2541,4 +4449,8 @@ enum UnallocatedOp : uint32_t { } // namespace aarch64 } // namespace vixl +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + #endif // VIXL_AARCH64_CONSTANTS_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/cpu-aarch64.h b/dep/vixl/include/vixl/aarch64/cpu-aarch64.h index 031fa42c8..d5a5f8c82 100644 --- a/dep/vixl/include/vixl/aarch64/cpu-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/cpu-aarch64.h @@ -27,13 +27,219 @@ #ifndef VIXL_CPU_AARCH64_H #define VIXL_CPU_AARCH64_H +#include "../cpu-features.h" #include "../globals-vixl.h" #include "instructions-aarch64.h" +#include "simulator-aarch64.h" + +#ifndef VIXL_INCLUDE_TARGET_AARCH64 +// The supporting .cc file is only compiled when the A64 target is selected. +// Throw an explicit error now to avoid a harder-to-debug linker error later. +// +// These helpers _could_ work on any AArch64 host, even when generating AArch32 +// code, but we don't support this because the available features may differ +// between AArch32 and AArch64 on the same platform, so basing AArch32 code +// generation on aarch64::CPU features is probably broken. +#error cpu-aarch64.h requires VIXL_INCLUDE_TARGET_AARCH64 (scons target=a64). +#endif namespace vixl { namespace aarch64 { +// A CPU ID register, for use with CPUFeatures::kIDRegisterEmulation. Fields +// specific to each register are described in relevant subclasses. +class IDRegister { + protected: + explicit IDRegister(uint64_t value = 0) : value_(value) {} + + class Field { + public: + enum Type { kUnsigned, kSigned }; + + static const int kMaxWidthInBits = 4; + + // This needs to be constexpr so that fields have "constant initialisation". + // This avoids initialisation order problems when these values are used to + // (dynamically) initialise static variables, etc. + explicit constexpr Field(int lsb, + int bitWidth = kMaxWidthInBits, + Type type = kUnsigned) + : lsb_(lsb), bitWidth_(bitWidth), type_(type) {} + + int GetWidthInBits() const { return bitWidth_; } + int GetLsb() const { return lsb_; } + int GetMsb() const { return lsb_ + GetWidthInBits() - 1; } + Type GetType() const { return type_; } + + private: + int lsb_; + int bitWidth_; + Type type_; + }; + + public: + // Extract the specified field, performing sign-extension for signed fields. + // This allows us to implement the 'value >= number' detection mechanism + // recommended by the Arm ARM, for both signed and unsigned fields. 
+ int Get(Field field) const; + + private: + uint64_t value_; +}; + +class AA64PFR0 : public IDRegister { + public: + explicit AA64PFR0(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kFP; + static const Field kAdvSIMD; + static const Field kRAS; + static const Field kSVE; + static const Field kDIT; + static const Field kCSV2; + static const Field kCSV3; +}; + +class AA64PFR1 : public IDRegister { + public: + explicit AA64PFR1(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kBT; + static const Field kSSBS; + static const Field kMTE; + static const Field kSME; +}; + +class AA64ISAR0 : public IDRegister { + public: + explicit AA64ISAR0(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kAES; + static const Field kSHA1; + static const Field kSHA2; + static const Field kCRC32; + static const Field kAtomic; + static const Field kRDM; + static const Field kSHA3; + static const Field kSM3; + static const Field kSM4; + static const Field kDP; + static const Field kFHM; + static const Field kTS; + static const Field kRNDR; +}; + +class AA64ISAR1 : public IDRegister { + public: + explicit AA64ISAR1(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kDPB; + static const Field kAPA; + static const Field kAPI; + static const Field kJSCVT; + static const Field kFCMA; + static const Field kLRCPC; + static const Field kGPA; + static const Field kGPI; + static const Field kFRINTTS; + static const Field kSB; + static const Field kSPECRES; + static const Field kBF16; + static const Field kDGH; + static const Field kI8MM; +}; + +class AA64ISAR2 : public IDRegister { + public: + explicit AA64ISAR2(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kWFXT; + static const Field kRPRES; + static const Field kMOPS; + static const Field kCSSC; +}; + +class AA64MMFR0 : public IDRegister { + public: + explicit AA64MMFR0(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kECV; +}; + +class AA64MMFR1 : public IDRegister { + public: + explicit AA64MMFR1(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kLO; + static const Field kAFP; +}; + +class AA64MMFR2 : public IDRegister { + public: + explicit AA64MMFR2(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kAT; +}; + +class AA64ZFR0 : public IDRegister { + public: + explicit AA64ZFR0(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kSVEver; + static const Field kAES; + static const Field kBitPerm; + static const Field kBF16; + static const Field kSHA3; + static const Field kSM4; + static const Field kI8MM; + static const Field kF32MM; + static const Field kF64MM; +}; + +class AA64SMFR0 : public IDRegister { + public: + explicit AA64SMFR0(uint64_t value) : IDRegister(value) {} + + CPUFeatures GetCPUFeatures() const; + + private: + static const Field kSMEf32f32; + static const Field kSMEb16f32; + static const Field kSMEf16f32; + static const Field kSMEi8i32; + static const Field kSMEf64f64; + static const Field kSMEi16i64; + static const Field kSMEfa64; +}; + class CPU { public: // Initialise CPU support. 
@@ -45,6 +251,25 @@ class CPU { // safely run. static void EnsureIAndDCacheCoherency(void *address, size_t length); + // Read and interpret the ID registers. This requires + // CPUFeatures::kIDRegisterEmulation, and therefore cannot be called on + // non-AArch64 platforms. + static CPUFeatures InferCPUFeaturesFromIDRegisters(); + + // Read and interpret CPUFeatures reported by the OS. Failed queries (or + // unsupported platforms) return an empty list. Note that this is + // indistinguishable from a successful query on a platform that advertises no + // features. + // + // Non-AArch64 hosts are considered to be unsupported platforms, and this + // function returns an empty list. + static CPUFeatures InferCPUFeaturesFromOS( + CPUFeatures::QueryIDRegistersOption option = + CPUFeatures::kQueryIDRegistersIfAvailable); + + // Query the SVE vector length. This requires CPUFeatures::kSVE. + static int ReadSVEVectorLengthInBits(); + // Handle tagged pointers. template <typename T> static T SetPointerTag(T pointer, uint64_t tag) { @@ -72,6 +297,27 @@ } private: +#define VIXL_AARCH64_ID_REG_LIST(V) \ + V(AA64PFR0, "ID_AA64PFR0_EL1") \ + V(AA64PFR1, "ID_AA64PFR1_EL1") \ + V(AA64ISAR0, "ID_AA64ISAR0_EL1") \ + V(AA64ISAR1, "ID_AA64ISAR1_EL1") \ + V(AA64MMFR0, "ID_AA64MMFR0_EL1") \ + V(AA64MMFR1, "ID_AA64MMFR1_EL1") \ + /* These registers are RES0 in the baseline Arm8.0. We can always safely */ \ + /* read them, but some compilers don't accept the symbolic names. */ \ + V(AA64SMFR0, "S3_0_C0_C4_5") \ + V(AA64ISAR2, "S3_0_C0_C6_2") \ + V(AA64MMFR2, "S3_0_C0_C7_2") \ + V(AA64ZFR0, "S3_0_C0_C4_4") + +#define VIXL_READ_ID_REG(NAME, MRS_ARG) static NAME Read##NAME(); + // On native AArch64 platforms, read the named CPU ID registers. These require + // CPUFeatures::kIDRegisterEmulation, and should not be called on non-AArch64 + // platforms. + VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG) +#undef VIXL_READ_ID_REG + // Return the content of the cache type register. static uint32_t GetCacheType(); diff --git a/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h b/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h index 9f034778a..0f3fb9638 100644 --- a/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h @@ -27,10 +27,14 @@ #ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ #define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ +#include <functional> #include <iostream> +#include <unordered_map> #include "../cpu-features.h" + #include "decoder-aarch64.h" +#include "decoder-visitor-map-aarch64.h" namespace vixl { namespace aarch64 { @@ -100,15 +104,16 @@ class CPUFeaturesAuditor : public DecoderVisitor { SetAvailableFeatures(available); } -// Declare all Visitor functions.
-#define DECLARE(A) \ - virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; - VISITOR_LIST(DECLARE) -#undef DECLARE + virtual void Visit(Metadata* metadata, + const Instruction* instr) VIXL_OVERRIDE; private: class RecordInstructionFeaturesScope; +#define DECLARE(A) virtual void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) +#undef DECLARE + void LoadStoreHelper(const Instruction* instr); void LoadStorePairHelper(const Instruction* instr); @@ -117,6 +122,12 @@ class CPUFeaturesAuditor : public DecoderVisitor { CPUFeatures available_; Decoder* decoder_; + + using FormToVisitorFnMap = std::unordered_map< + uint32_t, + std::function<void(CPUFeaturesAuditor*, const Instruction*)>>; + static const FormToVisitorFnMap* GetFormToVisitorFnMap(); + uint32_t form_hash_; }; } // namespace aarch64 } // namespace vixl diff --git a/dep/vixl/include/vixl/aarch64/debugger-aarch64.h b/dep/vixl/include/vixl/aarch64/debugger-aarch64.h new file mode 100644 index 000000000..1e8039bb9 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/debugger-aarch64.h @@ -0,0 +1,276 @@ +// Copyright 2023, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_DEBUGGER_AARCH64_H_ +#define VIXL_AARCH64_DEBUGGER_AARCH64_H_ + +#include <optional> +#include <unordered_set> +#include <vector> + +#include "../globals-vixl.h" +#include "../utils-vixl.h" +#include "../cpu-features.h" + +#include "abi-aarch64.h" +#include "cpu-features-auditor-aarch64.h" +#include "disasm-aarch64.h" +#include "instructions-aarch64.h" +#include "simulator-aarch64.h" +#include "simulator-constants-aarch64.h" + +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +namespace vixl { +namespace aarch64 { + +class Simulator; + +enum DebugReturn { DebugContinue, DebugExit }; + + +// A debugger command that performs some action when used by the simulator +// debugger. +class DebuggerCmd { + public: + DebuggerCmd(Simulator* sim, + std::string cmd_word, + std::string cmd_alias, + std::string usage, + std::string description); + virtual ~DebuggerCmd() {} + + // Perform some action based on the arguments passed in.
Returns DebugExit if the + // debugger should exit after the action, DebugContinue otherwise. + virtual DebugReturn Action(const std::vector<std::string>& args) = 0; + + // Return the command word. + std::string_view GetCommandWord() { return command_word_; } + // Return the alias for this command. Returns an empty string if this command + // has no alias. + std::string_view GetCommandAlias() { return command_alias_; } + // Return this command's usage. + std::string_view GetArgsString() { return args_str_; } + // Return this command's description. + std::string_view GetDescription() { return description_; } + + protected: + // Simulator which this command will be performed on. + Simulator* sim_; + // Stream to output the result of the command to. + FILE* ostream_; + // Command word that, when given to the interactive debugger, calls Action. + std::string command_word_; + // Optional alias for the command_word. + std::string command_alias_; + // Optional string showing the arguments that can be passed to the command. + std::string args_str_; + // Optional description of the command. + std::string description_; +}; + + +// +// Base debugger command handlers: +// + + +class HelpCmd : public DebuggerCmd { + public: + HelpCmd(Simulator* sim) + : DebuggerCmd(sim, "help", "h", "", "Display this help message.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class BreakCmd : public DebuggerCmd { + public: + BreakCmd(Simulator* sim) + : DebuggerCmd(sim, + "break", + "b", + "<address>", + "Set or remove a breakpoint.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class StepCmd : public DebuggerCmd { + public: + StepCmd(Simulator* sim) + : DebuggerCmd(sim, + "step", + "s", + "[<n>]", + "Step n instructions, default step 1 instruction.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class ContinueCmd : public DebuggerCmd { + public: + ContinueCmd(Simulator* sim) + : DebuggerCmd(sim, + "continue", + "c", + "", + "Exit the debugger and continue executing instructions.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class PrintCmd : public DebuggerCmd { + public: + PrintCmd(Simulator* sim) + : DebuggerCmd(sim, + "print", + "p", + "<register|all|system>", + "Print the contents of a register, all registers or all" + " system registers.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class TraceCmd : public DebuggerCmd { + public: + TraceCmd(Simulator* sim) + : DebuggerCmd(sim, + "trace", + "t", + "", + "Start/stop memory and register tracing.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +class GdbCmd : public DebuggerCmd { + public: + GdbCmd(Simulator* sim) + : DebuggerCmd(sim, + "gdb", + "g", + "", + "Enter an already running instance of gdb.") {} + + DebugReturn Action(const std::vector<std::string>& args) override; +}; + + +// A debugger for the Simulator which takes input from the user in order to +// control the running of the Simulator. +class Debugger { + public: + // A pair consisting of a register character (e.g: W, X, V) and a register + // code (e.g: 0, 1 ...31) which represents a single parsed register. + // + // Note: the register character is guaranteed to be upper case. + using RegisterParsedFormat = std::pair<char, unsigned>; + + Debugger(Simulator* sim); + + // Set the input stream, from which commands are read, to a custom stream. + void SetInputStream(std::istream* stream) { input_stream_ = stream; } + + // Register a new command for the debugger. + template <class T> + void RegisterCmd(); + + // Set a breakpoint at the given address. + void RegisterBreakpoint(uint64_t addr) { breakpoints_.insert(addr); } + // Remove a breakpoint at the given address. + void RemoveBreakpoint(uint64_t addr) { breakpoints_.erase(addr); } + // Return true if the address is the location of a breakpoint. + bool IsBreakpoint(uint64_t addr) const { + return (breakpoints_.find(addr) != breakpoints_.end()); + } + // Return true if the simulator pc is a breakpoint. + bool IsAtBreakpoint() const; + + // Main loop for the debugger. Keep prompting for user inputted debugger + // commands and try to execute them until a command is given that exits the + // interactive debugger. + void Debug(); + + // Parse an unsigned integer value from a string. Base is used to determine + // the numeric base of the number to be read, i.e: 8 for octal, 10 for + // decimal, 16 for hexadecimal and 0 for auto-detect. Returns the parsed + // value, or std::nullopt if no integer value was found. + static std::optional<uint64_t> ParseUint64String(std::string_view uint64_str, + int base = 0); + + // Parse a register from a string. Returns the register character and code + // (e.g: W0, X29, V31) if a valid register was found, std::nullopt otherwise. + static std::optional<RegisterParsedFormat> ParseRegString( + std::string_view reg_str); + + // Print the usage of each debugger command. + void PrintUsage(); + + private: + // Split a string based on the separator given (a single space character by + // default) and return as a std::vector of strings.
+ static std::vector<std::string> Tokenize(std::string_view input_line, + char separator = ' '); + + // Try to execute a single debugger command. + DebugReturn ExecDebugCommand(const std::vector<std::string>& tokenized_cmd); + + // Return true if the string is zero, i.e: all characters in the string + // (other than prefixes) are zero. + static bool IsZeroUint64String(std::string_view uint64_str, int base); + + // The simulator that this debugger acts on. + Simulator* sim_; + + // A vector of all commands recognised by the debugger. + std::vector<std::unique_ptr<DebuggerCmd>> debugger_cmds_; + + // Input stream from which commands are read. Default is std::cin. + std::istream* input_stream_; + + // Output stream from the simulator. + FILE* ostream_; + + // A list of all instruction addresses that, when executed by the + // simulator, will start the interactive debugger if it hasn't already. + std::unordered_set<uint64_t> breakpoints_; +}; + + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 + +#endif // VIXL_AARCH64_DEBUGGER_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/decoder-aarch64.h b/dep/vixl/include/vixl/aarch64/decoder-aarch64.h index 100fbb352..22c66e82a 100644 --- a/dep/vixl/include/vixl/aarch64/decoder-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/decoder-aarch64.h @@ -1,4 +1,4 @@ -// Copyright 2014, VIXL authors +// Copyright 2019, VIXL authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -28,95 +28,245 @@ #define VIXL_AARCH64_DECODER_AARCH64_H_ #include <list> +#include <map> +#include <unordered_map> #include "../globals-vixl.h" #include "instructions-aarch64.h" - // List macro containing all visitors needed by the decoder class. - -#define VISITOR_LIST_THAT_RETURN(V) \ - V(AddSubExtended) \ - V(AddSubImmediate) \ - V(AddSubShifted) \ - V(AddSubWithCarry) \ - V(AtomicMemory) \ - V(Bitfield) \ - V(CompareBranch) \ - V(ConditionalBranch) \ - V(ConditionalCompareImmediate) \ - V(ConditionalCompareRegister) \ - V(ConditionalSelect) \ - V(Crypto2RegSHA) \ - V(Crypto3RegSHA) \ - V(CryptoAES) \ - V(DataProcessing1Source) \ - V(DataProcessing2Source) \ - V(DataProcessing3Source) \ - V(Exception) \ - V(Extract) \ - V(FPCompare) \ - V(FPConditionalCompare) \ - V(FPConditionalSelect) \ - V(FPDataProcessing1Source) \ - V(FPDataProcessing2Source) \ - V(FPDataProcessing3Source) \ - V(FPFixedPointConvert) \ - V(FPImmediate) \ - V(FPIntegerConvert) \ - V(LoadLiteral) \ - V(LoadStoreExclusive) \ - V(LoadStorePairNonTemporal) \ - V(LoadStorePairOffset) \ - V(LoadStorePairPostIndex) \ - V(LoadStorePairPreIndex) \ - V(LoadStorePostIndex) \ - V(LoadStorePreIndex) \ - V(LoadStoreRegisterOffset) \ - V(LoadStoreUnscaledOffset) \ - V(LoadStoreUnsignedOffset) \ - V(LogicalImmediate) \ - V(LogicalShifted) \ - V(MoveWideImmediate) \ - V(NEON2RegMisc) \ - V(NEON2RegMiscFP16) \ - V(NEON3Different) \ - V(NEON3Same) \ - V(NEON3SameExtra) \ - V(NEON3SameFP16) \ - V(NEONAcrossLanes) \ - V(NEONByIndexedElement) \ - V(NEONCopy) \ - V(NEONExtract) \ - V(NEONLoadStoreMultiStruct) \ - V(NEONLoadStoreMultiStructPostIndex) \ - V(NEONLoadStoreSingleStruct) \ - V(NEONLoadStoreSingleStructPostIndex) \ - V(NEONModifiedImmediate) \ - V(NEONPerm) \ - V(NEONScalar2RegMisc) \ - V(NEONScalar2RegMiscFP16) \ - V(NEONScalar3Diff) \ - V(NEONScalar3Same) \ - V(NEONScalar3SameExtra) \ - V(NEONScalar3SameFP16) \ - V(NEONScalarByIndexedElement) \ - V(NEONScalarCopy) \ - V(NEONScalarPairwise) \ - V(NEONScalarShiftImmediate) \ - V(NEONShiftImmediate) \ - V(NEONTable) \ - V(PCRelAddressing) \ - V(System) \ - V(TestBranch) \ -
V(UnconditionalBranch) \ - V(UnconditionalBranchToRegister) - -#define VISITOR_LIST_THAT_DONT_RETURN(V) \ - V(Unallocated) \ +#define VISITOR_LIST_THAT_RETURN(V) \ + V(AddSubExtended) \ + V(AddSubImmediate) \ + V(AddSubShifted) \ + V(AddSubWithCarry) \ + V(AtomicMemory) \ + V(Bitfield) \ + V(CompareBranch) \ + V(ConditionalBranch) \ + V(ConditionalCompareImmediate) \ + V(ConditionalCompareRegister) \ + V(ConditionalSelect) \ + V(Crypto2RegSHA) \ + V(Crypto3RegSHA) \ + V(CryptoAES) \ + V(DataProcessing1Source) \ + V(DataProcessing2Source) \ + V(DataProcessing3Source) \ + V(EvaluateIntoFlags) \ + V(Exception) \ + V(Extract) \ + V(FPCompare) \ + V(FPConditionalCompare) \ + V(FPConditionalSelect) \ + V(FPDataProcessing1Source) \ + V(FPDataProcessing2Source) \ + V(FPDataProcessing3Source) \ + V(FPFixedPointConvert) \ + V(FPImmediate) \ + V(FPIntegerConvert) \ + V(LoadLiteral) \ + V(LoadStoreExclusive) \ + V(LoadStorePAC) \ + V(LoadStorePairNonTemporal) \ + V(LoadStorePairOffset) \ + V(LoadStorePairPostIndex) \ + V(LoadStorePairPreIndex) \ + V(LoadStorePostIndex) \ + V(LoadStorePreIndex) \ + V(LoadStoreRCpcUnscaledOffset) \ + V(LoadStoreRegisterOffset) \ + V(LoadStoreUnscaledOffset) \ + V(LoadStoreUnsignedOffset) \ + V(LogicalImmediate) \ + V(LogicalShifted) \ + V(MoveWideImmediate) \ + V(NEON2RegMisc) \ + V(NEON2RegMiscFP16) \ + V(NEON3Different) \ + V(NEON3Same) \ + V(NEON3SameExtra) \ + V(NEON3SameFP16) \ + V(NEONAcrossLanes) \ + V(NEONByIndexedElement) \ + V(NEONCopy) \ + V(NEONExtract) \ + V(NEONLoadStoreMultiStruct) \ + V(NEONLoadStoreMultiStructPostIndex) \ + V(NEONLoadStoreSingleStruct) \ + V(NEONLoadStoreSingleStructPostIndex) \ + V(NEONModifiedImmediate) \ + V(NEONPerm) \ + V(NEONScalar2RegMisc) \ + V(NEONScalar2RegMiscFP16) \ + V(NEONScalar3Diff) \ + V(NEONScalar3Same) \ + V(NEONScalar3SameExtra) \ + V(NEONScalar3SameFP16) \ + V(NEONScalarByIndexedElement) \ + V(NEONScalarCopy) \ + V(NEONScalarPairwise) \ + V(NEONScalarShiftImmediate) \ + V(NEONShiftImmediate) \ + V(NEONTable) \ + V(PCRelAddressing) \ + V(RotateRightIntoFlags) \ + V(SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets) \ + V(SVE32BitGatherLoad_VectorPlusImm) \ + V(SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets) \ + V(SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets) \ + V(SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets) \ + V(SVE32BitGatherPrefetch_VectorPlusImm) \ + V(SVE32BitScatterStore_ScalarPlus32BitScaledOffsets) \ + V(SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets) \ + V(SVE32BitScatterStore_VectorPlusImm) \ + V(SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets) \ + V(SVE64BitGatherLoad_ScalarPlus64BitScaledOffsets) \ + V(SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets) \ + V(SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets) \ + V(SVE64BitGatherLoad_VectorPlusImm) \ + V(SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets) \ + V(SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets) \ + V(SVE64BitGatherPrefetch_VectorPlusImm) \ + V(SVE64BitScatterStore_ScalarPlus64BitScaledOffsets) \ + V(SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets) \ + V(SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets) \ + V(SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets) \ + V(SVE64BitScatterStore_VectorPlusImm) \ + V(SVEAddressGeneration) \ + V(SVEBitwiseLogicalUnpredicated) \ + V(SVEBitwiseShiftUnpredicated) \ + V(SVEFFRInitialise) \ + V(SVEFFRWriteFromPredicate) \ + V(SVEFPAccumulatingReduction) \ + V(SVEFPArithmeticUnpredicated) \ + V(SVEFPCompareVectors) \ + 
V(SVEFPCompareWithZero) \ + V(SVEFPComplexAddition) \ + V(SVEFPComplexMulAdd) \ + V(SVEFPComplexMulAddIndex) \ + V(SVEFPFastReduction) \ + V(SVEFPMulIndex) \ + V(SVEFPMulAdd) \ + V(SVEFPMulAddIndex) \ + V(SVEFPUnaryOpUnpredicated) \ + V(SVEIncDecByPredicateCount) \ + V(SVEIndexGeneration) \ + V(SVEIntArithmeticUnpredicated) \ + V(SVEIntCompareSignedImm) \ + V(SVEIntCompareUnsignedImm) \ + V(SVEIntCompareVectors) \ + V(SVEIntMulAddPredicated) \ + V(SVEIntMulAddUnpredicated) \ + V(SVEIntReduction) \ + V(SVEIntUnaryArithmeticPredicated) \ + V(SVEMovprfx) \ + V(SVEMulIndex) \ + V(SVEPermuteVectorExtract) \ + V(SVEPermuteVectorInterleaving) \ + V(SVEPredicateCount) \ + V(SVEPredicateLogical) \ + V(SVEPropagateBreak) \ + V(SVEStackFrameAdjustment) \ + V(SVEStackFrameSize) \ + V(SVEVectorSelect) \ + V(SVEBitwiseLogical_Predicated) \ + V(SVEBitwiseLogicalWithImm_Unpredicated) \ + V(SVEBitwiseShiftByImm_Predicated) \ + V(SVEBitwiseShiftByVector_Predicated) \ + V(SVEBitwiseShiftByWideElements_Predicated) \ + V(SVEBroadcastBitmaskImm) \ + V(SVEBroadcastFPImm_Unpredicated) \ + V(SVEBroadcastGeneralRegister) \ + V(SVEBroadcastIndexElement) \ + V(SVEBroadcastIntImm_Unpredicated) \ + V(SVECompressActiveElements) \ + V(SVEConditionallyBroadcastElementToVector) \ + V(SVEConditionallyExtractElementToSIMDFPScalar) \ + V(SVEConditionallyExtractElementToGeneralRegister) \ + V(SVEConditionallyTerminateScalars) \ + V(SVEConstructivePrefix_Unpredicated) \ + V(SVEContiguousFirstFaultLoad_ScalarPlusScalar) \ + V(SVEContiguousLoad_ScalarPlusImm) \ + V(SVEContiguousLoad_ScalarPlusScalar) \ + V(SVEContiguousNonFaultLoad_ScalarPlusImm) \ + V(SVEContiguousNonTemporalLoad_ScalarPlusImm) \ + V(SVEContiguousNonTemporalLoad_ScalarPlusScalar) \ + V(SVEContiguousNonTemporalStore_ScalarPlusImm) \ + V(SVEContiguousNonTemporalStore_ScalarPlusScalar) \ + V(SVEContiguousPrefetch_ScalarPlusImm) \ + V(SVEContiguousPrefetch_ScalarPlusScalar) \ + V(SVEContiguousStore_ScalarPlusImm) \ + V(SVEContiguousStore_ScalarPlusScalar) \ + V(SVECopySIMDFPScalarRegisterToVector_Predicated) \ + V(SVECopyFPImm_Predicated) \ + V(SVECopyGeneralRegisterToVector_Predicated) \ + V(SVECopyIntImm_Predicated) \ + V(SVEElementCount) \ + V(SVEExtractElementToSIMDFPScalarRegister) \ + V(SVEExtractElementToGeneralRegister) \ + V(SVEFPArithmetic_Predicated) \ + V(SVEFPArithmeticWithImm_Predicated) \ + V(SVEFPConvertPrecision) \ + V(SVEFPConvertToInt) \ + V(SVEFPExponentialAccelerator) \ + V(SVEFPRoundToIntegralValue) \ + V(SVEFPTrigMulAddCoefficient) \ + V(SVEFPTrigSelectCoefficient) \ + V(SVEFPUnaryOp) \ + V(SVEIncDecRegisterByElementCount) \ + V(SVEIncDecVectorByElementCount) \ + V(SVEInsertSIMDFPScalarRegister) \ + V(SVEInsertGeneralRegister) \ + V(SVEIntAddSubtractImm_Unpredicated) \ + V(SVEIntAddSubtractVectors_Predicated) \ + V(SVEIntCompareScalarCountAndLimit) \ + V(SVEIntConvertToFP) \ + V(SVEIntDivideVectors_Predicated) \ + V(SVEIntMinMaxImm_Unpredicated) \ + V(SVEIntMinMaxDifference_Predicated) \ + V(SVEIntMulImm_Unpredicated) \ + V(SVEIntMulVectors_Predicated) \ + V(SVELoadAndBroadcastElement) \ + V(SVELoadAndBroadcastQOWord_ScalarPlusImm) \ + V(SVELoadAndBroadcastQOWord_ScalarPlusScalar) \ + V(SVELoadMultipleStructures_ScalarPlusImm) \ + V(SVELoadMultipleStructures_ScalarPlusScalar) \ + V(SVELoadPredicateRegister) \ + V(SVELoadVectorRegister) \ + V(SVEPartitionBreakCondition) \ + V(SVEPermutePredicateElements) \ + V(SVEPredicateFirstActive) \ + V(SVEPredicateInitialize) \ + V(SVEPredicateNextActive) \ + V(SVEPredicateReadFromFFR_Predicated) \ + 
V(SVEPredicateReadFromFFR_Unpredicated) \
+  V(SVEPredicateTest) \
+  V(SVEPredicateZero) \
+  V(SVEPropagateBreakToNextPartition) \
+  V(SVEReversePredicateElements) \
+  V(SVEReverseVectorElements) \
+  V(SVEReverseWithinElements) \
+  V(SVESaturatingIncDecRegisterByElementCount) \
+  V(SVESaturatingIncDecVectorByElementCount) \
+  V(SVEStoreMultipleStructures_ScalarPlusImm) \
+  V(SVEStoreMultipleStructures_ScalarPlusScalar) \
+  V(SVEStorePredicateRegister) \
+  V(SVEStoreVectorRegister) \
+  V(SVETableLookup) \
+  V(SVEUnpackPredicateElements) \
+  V(SVEUnpackVectorElements) \
+  V(SVEVectorSplice) \
+  V(System) \
+  V(TestBranch) \
+  V(Unallocated) \
+  V(UnconditionalBranch) \
+  V(UnconditionalBranchToRegister) \
   V(Unimplemented)
 
+#define VISITOR_LIST_THAT_DONT_RETURN(V) V(Reserved)
+
 #define VISITOR_LIST(V) \
   VISITOR_LIST_THAT_RETURN(V) \
   VISITOR_LIST_THAT_DONT_RETURN(V)
@@ -124,8 +274,12 @@
 namespace vixl {
 namespace aarch64 {
 
-// The Visitor interface. Disassembler and simulator (and other tools)
-// must provide implementations for all of these functions.
+using Metadata = std::map<std::string, std::string>;
+
+// The Visitor interface consists only of the Visit() method. User classes
+// that inherit from this one must provide an implementation of the method.
+// Information about the instruction encountered by the Decoder is available
+// via the metadata pointer.
 class DecoderVisitor {
  public:
   enum VisitorConstness { kConstVisitor, kNonConstVisitor };
@@ -134,9 +288,7 @@ class DecoderVisitor {
 
   virtual ~DecoderVisitor() {}
 
-#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
-  VISITOR_LIST(DECLARE)
-#undef DECLARE
+  virtual void Visit(Metadata* metadata, const Instruction* instr) = 0;
 
   bool IsConstVisitor() const { return constness_ == kConstVisitor; }
   Instruction* MutableInstruction(const Instruction* instr) {
@@ -148,22 +300,22 @@ class Decoder {
   const VisitorConstness constness_;
 };
 
+class DecodeNode;
+class CompiledDecodeNode;
+
+// The instruction decoder is constructed from a graph of decode nodes. At each
+// node, a number of bits are sampled from the instruction being decoded. The
+// resulting value is used to look up the next node in the graph, which then
+// samples other bits, and moves to other decode nodes. Eventually, a visitor
+// node is reached, and the corresponding visitor function is called, which
+// handles the instruction.
 class Decoder {
  public:
-  Decoder() {}
+  Decoder() { ConstructDecodeGraph(); }
 
   // Top-level wrappers around the actual decoding function.
-  void Decode(const Instruction* instr) {
-    std::list<DecoderVisitor*>::iterator it;
-    for (it = visitors_.begin(); it != visitors_.end(); it++) {
-      VIXL_ASSERT((*it)->IsConstVisitor());
-    }
-    DecodeInstruction(instr);
-  }
-  void Decode(Instruction* instr) {
-    DecodeInstruction(const_cast<Instruction*>(instr));
-  }
+  void Decode(const Instruction* instr);
+  void Decode(Instruction* instr);
 
   // Decode all instructions from start (inclusive) to end (exclusive).
   template <typename T>
@@ -212,76 +364,329 @@
   // of visitors stored by the decoder.
   void RemoveVisitor(DecoderVisitor* visitor);
 
-#define DECLARE(A) void Visit##A(const Instruction* instr);
-  VISITOR_LIST(DECLARE)
-#undef DECLARE
-
+  void VisitNamedInstruction(const Instruction* instr, const std::string& name);
   std::list<DecoderVisitor*>* visitors() { return &visitors_; }
 
+  // Get a DecodeNode by name from the Decoder's map.
+  DecodeNode* GetDecodeNode(std::string name);
+
  private:
   // Decodes an instruction and calls the visitor functions registered with the
   // Decoder class.
  void DecodeInstruction(const Instruction* instr);
 
-  // Decode the PC relative addressing instruction, and call the corresponding
-  // visitors.
-  // On entry, instruction bits 27:24 = 0x0.
-  void DecodePCRelAddressing(const Instruction* instr);
+  // Add an initialised DecodeNode to the decode_nodes_ map.
+  void AddDecodeNode(const DecodeNode& node);
 
-  // Decode the add/subtract immediate instruction, and call the correspoding
-  // visitors.
-  // On entry, instruction bits 27:24 = 0x1.
-  void DecodeAddSubImmediate(const Instruction* instr);
-
-  // Decode the branch, system command, and exception generation parts of
-  // the instruction tree, and call the corresponding visitors.
-  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
-  void DecodeBranchSystemException(const Instruction* instr);
-
-  // Decode the load and store parts of the instruction tree, and call
-  // the corresponding visitors.
-  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
-  void DecodeLoadStore(const Instruction* instr);
-
-  // Decode the logical immediate and move wide immediate parts of the
-  // instruction tree, and call the corresponding visitors.
-  // On entry, instruction bits 27:24 = 0x2.
-  void DecodeLogical(const Instruction* instr);
-
-  // Decode the bitfield and extraction parts of the instruction tree,
-  // and call the corresponding visitors.
-  // On entry, instruction bits 27:24 = 0x3.
-  void DecodeBitfieldExtract(const Instruction* instr);
-
-  // Decode the data processing parts of the instruction tree, and call the
-  // corresponding visitors.
-  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
-  void DecodeDataProcessing(const Instruction* instr);
-
-  // Decode the floating point parts of the instruction tree, and call the
-  // corresponding visitors.
-  // On entry, instruction bits 27:24 = {0xE, 0xF}.
-  void DecodeFP(const Instruction* instr);
-
-  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
-  // and call the corresponding visitors.
-  // On entry, instruction bits 29:25 = 0x6.
-  void DecodeNEONLoadStore(const Instruction* instr);
-
-  // Decode the Advanced SIMD (NEON) vector data processing part of the
-  // instruction tree, and call the corresponding visitors.
-  // On entry, instruction bits 28:25 = 0x7.
-  void DecodeNEONVectorDataProcessing(const Instruction* instr);
-
-  // Decode the Advanced SIMD (NEON) scalar data processing part of the
-  // instruction tree, and call the corresponding visitors.
-  // On entry, instruction bits 28:25 = 0xF.
-  void DecodeNEONScalarDataProcessing(const Instruction* instr);
-
- private:
   // Visitors are registered in a list.
   std::list<DecoderVisitor*> visitors_;
+
+  // Compile the dynamically generated decode graph based on the static
+  // information in kDecodeMapping and kVisitorNodes.
+  void ConstructDecodeGraph();
+
+  // Root node for the compiled decoder graph, stored here to avoid a map lookup
+  // for every instruction decoded.
+  CompiledDecodeNode* compiled_decoder_root_;
+
+  // Map of node names to DecodeNodes.
+  std::map<std::string, DecodeNode> decode_nodes_;
+};
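+
+// Editor's illustration, not part of the upstream header: with the visitor
+// interface reduced to the single Visit() method above, a minimal visitor
+// and its registration might look roughly like this. The "form" metadata
+// key and AppendVisitor() are assumptions based on how the simulator and
+// disassembler use this interface.
+//
+//   class NameLoggingVisitor : public DecoderVisitor {
+//    public:
+//     void Visit(Metadata* metadata, const Instruction* instr) override {
+//       printf("%p: %s\n", reinterpret_cast<const void*>(instr),
+//              (*metadata)["form"].c_str());
+//     }
+//   };
+//
+//   Decoder decoder;                  // Constructs the decode graph.
+//   NameLoggingVisitor visitor;
+//   decoder.AppendVisitor(&visitor);
+//   decoder.Decode(instr);            // visitor.Visit() runs once decoded.
+
+typedef void (Decoder::*DecodeFnPtr)(const Instruction*);
+typedef uint32_t (Instruction::*BitExtractFn)(void) const;
+
+// A Visitor node maps the name of a visitor to the function that handles it.
+struct VisitorNode {
+  const char* name;
+  const DecodeFnPtr visitor_fn;
+};
+
+// DecodePattern and DecodeMapping represent the input data to the decoder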
+// compilation stage. After compilation, the decoder is embodied in the graph
+// of CompiledDecodeNodes pointed to by compiled_decoder_root_.
+
+// A DecodePattern maps a pattern of set/unset/don't care (1, 0, x) bits encoded
+// as uint32_t to its handler.
+// The encoding uses two bits per symbol: 0 => 0b00, 1 => 0b01, x => 0b10.
+// 0b11 marks the edge of the most-significant bits of the pattern, which is
+// required to determine the length. For example, the pattern "1x01"_b is
+// encoded in a uint32_t as 0b11_01_10_00_01.
+struct DecodePattern {
+  uint32_t pattern;
+  const char* handler;
+};
+
+// A DecodeMapping consists of the name of a handler, the bits sampled in the
+// instruction by that handler, and a mapping from the pattern that those
+// sampled bits match to the corresponding name of a node.
+struct DecodeMapping {
+  const char* name;
+  const std::vector<uint8_t> sampled_bits;
+  const std::vector<DecodePattern> mapping;
+};
+
+// For speed, before nodes can be used for decoding instructions, they must
+// be compiled. This converts the mapping "bit pattern strings to decoder name
+// string" stored in DecodeNodes to an array lookup for the pointer to the next
+// node, stored in CompiledDecodeNodes. Compilation may also apply other
+// optimisations for simple decode patterns.
+class CompiledDecodeNode {
+ public:
+  // Constructor for decode node, containing a decode table and pointer to a
+  // function that extracts the bits to be sampled.
+  CompiledDecodeNode(BitExtractFn bit_extract_fn, size_t decode_table_size)
+      : bit_extract_fn_(bit_extract_fn),
+        instruction_name_("node"),
+        decode_table_size_(decode_table_size),
+        decoder_(NULL) {
+    decode_table_ = new CompiledDecodeNode*[decode_table_size_];
+    memset(decode_table_, 0, decode_table_size_ * sizeof(decode_table_[0]));
+  }
+
+  // Constructor for wrappers around visitor functions. These require no
+  // decoding, so no bit extraction function or decode table is assigned.
+  explicit CompiledDecodeNode(std::string iname, Decoder* decoder)
+      : bit_extract_fn_(NULL),
+        instruction_name_(iname),
+        decode_table_(NULL),
+        decode_table_size_(0),
+        decoder_(decoder) {}
+
+  ~CompiledDecodeNode() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
+    // Free the decode table, if this is a compiled, non-leaf node.
+    if (decode_table_ != NULL) {
+      VIXL_ASSERT(!IsLeafNode());
+      delete[] decode_table_;
+    }
+  }
+
+  // Decode the instruction by either sampling the bits using the bit extract
+  // function to find the next node, or, if we're at a leaf, calling the visitor
+  // function.
+  void Decode(const Instruction* instr) const;
+
+  // A leaf node is a wrapper for a visitor function.
+  bool IsLeafNode() const {
+    VIXL_ASSERT(((instruction_name_ == "node") && (bit_extract_fn_ != NULL)) ||
+                ((instruction_name_ != "node") && (bit_extract_fn_ == NULL)));
+    return instruction_name_ != "node";
+  }
+
+  // Get a pointer to the next node required in the decode process, based on the
+  // bits sampled by the current node.
+  CompiledDecodeNode* GetNodeForBits(uint32_t bits) const {
+    VIXL_ASSERT(bits < decode_table_size_);
+    return decode_table_[bits];
+  }
+
+  // Set the next node in the decode process for the pattern of sampled bits in
+  // the current node.
+  void SetNodeForBits(uint32_t bits, CompiledDecodeNode* n) {
+    VIXL_ASSERT(bits < decode_table_size_);
+    VIXL_ASSERT(n != NULL);
+    decode_table_[bits] = n;
+  }
+
+ private:
+  // Pointer to an instantiated template function for extracting the bits
+  // sampled by this node. Set to NULL for leaf nodes.
+  const BitExtractFn bit_extract_fn_;
+
+  // Name of the instruction form that this node handles. Set only for leaf
+  // nodes, where no extra decoding is required; set to "node" otherwise.
+  std::string instruction_name_;
+
+  // Mapping table from instruction bits to next decode stage.
+  CompiledDecodeNode** decode_table_;
+  const size_t decode_table_size_;
+
+  // Pointer to the decoder containing this node, used to call its visitor
+  // function for leaf nodes. Set to NULL for non-leaf nodes.
+  Decoder* decoder_;
+};
+
+class DecodeNode {
+ public:
+  // Default constructor needed for map initialisation.
+  DecodeNode()
+      : sampled_bits_(DecodeNode::kEmptySampledBits),
+        pattern_table_(DecodeNode::kEmptyPatternTable),
+        compiled_node_(NULL) {}
+
+  // Constructor for DecodeNode wrappers around visitor functions. These are
+  // marked as "compiled", as there is no decoding left to do.
+  explicit DecodeNode(const std::string& iname, Decoder* decoder)
+      : name_(iname),
+        sampled_bits_(DecodeNode::kEmptySampledBits),
+        instruction_name_(iname),
+        pattern_table_(DecodeNode::kEmptyPatternTable),
+        decoder_(decoder),
+        compiled_node_(NULL) {}
+
+  // Constructor for DecodeNodes that map bit patterns to other DecodeNodes.
+  explicit DecodeNode(const DecodeMapping& map, Decoder* decoder = NULL)
+      : name_(map.name),
+        sampled_bits_(map.sampled_bits),
+        instruction_name_("node"),
+        pattern_table_(map.mapping),
+        decoder_(decoder),
+        compiled_node_(NULL) {
+    // With the current two bits per symbol encoding scheme, the maximum pattern
+    // length is (32 - 2) / 2 = 15 bits.
+    VIXL_CHECK(GetPatternLength(map.mapping[0].pattern) <= 15);
+    for (const DecodePattern& p : map.mapping) {
+      VIXL_CHECK(GetPatternLength(p.pattern) == map.sampled_bits.size());
+    }
+  }
+
+  ~DecodeNode() {
+    // Delete the compiled version of this node, if one was created.
+    if (compiled_node_ != NULL) {
+      delete compiled_node_;
+    }
+  }
+
+  // Get the bits sampled from the instruction by this node.
+  const std::vector<uint8_t>& GetSampledBits() const { return sampled_bits_; }
+
+  // Get the number of bits sampled from the instruction by this node.
+  size_t GetSampledBitsCount() const { return sampled_bits_.size(); }
+
+  // A leaf node is a DecodeNode that wraps the visitor function for the
+  // identified instruction class.
+  bool IsLeafNode() const { return instruction_name_ != "node"; }
+
+  std::string GetName() const { return name_; }
+
+  // Create a CompiledDecodeNode of specified table size that uses
+  // bit_extract_fn to sample bits from the instruction.
+  void CreateCompiledNode(BitExtractFn bit_extract_fn, size_t table_size) {
+    VIXL_ASSERT(bit_extract_fn != NULL);
+    VIXL_ASSERT(table_size > 0);
+    compiled_node_ = new CompiledDecodeNode(bit_extract_fn, table_size);
+  }
+
+  // Create a CompiledDecodeNode wrapping a visitor function. No decoding is
+  // required for this node; the visitor function is called instead.
+  void CreateVisitorNode() {
+    compiled_node_ = new CompiledDecodeNode(instruction_name_, decoder_);
+  }
+
+  // Find and compile the DecodeNode named "name", and set it as the node for
+  // the pattern "bits".
+  void CompileNodeForBits(Decoder* decoder, std::string name, uint32_t bits);
+
+  // Get a pointer to an instruction method that extracts the instruction bits
+  // specified by the mask argument, and returns those sampled bits as a
+  // contiguous sequence, suitable for indexing an array.
+  // For example, a mask of 0b1010 returns a function that, given an instruction
+  // 0bXYZW, will return 0bXZ.
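+  // As a hand-worked illustration (editor's note, not upstream text): for
+  // mask 0b1010 and the instruction word 0b0110, bits 3 and 1 hold 0 and 1,
+  // so the returned function produces 0b01.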
+  BitExtractFn GetBitExtractFunction(uint32_t mask) {
+    return GetBitExtractFunctionHelper(mask, 0);
+  }
+
+  // Get a pointer to an Instruction method that applies a mask to the
+  // instruction bits, and tests if the result is equal to value. The returned
+  // function gives a 1 result if ((inst & mask) == value), 0 otherwise.
+  BitExtractFn GetBitExtractFunction(uint32_t mask, uint32_t value) {
+    return GetBitExtractFunctionHelper(value, mask);
+  }
+
+  // Compile this DecodeNode into a new CompiledDecodeNode and return a pointer
+  // to it. This pointer is also stored inside the DecodeNode itself. Destroying
+  // a DecodeNode frees its associated CompiledDecodeNode.
+  CompiledDecodeNode* Compile(Decoder* decoder);
+
+  // Get a pointer to the CompiledDecodeNode associated with this DecodeNode.
+  // Returns NULL if the node has not been compiled yet.
+  CompiledDecodeNode* GetCompiledNode() const { return compiled_node_; }
+  bool IsCompiled() const { return GetCompiledNode() != NULL; }
+
+  enum class PatternSymbol { kSymbol0 = 0, kSymbol1 = 1, kSymbolX = 2 };
+  static const uint32_t kEndOfPattern = 3;
+  static const uint32_t kPatternSymbolMask = 3;
+
+  size_t GetPatternLength(uint32_t pattern) const {
+    uint32_t hsb = HighestSetBitPosition(pattern);
+    // The pattern length is signified by two set bits in a two bit-aligned
+    // position. Ensure that the pattern has a highest set bit, it's at an odd
+    // bit position, and that the bit to the right of the hsb is also set.
+    VIXL_ASSERT(((hsb % 2) == 1) && ((pattern >> (hsb - 1)) == kEndOfPattern));
+    return hsb / 2;
+  }
+
+  bool PatternContainsSymbol(uint32_t pattern, PatternSymbol symbol) const {
+    while ((pattern & kPatternSymbolMask) != kEndOfPattern) {
+      if (static_cast<PatternSymbol>(pattern & kPatternSymbolMask) == symbol)
+        return true;
+      pattern >>= 2;
+    }
+    return false;
+  }
+
+  PatternSymbol GetSymbolAt(uint32_t pattern, size_t pos) const {
+    size_t len = GetPatternLength(pattern);
+    VIXL_ASSERT((pos < 15) && (pos < len));
+    uint32_t shift = static_cast<uint32_t>(2 * (len - pos - 1));
+    uint32_t sym = (pattern >> shift) & kPatternSymbolMask;
+    return static_cast<PatternSymbol>(sym);
+  }
+
+ private:
+  // Generate a mask and value pair from a pattern constructed from 0, 1 and x
+  // (don't care) 2-bit symbols.
+  // For example "10x1"_b should return mask = 0b1101, value = 0b1001.
+  typedef std::pair<uint32_t, uint32_t> MaskValuePair;
+  MaskValuePair GenerateMaskValuePair(uint32_t pattern) const;
+
+  // Generate a pattern ordered by the bit positions sampled by this node.
+  // The symbol corresponding to the lowest sample position is placed in the
+  // least-significant bits of the result pattern.
+  // For example, a pattern of "1x0"_b expected when sampling bits 31, 1 and 30
+  // returns the pattern "x01"_b; bit 1 should be 'x', bit 30 '0' and bit 31
+  // '1'.
+  // This output makes comparisons easier between the pattern and bits sampled
+  // from an instruction using the fast "compress" algorithm. See
+  // Instruction::Compress().
+  uint32_t GenerateOrderedPattern(uint32_t pattern) const;
+
+  // Generate a mask with a bit set at each sample position.
+  uint32_t GenerateSampledBitsMask() const;
+
+  // Try to compile a more optimised decode operation for this node, returning
+  // true if successful.
+  bool TryCompileOptimisedDecodeTable(Decoder* decoder);
+
+  // Helper function that returns a bit extracting function. If y is zero,
+  // x is a bit extraction mask. Otherwise, y is the mask, and x is the value
+  // to match after masking.
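+  // As a further illustration (editor's note, not upstream text): with
+  // y == 0b1100 and x == 0b0100, the returned method computes
+  // ((instr & 0b1100) == 0b0100), i.e. it yields 1 exactly when instruction
+  // bits 3:2 equal 0b01. With y == 0, x is instead treated as a sampling
+  // mask, as described for GetBitExtractFunction(mask) above.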
+  BitExtractFn GetBitExtractFunctionHelper(uint32_t x, uint32_t y);
+
+  // Name of this decoder node, used to construct edges in the decode graph.
+  std::string name_;
+
+  // Vector of bits sampled from an instruction to determine which node to look
+  // up next in the decode process.
+  const std::vector<uint8_t>& sampled_bits_;
+  static const std::vector<uint8_t> kEmptySampledBits;
+
+  // For leaf nodes, this is the name of the instruction form that the node
+  // represents. For other nodes, this is always set to "node".
+  std::string instruction_name_;
+
+  // Source mapping from bit pattern to name of next decode stage.
+  const std::vector<DecodePattern>& pattern_table_;
+  static const std::vector<DecodePattern> kEmptyPatternTable;
+
+  // Pointer to the decoder containing this node, used to call its visitor
+  // function for leaf nodes.
+  Decoder* decoder_;
+
+  // Pointer to the compiled version of this node. If this node hasn't been
+  // compiled yet, this pointer is NULL.
+  CompiledDecodeNode* compiled_node_;
+};
 
 }  // namespace aarch64
diff --git a/dep/vixl/include/vixl/aarch64/decoder-constants-aarch64.h b/dep/vixl/include/vixl/aarch64/decoder-constants-aarch64.h
new file mode 100644
index 000000000..70e01a103
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/decoder-constants-aarch64.h
@@ -0,0 +1,9963 @@
+// Copyright 2019, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+namespace vixl {
+namespace aarch64 {
+
+// Recursively construct a uint32_t encoded bit pattern from a string literal.
+// The string characters are mapped as two-bit symbols '0'=>0, '1'=>1, 'x'=>2.
+// The remaining symbol, 3, is used to mark the end of the pattern, allowing
+// its length to be found. For example, the pattern "1x01"_b is encoded in a
+// uint32_t as 0b11_01_10_00_01. The maximum pattern string length is 15
+// characters, encoded as 3 in the most significant bits, followed by 15 2-bit
+// symbols.
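+
+// A hand-worked example (editor's note, not part of the upstream file):
+// "x1"_b evaluates as str_to_two_bit_pattern("x1", 2, 3), where 3 is
+// DecodeNode::kEndOfPattern:
+//   'x' => 2:  a = (0b11 << 2) | 2 = 0b1110
+//   '1' => 1:  a = (0b1110 << 2) | 1 = 0b111001
+// giving "x1"_b == 0b11_10_01, with the leading 0b11 marking the edge of
+// the pattern. Each kDecodeMapping entry below names a node, lists the
+// instruction bits it samples, and maps patterns of those bits (written
+// with these _b literals) to the next node or instruction handler.
+constexpr uint32_t str_to_two_bit_pattern(const char* x, size_t s, uint32_t a) {
+  if (s == 0) return a;
+  uint32_t r = (x[0] == 'x') ?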
2 : (x[0] - '0'); + return str_to_two_bit_pattern(x + 1, s - 1, (a << 2) | r); +} + +constexpr uint32_t operator"" _b(const char* x, size_t s) { + return str_to_two_bit_pattern(x, s, DecodeNode::kEndOfPattern); +} + +// This decode table is derived from the AArch64 ISA XML specification, +// available from https://developer.arm.com/products/architecture/a-profile/ + +// clang-format off +static const DecodeMapping kDecodeMapping[] = { + { "_ggvlym", + {13, 12}, + { {"00"_b, "adc_32_addsub_carry"}, + }, + }, + + { "_ghmtnl", + {18, 17}, + { {"0x"_b, "ld1_asisdlsep_r3_r3"}, + {"10"_b, "ld1_asisdlsep_r3_r3"}, + {"11"_b, "ld1_asisdlsep_i3_i3"}, + }, + }, + + { "_ghpxms", + {23, 22}, + { {"01"_b, "fmla_z_p_zzz"}, + {"1x"_b, "fmla_z_p_zzz"}, + }, + }, + + { "_ghqqzy", + {11}, + { {"0"_b, "_qrsxzp"}, + }, + }, + + { "_ghrnmz", + {20, 19, 18, 17, 16, 13, 12, 9, 8, 7, 6, 5}, + { {"000010011111"_b, "xpacd_64z_dp_1src"}, + }, + }, + + { "_gjprgr", + {22, 13, 12}, + { {"000"_b, "ldsmax_64_memop"}, + {"001"_b, "ldsmin_64_memop"}, + {"010"_b, "ldumax_64_memop"}, + {"011"_b, "ldumin_64_memop"}, + {"100"_b, "ldsmaxl_64_memop"}, + {"101"_b, "ldsminl_64_memop"}, + {"110"_b, "ldumaxl_64_memop"}, + {"111"_b, "lduminl_64_memop"}, + }, + }, + + { "_gjprmg", + {11}, + { {"0"_b, "_llpsqq"}, + }, + }, + + { "_gjtmjg", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "bic_asimdimm_l_hl"}, + {"00x100"_b, "uqshrn_asimdshf_n"}, + {"00x101"_b, "uqrshrn_asimdshf_n"}, + {"010x00"_b, "uqshrn_asimdshf_n"}, + {"010x01"_b, "uqrshrn_asimdshf_n"}, + {"011100"_b, "uqshrn_asimdshf_n"}, + {"011101"_b, "uqrshrn_asimdshf_n"}, + {"0x1000"_b, "uqshrn_asimdshf_n"}, + {"0x1001"_b, "uqrshrn_asimdshf_n"}, + }, + }, + + { "_gjxsrn", + {2, 1, 0}, + { {"000"_b, "_sqttsv"}, + }, + }, + + { "_gknljg", + {11, 10, 9, 8, 7, 6}, + { {"000000"_b, "wfet_only_systeminstrswithreg"}, + }, + }, + + { "_gkqhyz", + {23, 22}, + { {"00"_b, "fmsub_s_floatdp3"}, + {"01"_b, "fmsub_d_floatdp3"}, + {"11"_b, "fmsub_h_floatdp3"}, + }, + }, + + { "_glgznt", + {20, 19, 18, 17, 16, 4, 3}, + { {"0000001"_b, "fcmp_dz_floatcmp"}, + {"0000011"_b, "fcmpe_dz_floatcmp"}, + {"xxxxx00"_b, "fcmp_d_floatcmp"}, + {"xxxxx10"_b, "fcmpe_d_floatcmp"}, + }, + }, + + { "_gljqng", + {22, 13, 12}, + { {"000"_b, "ldsmaxa_32_memop"}, + {"001"_b, "ldsmina_32_memop"}, + {"010"_b, "ldumaxa_32_memop"}, + {"011"_b, "ldumina_32_memop"}, + {"100"_b, "ldsmaxal_32_memop"}, + {"101"_b, "ldsminal_32_memop"}, + {"110"_b, "ldumaxal_32_memop"}, + {"111"_b, "lduminal_32_memop"}, + }, + }, + + { "_glkvkr", + {30}, + { {"0"_b, "adds_32_addsub_shift"}, + {"1"_b, "subs_32_addsub_shift"}, + }, + }, + + { "_glpxty", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_s32_float2fix"}, + {"00011"_b, "ucvtf_s32_float2fix"}, + {"11000"_b, "fcvtzs_32s_float2fix"}, + {"11001"_b, "fcvtzu_32s_float2fix"}, + }, + }, + + { "_gmqyjv", + {30, 20, 19, 18, 17, 16, 13}, + { {"1111110"_b, "_nvkxzs"}, + }, + }, + + { "_gmsmls", + {13}, + { {"0"_b, "mls_asimdelem_r"}, + {"1"_b, "umlsl_asimdelem_l"}, + }, + }, + + { "_gmsqqz", + {23}, + { {"0"_b, "facge_asimdsame_only"}, + {"1"_b, "facgt_asimdsame_only"}, + }, + }, + + { "_gmtjvr", + {16, 13, 12}, + { {"000"_b, "rev_64_dp_1src"}, + {"001"_b, "cnt_64_dp_1src"}, + {"100"_b, "pacdb_64p_dp_1src"}, + {"101"_b, "autdb_64p_dp_1src"}, + {"110"_b, "_rlxhxz"}, + {"111"_b, "_phjkhr"}, + }, + }, + + { "_gngjxr", + {20, 19, 18, 17, 16}, + { {"00000"_b, "cadd_z_zz"}, + {"00001"_b, "sqcadd_z_zz"}, + }, + }, + + { "_gnhjkl", + {16, 13, 12}, + { {"000"_b, "rbit_64_dp_1src"}, + {"001"_b, 
"clz_64_dp_1src"}, + {"010"_b, "abs_64_dp_1src"}, + {"100"_b, "pacia_64p_dp_1src"}, + {"101"_b, "autia_64p_dp_1src"}, + {"110"_b, "_yzxjnk"}, + {"111"_b, "_prxyhr"}, + }, + }, + + { "_gnpgsg", + {22}, + { {"0"_b, "str_64_ldst_regoff"}, + {"1"_b, "ldr_64_ldst_regoff"}, + }, + }, + + { "_gnqhsl", + {23, 22, 20, 19, 18, 17, 16}, + { {"0010000"_b, "punpklo_p_p"}, + {"0010001"_b, "punpkhi_p_p"}, + {"xx0xxxx"_b, "zip1_p_pp"}, + {"xx10100"_b, "rev_p_p"}, + }, + }, + + { "_gnxrlr", + {23, 22, 13, 12, 11, 10}, + { {"0011x0"_b, "sudot_asimdelem_d"}, + {"0111x0"_b, "bfdot_asimdelem_e"}, + {"0x1001"_b, "scvtf_asimdshf_c"}, + {"0x1111"_b, "fcvtzs_asimdshf_c"}, + {"1011x0"_b, "usdot_asimdelem_d"}, + {"1111x0"_b, "bfmlal_asimdelem_f"}, + {"xx00x0"_b, "sqdmulh_asimdelem_r"}, + {"xx01x0"_b, "sqrdmulh_asimdelem_r"}, + {"xx10x0"_b, "sdot_asimdelem_d"}, + }, + }, + + { "_gplkxy", + {20, 19, 18, 17, 16}, + { {"00000"_b, "sqneg_asimdmisc_r"}, + }, + }, + + { "_gpxltv", + {23, 18, 17, 16}, + { {"0000"_b, "uqxtnt_z_zz"}, + }, + }, + + { "_gqmjys", + {18, 17}, + { {"0x"_b, "st1_asisdlsop_sx1_r1s"}, + {"10"_b, "st1_asisdlsop_sx1_r1s"}, + {"11"_b, "st1_asisdlsop_s1_i1s"}, + }, + }, + + { "_grgrpt", + {18}, + { {"1"_b, "fmaxv_v_p_z"}, + }, + }, + + { "_grjzyl", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_hjtsgj"}, + }, + }, + + { "_grktgm", + {30, 23, 22, 19}, + { {"1001"_b, "aesd_b_cryptoaes"}, + {"xxx0"_b, "cnt_asimdmisc_r"}, + }, + }, + + { "_grmpht", + {20, 18, 17}, + { {"000"_b, "_mjjhqj"}, + }, + }, + + { "_grprpj", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldarb_lr32_ldstexcl"}, + }, + }, + + { "_grqsgp", + {23, 22, 4, 3, 2, 1, 0}, + { {"0000001"_b, "svc_ex_exception"}, + {"0000010"_b, "hvc_ex_exception"}, + {"0000011"_b, "smc_ex_exception"}, + {"0100000"_b, "hlt_ex_exception"}, + }, + }, + + { "_grsnms", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_h32_float2fix"}, + {"00011"_b, "ucvtf_h32_float2fix"}, + {"11000"_b, "fcvtzs_32h_float2fix"}, + {"11001"_b, "fcvtzu_32h_float2fix"}, + }, + }, + + { "_grsslr", + {30, 23, 22, 11, 10, 4}, + { {"001000"_b, "ccmn_32_condcmp_reg"}, + {"001100"_b, "ccmn_32_condcmp_imm"}, + {"101000"_b, "ccmp_32_condcmp_reg"}, + {"101100"_b, "ccmp_32_condcmp_imm"}, + }, + }, + + { "_grvxrm", + {12}, + { {"0"_b, "st4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_gshlgj", + {30, 23, 22, 13, 12, 11, 10}, + { {"100xxx1"_b, "ins_asimdins_iv_v"}, + {"x00xxx0"_b, "ext_asimdext_only"}, + {"x010001"_b, "fmaxnmp_asimdsamefp16_only"}, + {"x010101"_b, "faddp_asimdsamefp16_only"}, + {"x010111"_b, "fmul_asimdsamefp16_only"}, + {"x011001"_b, "fcmge_asimdsamefp16_only"}, + {"x011011"_b, "facge_asimdsamefp16_only"}, + {"x011101"_b, "fmaxp_asimdsamefp16_only"}, + {"x011111"_b, "fdiv_asimdsamefp16_only"}, + {"x110001"_b, "fminnmp_asimdsamefp16_only"}, + {"x110101"_b, "fabd_asimdsamefp16_only"}, + {"x111001"_b, "fcmgt_asimdsamefp16_only"}, + {"x111011"_b, "facgt_asimdsamefp16_only"}, + {"x111101"_b, "fminp_asimdsamefp16_only"}, + }, + }, + + { "_gshrzq", + {22, 20, 11}, + { {"010"_b, "decb_r_rs"}, + {"110"_b, "dech_r_rs"}, + }, + }, + + { "_gsjvmx", + {12}, + { {"0"_b, "st3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_gslmjl", + {23, 22}, + { {"00"_b, "fcsel_s_floatsel"}, + {"01"_b, "fcsel_d_floatsel"}, + {"11"_b, "fcsel_h_floatsel"}, + }, + }, + + { "_gsnnnt", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlurb_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapurb_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursb_64_ldapstl_unscaled"}, + {"011xx00"_b, "ldapursb_32_ldapstl_unscaled"}, + {"100xx00"_b, 
"stlurh_32_ldapstl_unscaled"}, + {"101xx00"_b, "ldapurh_32_ldapstl_unscaled"}, + {"110xx00"_b, "ldapursh_64_ldapstl_unscaled"}, + {"111xx00"_b, "ldapursh_32_ldapstl_unscaled"}, + {"x000001"_b, "cpyfprn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtrn_cpy_memcms"}, + {"x001001"_b, "cpyfprtrn_cpy_memcms"}, + {"x001101"_b, "cpyfptrn_cpy_memcms"}, + {"x010001"_b, "cpyfmrn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtrn_cpy_memcms"}, + {"x011001"_b, "cpyfmrtrn_cpy_memcms"}, + {"x011101"_b, "cpyfmtrn_cpy_memcms"}, + {"x100001"_b, "cpyfern_cpy_memcms"}, + {"x100101"_b, "cpyfewtrn_cpy_memcms"}, + {"x101001"_b, "cpyfertrn_cpy_memcms"}, + {"x101101"_b, "cpyfetrn_cpy_memcms"}, + {"x110001"_b, "sete_set_memcms"}, + {"x110101"_b, "setet_set_memcms"}, + {"x111001"_b, "seten_set_memcms"}, + {"x111101"_b, "setetn_set_memcms"}, + }, + }, + + { "_gsvlph", + {22, 4, 3}, + { {"00x"_b, "prfm_p_ldst_regoff"}, + {"010"_b, "prfm_p_ldst_regoff"}, + {"011"_b, "rprfm_r_ldst_regoff"}, + }, + }, + + { "_gtqnvr", + {30, 23, 22}, + { {"000"_b, "msub_32a_dp_3src"}, + }, + }, + + { "_gtsglj", + {11, 10, 9, 8, 7, 6}, + { {"000001"_b, "tcommit_only_barriers"}, + {"000011"_b, "sb_only_barriers"}, + {"xx1000"_b, "dsb_bon_barriers"}, + {"xxxx10"_b, "dmb_bo_barriers"}, + }, + }, + + { "_gtxpgx", + {30, 23, 13, 4}, + { {"0000"_b, "prfw_i_p_bz_s_x32_scaled"}, + {"0010"_b, "prfd_i_p_bz_s_x32_scaled"}, + {"010x"_b, "ld1h_z_p_bz_s_x32_scaled"}, + {"011x"_b, "ldff1h_z_p_bz_s_x32_scaled"}, + {"1000"_b, "prfw_i_p_bz_d_x32_scaled"}, + {"1010"_b, "prfd_i_p_bz_d_x32_scaled"}, + {"110x"_b, "ld1h_z_p_bz_d_x32_scaled"}, + {"111x"_b, "ldff1h_z_p_bz_d_x32_scaled"}, + }, + }, + + { "_gvpvjn", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "rev_32_dp_1src"}, + {"0000001"_b, "ctz_32_dp_1src"}, + }, + }, + + { "_gvxjvz", + {23, 22, 12}, + { {"000"_b, "_tgvkhm"}, + {"001"_b, "_ktyrgy"}, + {"010"_b, "_gxzgtk"}, + {"011"_b, "_vlxrps"}, + {"110"_b, "_jqrmyp"}, + {"111"_b, "_ssypmm"}, + }, + }, + + { "_gxmnkl", + {23, 22}, + { {"10"_b, "cdot_z_zzzi_s"}, + {"11"_b, "cdot_z_zzzi_d"}, + }, + }, + + { "_gxqnph", + {23, 22, 13, 12, 11, 10}, + { {"0x1001"_b, "ucvtf_asimdshf_c"}, + {"0x1111"_b, "fcvtzu_asimdshf_c"}, + {"1000x0"_b, "fmlsl2_asimdelem_lh"}, + {"xx01x0"_b, "sqrdmlah_asimdelem_r"}, + {"xx10x0"_b, "udot_asimdelem_d"}, + {"xx11x0"_b, "sqrdmlsh_asimdelem_r"}, + }, + }, + + { "_gxzgtk", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fabs_d_floatdp1"}, + {"000010"_b, "fsqrt_d_floatdp1"}, + {"000110"_b, "fcvt_hd_floatdp1"}, + {"001000"_b, "frintp_d_floatdp1"}, + {"001010"_b, "frintz_d_floatdp1"}, + {"001110"_b, "frinti_d_floatdp1"}, + {"010000"_b, "frint32x_d_floatdp1"}, + {"010010"_b, "frint64x_d_floatdp1"}, + }, + }, + + { "_gyjphh", + {30, 23, 22, 11, 10}, + { {"00000"_b, "_plgrmv"}, + {"00001"_b, "_xmxhhg"}, + {"00100"_b, "_lmmjvx"}, + {"00110"_b, "_tmtgqm"}, + {"01100"_b, "_hvmyjz"}, + {"10000"_b, "_mgtxyt"}, + {"10100"_b, "_rkzlpp"}, + {"10110"_b, "_xqrgjj"}, + }, + }, + + { "_gyllxt", + {23}, + { {"0"_b, "_hzkxht"}, + }, + }, + + { "_gylmmr", + {30, 23, 22, 11, 10}, + { {"00010"_b, "str_b_ldst_regoff"}, + {"00110"_b, "ldr_b_ldst_regoff"}, + {"01010"_b, "str_q_ldst_regoff"}, + {"01110"_b, "ldr_q_ldst_regoff"}, + {"10010"_b, "str_h_ldst_regoff"}, + {"10110"_b, "ldr_h_ldst_regoff"}, + }, + }, + + { "_gyrjrm", + {20, 19, 18, 17, 16}, + { {"00000"_b, "cpy_z_p_v"}, + {"00001"_b, "compact_z_p_z"}, + {"00010"_b, "lasta_v_p_z"}, + {"00011"_b, "lastb_v_p_z"}, + {"00100"_b, "revb_z_z"}, + {"00101"_b, "revh_z_z"}, + {"00110"_b, "revw_z_z"}, + {"00111"_b, "rbit_z_p_z"}, + 
{"01000"_b, "clasta_z_p_zz"}, + {"01001"_b, "clastb_z_p_zz"}, + {"01010"_b, "clasta_v_p_z"}, + {"01011"_b, "clastb_v_p_z"}, + {"01100"_b, "splice_z_p_zz_des"}, + {"01101"_b, "splice_z_p_zz_con"}, + }, + }, + + { "_gyrkkz", + {30, 22, 11}, + { {"000"_b, "_nqjvmr"}, + {"001"_b, "_jjnvrv"}, + {"010"_b, "_yptgjg"}, + {"011"_b, "_vsyjql"}, + {"100"_b, "_lzqxgt"}, + {"110"_b, "_xvrvhv"}, + {"111"_b, "_ptstkz"}, + }, + }, + + { "_gyymmx", + {30, 13, 12}, + { {"000"_b, "stilp_32se_ldiappstilp"}, + {"001"_b, "stilp_32s_ldiappstilp"}, + {"100"_b, "stilp_64ss_ldiappstilp"}, + {"101"_b, "stilp_64s_ldiappstilp"}, + }, + }, + + { "_gzgpjp", + {23}, + { {"0"_b, "fmaxp_asimdsame_only"}, + {"1"_b, "fminp_asimdsame_only"}, + }, + }, + + { "_gznrjv", + {30, 23, 22, 19, 16}, + { {"10010"_b, "aese_b_cryptoaes"}, + {"xxx00"_b, "cls_asimdmisc_r"}, + {"xxx01"_b, "sqxtn_asimdmisc_n"}, + }, + }, + + { "_gzpkvm", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000000"_b, "swpb_32_memop"}, + {"0000100"_b, "rcwclr_64_memop"}, + {"0001000"_b, "rcwswp_64_memop"}, + {"0001100"_b, "rcwset_64_memop"}, + {"000xx10"_b, "strb_32b_ldst_regoff"}, + {"0010000"_b, "swplb_32_memop"}, + {"0010100"_b, "rcwclrl_64_memop"}, + {"0011000"_b, "rcwswpl_64_memop"}, + {"0011100"_b, "rcwsetl_64_memop"}, + {"001xx10"_b, "ldrb_32b_ldst_regoff"}, + {"0100000"_b, "swpab_32_memop"}, + {"0100100"_b, "rcwclra_64_memop"}, + {"0101000"_b, "rcwswpa_64_memop"}, + {"0101100"_b, "rcwseta_64_memop"}, + {"010xx10"_b, "ldrsb_64b_ldst_regoff"}, + {"0110000"_b, "swpalb_32_memop"}, + {"0110100"_b, "rcwclral_64_memop"}, + {"0111000"_b, "rcwswpal_64_memop"}, + {"0111100"_b, "rcwsetal_64_memop"}, + {"011xx10"_b, "ldrsb_32b_ldst_regoff"}, + {"1000000"_b, "swph_32_memop"}, + {"1000100"_b, "rcwsclr_64_memop"}, + {"1001000"_b, "rcwsswp_64_memop"}, + {"1001100"_b, "rcwsset_64_memop"}, + {"100xx10"_b, "strh_32_ldst_regoff"}, + {"1010000"_b, "swplh_32_memop"}, + {"1010100"_b, "rcwsclrl_64_memop"}, + {"1011000"_b, "rcwsswpl_64_memop"}, + {"1011100"_b, "rcwssetl_64_memop"}, + {"101xx10"_b, "ldrh_32_ldst_regoff"}, + {"1100000"_b, "swpah_32_memop"}, + {"1100100"_b, "rcwsclra_64_memop"}, + {"1101000"_b, "rcwsswpa_64_memop"}, + {"1101100"_b, "rcwsseta_64_memop"}, + {"110xx10"_b, "ldrsh_64_ldst_regoff"}, + {"1110000"_b, "swpalh_32_memop"}, + {"1110100"_b, "rcwsclral_64_memop"}, + {"1111000"_b, "rcwsswpal_64_memop"}, + {"1111100"_b, "rcwssetal_64_memop"}, + {"111xx10"_b, "ldrsh_32_ldst_regoff"}, + }, + }, + + { "_gzqvnk", + {23, 12, 4, 3, 2, 1, 0}, + { {"1000000"_b, "ctermeq_rr"}, + {"1010000"_b, "ctermne_rr"}, + {"x10xxxx"_b, "whilewr_p_rr"}, + {"x11xxxx"_b, "whilerw_p_rr"}, + }, + }, + + { "_gzrtkk", + {18, 17}, + { {"0x"_b, "ld1_asisdlsep_r1_r1"}, + {"10"_b, "ld1_asisdlsep_r1_r1"}, + {"11"_b, "ld1_asisdlsep_i1_i1"}, + }, + }, + + { "_gzvylr", + {30, 13}, + { {"00"_b, "_rjyrnt"}, + {"01"_b, "_mzhsrq"}, + {"10"_b, "_prtvjm"}, + {"11"_b, "_zspprz"}, + }, + }, + + { "_gzzsgh", + {18}, + { {"0"_b, "ld3_asisdlso_b3_3b"}, + }, + }, + + { "_hgjgpm", + {30}, + { {"0"_b, "bic_64_log_shift"}, + {"1"_b, "eon_64_log_shift"}, + }, + }, + + { "_hgxtqy", + {30, 23, 22, 13}, + { {"0001"_b, "ldnt1w_z_p_ar_s_x32_unscaled"}, + {"0010"_b, "ld1rsh_z_p_bi_s64"}, + {"0011"_b, "ld1rsh_z_p_bi_s32"}, + {"0110"_b, "ld1rsb_z_p_bi_s64"}, + {"0111"_b, "ld1rsb_z_p_bi_s32"}, + {"1000"_b, "ldnt1sw_z_p_ar_d_64_unscaled"}, + {"1010"_b, "ld1sw_z_p_bz_d_64_unscaled"}, + {"1011"_b, "ldff1sw_z_p_bz_d_64_unscaled"}, + }, + }, + + { "_hhkqtn", + {20, 19, 18, 17, 16}, + { {"00000"_b, "lasta_r_p_z"}, + {"00001"_b, 
"lastb_r_p_z"}, + {"01000"_b, "cpy_z_p_r"}, + {"10000"_b, "clasta_r_p_z"}, + {"10001"_b, "clastb_r_p_z"}, + }, + }, + + { "_hhlmrg", + {23, 20, 19, 18, 17, 16, 13}, + { {"0000000"_b, "ld2r_asisdlso_r2"}, + {"0000001"_b, "ld4r_asisdlso_r4"}, + {"10xxxx0"_b, "ld2r_asisdlsop_rx2_r"}, + {"10xxxx1"_b, "ld4r_asisdlsop_rx4_r"}, + {"110xxx0"_b, "ld2r_asisdlsop_rx2_r"}, + {"110xxx1"_b, "ld4r_asisdlsop_rx4_r"}, + {"1110xx0"_b, "ld2r_asisdlsop_rx2_r"}, + {"1110xx1"_b, "ld4r_asisdlsop_rx4_r"}, + {"11110x0"_b, "ld2r_asisdlsop_rx2_r"}, + {"11110x1"_b, "ld4r_asisdlsop_rx4_r"}, + {"1111100"_b, "ld2r_asisdlsop_rx2_r"}, + {"1111101"_b, "ld4r_asisdlsop_rx4_r"}, + {"1111110"_b, "ld2r_asisdlsop_r2_i"}, + {"1111111"_b, "ld4r_asisdlsop_r4_i"}, + }, + }, + + { "_hhxpjz", + {18}, + { {"0"_b, "ld2_asisdlso_b2_2b"}, + }, + }, + + { "_hhxpyt", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_b_ldapstl_simd"}, + {"001xx10"_b, "ldapur_b_ldapstl_simd"}, + {"010xx10"_b, "stlur_q_ldapstl_simd"}, + {"011xx10"_b, "ldapur_q_ldapstl_simd"}, + {"100xx10"_b, "stlur_h_ldapstl_simd"}, + {"101xx10"_b, "ldapur_h_ldapstl_simd"}, + {"x000001"_b, "cpyp_cpy_memcms"}, + {"x000101"_b, "cpypwt_cpy_memcms"}, + {"x001001"_b, "cpyprt_cpy_memcms"}, + {"x001101"_b, "cpypt_cpy_memcms"}, + {"x010001"_b, "cpym_cpy_memcms"}, + {"x010101"_b, "cpymwt_cpy_memcms"}, + {"x011001"_b, "cpymrt_cpy_memcms"}, + {"x011101"_b, "cpymt_cpy_memcms"}, + {"x100001"_b, "cpye_cpy_memcms"}, + {"x100101"_b, "cpyewt_cpy_memcms"}, + {"x101001"_b, "cpyert_cpy_memcms"}, + {"x101101"_b, "cpyet_cpy_memcms"}, + {"x110001"_b, "setgp_set_memcms"}, + {"x110101"_b, "setgpt_set_memcms"}, + {"x111001"_b, "setgpn_set_memcms"}, + {"x111101"_b, "setgptn_set_memcms"}, + }, + }, + + { "_hjplhs", + {20, 19, 18, 17, 16, 13, 12}, + { {"1111100"_b, "ldaprb_32l_memop"}, + }, + }, + + { "_hjqryy", + {11, 10, 9, 8, 7, 6}, + { {"000000"_b, "wfit_only_systeminstrswithreg"}, + }, + }, + + { "_hjtsgj", + {23}, + { {"0"_b, "_pnkxsr"}, + }, + }, + + { "_hjvkkq", + {18}, + { {"0"_b, "ld4_asisdlsep_r4_r"}, + {"1"_b, "ld4_asisdlsep_i4_i"}, + }, + }, + + { "_hkgzsh", + {13, 12, 11, 10}, + { {"1111"_b, "_qvzvmq"}, + }, + }, + + { "_hkjjsr", + {12}, + { {"0"_b, "ld1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_hkpjqm", + {30}, + { {"1"_b, "_qgyppr"}, + }, + }, + + { "_hkxlsm", + {18}, + { {"0"_b, "st4_asisdlsop_hx4_r4h"}, + {"1"_b, "st4_asisdlsop_h4_i4h"}, + }, + }, + + { "_hkxzqg", + {2, 1}, + { {"00"_b, "br_64_branch_reg"}, + }, + }, + + { "_hljttg", + {12}, + { {"0"_b, "ld2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_hlljqz", + {30, 23, 22, 11, 10}, + { {"00000"_b, "stur_s_ldst_unscaled"}, + {"00001"_b, "str_s_ldst_immpost"}, + {"00011"_b, "str_s_ldst_immpre"}, + {"00100"_b, "ldur_s_ldst_unscaled"}, + {"00101"_b, "ldr_s_ldst_immpost"}, + {"00111"_b, "ldr_s_ldst_immpre"}, + {"10000"_b, "stur_d_ldst_unscaled"}, + {"10001"_b, "str_d_ldst_immpost"}, + {"10011"_b, "str_d_ldst_immpre"}, + {"10100"_b, "ldur_d_ldst_unscaled"}, + {"10101"_b, "ldr_d_ldst_immpost"}, + {"10111"_b, "ldr_d_ldst_immpre"}, + }, + }, + + { "_hlqvmm", + {20, 19, 18, 17, 16, 13, 12, 9, 8, 7, 6, 5}, + { {"000010011111"_b, "xpaci_64z_dp_1src"}, + }, + }, + + { "_hlxmpy", + {13, 12, 11, 10}, + { {"0000"_b, "umlal_asimddiff_l"}, + {"0001"_b, "sub_asimdsame_only"}, + {"0010"_b, "_hytrnv"}, + {"0011"_b, "cmeq_asimdsame_only"}, + {"0101"_b, "mls_asimdsame_only"}, + {"0110"_b, "_vjhrzl"}, + {"0111"_b, "pmul_asimdsame_only"}, + {"1000"_b, "umlsl_asimddiff_l"}, + {"1001"_b, "umaxp_asimdsame_only"}, + {"1010"_b, "_zpjzst"}, + {"1011"_b, 
"uminp_asimdsame_only"}, + {"1101"_b, "sqrdmulh_asimdsame_only"}, + {"1110"_b, "_jztlrz"}, + }, + }, + + { "_hlypvy", + {30, 23, 22}, + { {"000"_b, "smaddl_64wa_dp_3src"}, + {"010"_b, "umaddl_64wa_dp_3src"}, + }, + }, + + { "_hmgzjl", + {18}, + { {"0"_b, "st3_asisdlso_h3_3h"}, + }, + }, + + { "_hmjrmm", + {30, 23, 22, 20, 19, 18}, + { {"00xxxx"_b, "add_32_addsub_imm"}, + {"011000"_b, "smax_32_minmax_imm"}, + {"011001"_b, "umax_32u_minmax_imm"}, + {"011010"_b, "smin_32_minmax_imm"}, + {"011011"_b, "umin_32u_minmax_imm"}, + {"10xxxx"_b, "sub_32_addsub_imm"}, + }, + }, + + { "_hmpzzg", + {22, 20, 19, 18, 17, 16}, + { {"111000"_b, "fcmle_asisdmiscfp16_fz"}, + {"111001"_b, "frsqrte_asisdmiscfp16_r"}, + {"x00000"_b, "fcmle_asisdmisc_fz"}, + {"x00001"_b, "frsqrte_asisdmisc_r"}, + }, + }, + + { "_hmsgpj", + {13, 12, 10}, + { {"000"_b, "_hthxvr"}, + {"100"_b, "ptrue_p_s"}, + {"101"_b, "_kkvrzq"}, + {"110"_b, "_xxjrsy"}, + }, + }, + + { "_hmtmlq", + {4}, + { {"0"_b, "nor_p_p_pp_z"}, + {"1"_b, "nand_p_p_pp_z"}, + }, + }, + + { "_hnjrmp", + {4}, + { {"0"_b, "cmplo_p_p_zi"}, + {"1"_b, "cmpls_p_p_zi"}, + }, + }, + + { "_hnkyxy", + {18, 17, 16}, + { {"011"_b, "_ykpgyh"}, + }, + }, + + { "_hnsvjh", + {19}, + { {"0"_b, "_ntjrlg"}, + {"1"_b, "sysl_rc_systeminstrs"}, + }, + }, + + { "_hpmvzr", + {11, 10, 9, 8, 7, 6}, + { {"000000"_b, "yield_hi_hints"}, + {"000001"_b, "wfi_hi_hints"}, + {"000010"_b, "sevl_hi_hints"}, + {"000011"_b, "xpaclri_hi_hints"}, + {"001000"_b, "psb_hc_hints"}, + {"001001"_b, "gcsb_hd_hints"}, + {"001100"_b, "paciasp_hi_hints"}, + {"001101"_b, "pacibsp_hi_hints"}, + {"001110"_b, "autiasp_hi_hints"}, + {"001111"_b, "autibsp_hi_hints"}, + {"0x01xx"_b, "hint_hm_hints"}, + {"0x101x"_b, "hint_hm_hints"}, + {"10x0xx"_b, "hint_hm_hints"}, + {"10x1xx"_b, "hint_hm_hints"}, + {"1101xx"_b, "hint_hm_hints"}, + {"11101x"_b, "hint_hm_hints"}, + {"x100xx"_b, "hint_hm_hints"}, + {"x1100x"_b, "hint_hm_hints"}, + {"x111xx"_b, "hint_hm_hints"}, + }, + }, + + { "_hpqkhv", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldaxp_lp64_ldstexcl"}, + }, + }, + + { "_hptkrj", + {30, 22, 13, 12, 11, 10}, + { {"000001"_b, "rmif_only_rmif"}, + {"01xx00"_b, "ccmn_64_condcmp_reg"}, + {"01xx10"_b, "ccmn_64_condcmp_imm"}, + {"11xx00"_b, "ccmp_64_condcmp_reg"}, + {"11xx10"_b, "ccmp_64_condcmp_imm"}, + }, + }, + + { "_hqkhsy", + {12}, + { {"0"_b, "st3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_hqkljv", + {30, 23, 22}, + { {"000"_b, "and_32_log_imm"}, + {"010"_b, "movn_32_movewide"}, + {"100"_b, "eor_32_log_imm"}, + {"110"_b, "movz_32_movewide"}, + }, + }, + + { "_hqnsvg", + {30}, + { {"0"_b, "add_64_addsub_shift"}, + {"1"_b, "sub_64_addsub_shift"}, + }, + }, + + { "_hqvhjp", + {22}, + { {"0"_b, "str_32_ldst_regoff"}, + {"1"_b, "ldr_32_ldst_regoff"}, + }, + }, + + { "_hrmsnk", + {9, 8, 7, 6, 5, 2, 1}, + { {"1111111"_b, "eretaa_64e_branch_reg"}, + }, + }, + + { "_hrpkqg", + {18, 17, 12}, + { {"000"_b, "st4_asisdlso_d4_4d"}, + }, + }, + + { "_hrxtnj", + {30, 23, 22, 13, 12, 11, 10}, + { {"1010000"_b, "sm3partw1_vvv4_cryptosha512_3"}, + {"1010001"_b, "sm3partw2_vvv4_cryptosha512_3"}, + {"1010010"_b, "sm4ekey_vvv4_cryptosha512_3"}, + }, + }, + + { "_hrxyts", + {23, 22, 20, 19, 18, 13}, + { {"00000x"_b, "orr_z_zi"}, + {"01000x"_b, "eor_z_zi"}, + {"10000x"_b, "and_z_zi"}, + {"11000x"_b, "dupm_z_i"}, + {"xx1xx0"_b, "fcpy_z_p_i"}, + }, + }, + + { "_hrymnk", + {18}, + { {"0"_b, "st1_asisdlso_h1_1h"}, + }, + }, + + { "_hspyhv", + {13, 12}, + { {"10"_b, "umax_64_dp_2src"}, + }, + }, + + { "_hsrkqt", + {13, 12, 11, 10}, + { {"0000"_b, 
"addhn_asimddiff_n"}, + {"0001"_b, "sshl_asimdsame_only"}, + {"0010"_b, "_qtgrzv"}, + {"0011"_b, "sqshl_asimdsame_only"}, + {"0100"_b, "sabal_asimddiff_l"}, + {"0101"_b, "srshl_asimdsame_only"}, + {"0110"_b, "_vhkpvn"}, + {"0111"_b, "sqrshl_asimdsame_only"}, + {"1000"_b, "subhn_asimddiff_n"}, + {"1001"_b, "smax_asimdsame_only"}, + {"1010"_b, "_rgztgm"}, + {"1011"_b, "smin_asimdsame_only"}, + {"1100"_b, "sabdl_asimddiff_l"}, + {"1101"_b, "sabd_asimdsame_only"}, + {"1110"_b, "_grmpht"}, + {"1111"_b, "saba_asimdsame_only"}, + }, + }, + + { "_hthxvr", + {23, 22, 9}, + { {"010"_b, "pfirst_p_p_p"}, + }, + }, + + { "_htjmmx", + {30}, + { {"0"_b, "tbnz_only_testbranch"}, + }, + }, + + { "_htkpks", + {30, 23, 22}, + { {"000"_b, "add_32_addsub_ext"}, + {"100"_b, "sub_32_addsub_ext"}, + }, + }, + + { "_htplsj", + {4}, + { {"0"_b, "cmpeq_p_p_zz"}, + {"1"_b, "cmpne_p_p_zz"}, + }, + }, + + { "_htqpks", + {30, 20, 19, 18, 17, 16, 13}, + { {"000000x"_b, "add_z_zi"}, + {"000001x"_b, "sub_z_zi"}, + {"000011x"_b, "subr_z_zi"}, + {"000100x"_b, "sqadd_z_zi"}, + {"000101x"_b, "uqadd_z_zi"}, + {"000110x"_b, "sqsub_z_zi"}, + {"000111x"_b, "uqsub_z_zi"}, + {"0010000"_b, "smax_z_zi"}, + {"0010010"_b, "umax_z_zi"}, + {"0010100"_b, "smin_z_zi"}, + {"0010110"_b, "umin_z_zi"}, + {"0100000"_b, "mul_z_zi"}, + {"011000x"_b, "dup_z_i"}, + {"0110010"_b, "fdup_z_i"}, + {"1xxxxx0"_b, "fnmad_z_p_zzz"}, + {"1xxxxx1"_b, "fnmsb_z_p_zzz"}, + }, + }, + + { "_htrtzz", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_b_ldapstl_simd"}, + {"001xx10"_b, "ldapur_b_ldapstl_simd"}, + {"010xx10"_b, "stlur_q_ldapstl_simd"}, + {"011xx10"_b, "ldapur_q_ldapstl_simd"}, + {"100xx10"_b, "stlur_h_ldapstl_simd"}, + {"101xx10"_b, "ldapur_h_ldapstl_simd"}, + {"x000001"_b, "cpypwn_cpy_memcms"}, + {"x000101"_b, "cpypwtwn_cpy_memcms"}, + {"x001001"_b, "cpyprtwn_cpy_memcms"}, + {"x001101"_b, "cpyptwn_cpy_memcms"}, + {"x010001"_b, "cpymwn_cpy_memcms"}, + {"x010101"_b, "cpymwtwn_cpy_memcms"}, + {"x011001"_b, "cpymrtwn_cpy_memcms"}, + {"x011101"_b, "cpymtwn_cpy_memcms"}, + {"x100001"_b, "cpyewn_cpy_memcms"}, + {"x100101"_b, "cpyewtwn_cpy_memcms"}, + {"x101001"_b, "cpyertwn_cpy_memcms"}, + {"x101101"_b, "cpyetwn_cpy_memcms"}, + {"x110001"_b, "setgm_set_memcms"}, + {"x110101"_b, "setgmt_set_memcms"}, + {"x111001"_b, "setgmn_set_memcms"}, + {"x111101"_b, "setgmtn_set_memcms"}, + }, + }, + + { "_htsjxj", + {23, 22, 13, 12, 11, 10}, + { {"001010"_b, "pmullb_z_zz_q"}, + {"001011"_b, "pmullt_z_zz_q"}, + {"101010"_b, "pmullb_z_zz"}, + {"101011"_b, "pmullt_z_zz"}, + {"x11010"_b, "pmullb_z_zz"}, + {"x11011"_b, "pmullt_z_zz"}, + {"xx0000"_b, "saddwb_z_zz"}, + {"xx0001"_b, "saddwt_z_zz"}, + {"xx0010"_b, "uaddwb_z_zz"}, + {"xx0011"_b, "uaddwt_z_zz"}, + {"xx0100"_b, "ssubwb_z_zz"}, + {"xx0101"_b, "ssubwt_z_zz"}, + {"xx0110"_b, "usubwb_z_zz"}, + {"xx0111"_b, "usubwt_z_zz"}, + {"xx1000"_b, "sqdmullb_z_zz"}, + {"xx1001"_b, "sqdmullt_z_zz"}, + {"xx1100"_b, "smullb_z_zz"}, + {"xx1101"_b, "smullt_z_zz"}, + {"xx1110"_b, "umullb_z_zz"}, + {"xx1111"_b, "umullt_z_zz"}, + }, + }, + + { "_hvhrsq", + {30, 23, 22}, + { {"000"_b, "str_32_ldst_pos"}, + {"001"_b, "ldr_32_ldst_pos"}, + {"010"_b, "ldrsw_64_ldst_pos"}, + {"100"_b, "str_64_ldst_pos"}, + {"101"_b, "ldr_64_ldst_pos"}, + {"110"_b, "prfm_p_ldst_pos"}, + }, + }, + + { "_hvmyjz", + {13, 12}, + { {"00"_b, "subps_64s_dp_2src"}, + }, + }, + + { "_hvnhmh", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlurb_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapurb_32_ldapstl_unscaled"}, + {"010xx00"_b, 
"ldapursb_64_ldapstl_unscaled"}, + {"011xx00"_b, "ldapursb_32_ldapstl_unscaled"}, + {"100xx00"_b, "stlurh_32_ldapstl_unscaled"}, + {"101xx00"_b, "ldapurh_32_ldapstl_unscaled"}, + {"110xx00"_b, "ldapursh_64_ldapstl_unscaled"}, + {"111xx00"_b, "ldapursh_32_ldapstl_unscaled"}, + {"x000001"_b, "cpyfpwn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtwn_cpy_memcms"}, + {"x001001"_b, "cpyfprtwn_cpy_memcms"}, + {"x001101"_b, "cpyfptwn_cpy_memcms"}, + {"x010001"_b, "cpyfmwn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtwn_cpy_memcms"}, + {"x011001"_b, "cpyfmrtwn_cpy_memcms"}, + {"x011101"_b, "cpyfmtwn_cpy_memcms"}, + {"x100001"_b, "cpyfewn_cpy_memcms"}, + {"x100101"_b, "cpyfewtwn_cpy_memcms"}, + {"x101001"_b, "cpyfertwn_cpy_memcms"}, + {"x101101"_b, "cpyfetwn_cpy_memcms"}, + {"x110001"_b, "setm_set_memcms"}, + {"x110101"_b, "setmt_set_memcms"}, + {"x111001"_b, "setmn_set_memcms"}, + {"x111101"_b, "setmtn_set_memcms"}, + }, + }, + + { "_hvrjyt", + {30, 23, 22}, + { {"000"_b, "sbfm_32m_bitfield"}, + {"010"_b, "extr_32_extract"}, + {"100"_b, "ubfm_32m_bitfield"}, + }, + }, + + { "_hvyjnk", + {11}, + { {"0"_b, "sqrdmulh_z_zzi_h"}, + }, + }, + + { "_hxgngr", + {23, 22, 13}, + { {"100"_b, "fmlsl_asimdelem_lh"}, + {"xx1"_b, "smlsl_asimdelem_l"}, + }, + }, + + { "_hxlznn", + {30, 23, 22, 13}, + { {"0000"_b, "ld1sh_z_p_br_s32"}, + {"0001"_b, "ldff1sh_z_p_br_s32"}, + {"0010"_b, "ld1w_z_p_br_u64"}, + {"0011"_b, "ldff1w_z_p_br_u64"}, + {"0100"_b, "ld1sb_z_p_br_s32"}, + {"0101"_b, "ldff1sb_z_p_br_s32"}, + {"0110"_b, "ld1d_z_p_br_u64"}, + {"0111"_b, "ldff1d_z_p_br_u64"}, + {"1001"_b, "st2w_z_p_br_contiguous"}, + {"1010"_b, "st1w_z_p_br"}, + {"1011"_b, "st4w_z_p_br_contiguous"}, + {"1100"_b, "str_z_bi"}, + {"1101"_b, "st2d_z_p_br_contiguous"}, + {"1110"_b, "st1d_z_p_br"}, + {"1111"_b, "st4d_z_p_br_contiguous"}, + }, + }, + + { "_hxrnns", + {23, 22, 13, 12}, + { {"0000"_b, "fmul_s_floatdp2"}, + {"0001"_b, "fdiv_s_floatdp2"}, + {"0010"_b, "fadd_s_floatdp2"}, + {"0011"_b, "fsub_s_floatdp2"}, + {"0100"_b, "fmul_d_floatdp2"}, + {"0101"_b, "fdiv_d_floatdp2"}, + {"0110"_b, "fadd_d_floatdp2"}, + {"0111"_b, "fsub_d_floatdp2"}, + {"1100"_b, "fmul_h_floatdp2"}, + {"1101"_b, "fdiv_h_floatdp2"}, + {"1110"_b, "fadd_h_floatdp2"}, + {"1111"_b, "fsub_h_floatdp2"}, + }, + }, + + { "_hxxqks", + {23}, + { {"0"_b, "fmla_asimdsame_only"}, + {"1"_b, "fmls_asimdsame_only"}, + }, + }, + + { "_hxxxyy", + {13, 12}, + { {"00"_b, "cpyfm_cpy_memcms"}, + {"01"_b, "cpyfmwt_cpy_memcms"}, + {"10"_b, "cpyfmrt_cpy_memcms"}, + {"11"_b, "cpyfmt_cpy_memcms"}, + }, + }, + + { "_hykhmt", + {20, 19, 18, 17, 16}, + { {"00000"_b, "saddv_r_p_z"}, + {"00001"_b, "uaddv_r_p_z"}, + {"01000"_b, "smaxv_r_p_z"}, + {"01001"_b, "umaxv_r_p_z"}, + {"01010"_b, "sminv_r_p_z"}, + {"01011"_b, "uminv_r_p_z"}, + {"1000x"_b, "movprfx_z_p_z"}, + {"11000"_b, "orv_r_p_z"}, + {"11001"_b, "eorv_r_p_z"}, + {"11010"_b, "andv_r_p_z"}, + }, + }, + + { "_hynprk", + {18}, + { {"0"_b, "st2_asisdlso_h2_2h"}, + }, + }, + + { "_hyskth", + {22}, + { {"0"_b, "str_64_ldst_regoff"}, + {"1"_b, "ldr_64_ldst_regoff"}, + }, + }, + + { "_hytrnv", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "frinta_asimdmiscfp16_r"}, + {"0x00001"_b, "frinta_asimdmisc_r"}, + {"xx00000"_b, "cmge_asimdmisc_z"}, + }, + }, + + { "_hzkglv", + {30, 23, 22, 13}, + { {"0000"_b, "ld1b_z_p_br_u8"}, + {"0001"_b, "ldff1b_z_p_br_u8"}, + {"0010"_b, "ld1b_z_p_br_u32"}, + {"0011"_b, "ldff1b_z_p_br_u32"}, + {"0100"_b, "ld1sw_z_p_br_s64"}, + {"0101"_b, "ldff1sw_z_p_br_s64"}, + {"0110"_b, "ld1h_z_p_br_u32"}, + {"0111"_b, "ldff1h_z_p_br_u32"}, + 
{"1001"_b, "stnt1b_z_p_br_contiguous"}, + {"1011"_b, "st3b_z_p_br_contiguous"}, + {"10x0"_b, "st1b_z_p_br"}, + {"1101"_b, "stnt1h_z_p_br_contiguous"}, + {"1111"_b, "st3h_z_p_br_contiguous"}, + {"11x0"_b, "st1h_z_p_br"}, + }, + }, + + { "_hzkxht", + {22, 20}, + { {"00"_b, "_zrxhzq"}, + {"01"_b, "msr_sr_systemmove"}, + {"10"_b, "_krllsy"}, + {"11"_b, "msrr_sr_systemmovepr"}, + }, + }, + + { "_hzsxkp", + {30, 13}, + { {"00"_b, "_jlrrlt"}, + {"01"_b, "_jrlynj"}, + {"10"_b, "_ghpxms"}, + {"11"_b, "_nyjtng"}, + }, + }, + + { "_jggxjz", + {13, 12}, + { {"00"_b, "cmtst_asisdsame_only"}, + }, + }, + + { "_jgklkt", + {30}, + { {"0"_b, "ldrsw_64_loadlit"}, + {"1"_b, "prfm_p_loadlit"}, + }, + }, + + { "_jgmlpk", + {4}, + { {"0"_b, "match_p_p_zz"}, + {"1"_b, "nmatch_p_p_zz"}, + }, + }, + + { "_jgsryt", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldaxrh_lr32_ldstexcl"}, + }, + }, + + { "_jgxqzr", + {13, 12, 11, 10}, + { {"0000"_b, "_xzjvkv"}, + {"0001"_b, "_nqjtqn"}, + {"0011"_b, "_qzmrnj"}, + {"0100"_b, "_xptsns"}, + {"0101"_b, "_qpgxxr"}, + {"0110"_b, "uzp1_asimdperm_only"}, + {"0111"_b, "_rsnvnr"}, + {"1000"_b, "_yszlqj"}, + {"1001"_b, "_lzvxxj"}, + {"1010"_b, "trn1_asimdperm_only"}, + {"1011"_b, "_zmrhxx"}, + {"1100"_b, "_skytvx"}, + {"1101"_b, "_smptxh"}, + {"1110"_b, "zip1_asimdperm_only"}, + {"1111"_b, "_rjvgkl"}, + }, + }, + + { "_jgyhrh", + {4}, + { {"0"_b, "cmplo_p_p_zi"}, + {"1"_b, "cmpls_p_p_zi"}, + }, + }, + + { "_jhkkgv", + {10}, + { {"0"_b, "_qvgtlh"}, + }, + }, + + { "_jhllmn", + {4}, + { {"0"_b, "cmpge_p_p_zz"}, + {"1"_b, "cmpgt_p_p_zz"}, + }, + }, + + { "_jhltlz", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxr_lr64_ldstexcl"}, + }, + }, + + { "_jjgpxz", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_h_floatimm"}, + }, + }, + + { "_jjnvrv", + {20, 19, 18, 17, 16, 13, 12, 4, 3, 2, 1, 0}, + { {"000000001101"_b, "setf8_only_setf"}, + }, + }, + + { "_jkkqvy", + {22, 20, 11}, + { {"100"_b, "uqinch_z_zs"}, + {"101"_b, "uqdech_z_zs"}, + {"110"_b, "dech_z_zs"}, + }, + }, + + { "_jkvsxy", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000010"_b, "rcwcas_c64_rcwcomswap"}, + {"0000011"_b, "rcwcasp_c64_rcwcomswappr"}, + {"0000100"_b, "ldclrp_128_memop_128"}, + {"0001100"_b, "ldsetp_128_memop_128"}, + {"0010010"_b, "rcwcasl_c64_rcwcomswap"}, + {"0010011"_b, "rcwcaspl_c64_rcwcomswappr"}, + {"0010100"_b, "ldclrpl_128_memop_128"}, + {"0011100"_b, "ldsetpl_128_memop_128"}, + {"0100010"_b, "rcwcasa_c64_rcwcomswap"}, + {"0100011"_b, "rcwcaspa_c64_rcwcomswappr"}, + {"0100100"_b, "ldclrpa_128_memop_128"}, + {"0101100"_b, "ldsetpa_128_memop_128"}, + {"0110010"_b, "rcwcasal_c64_rcwcomswap"}, + {"0110011"_b, "rcwcaspal_c64_rcwcomswappr"}, + {"0110100"_b, "ldclrpal_128_memop_128"}, + {"0111100"_b, "ldsetpal_128_memop_128"}, + {"1000010"_b, "rcwscas_c64_rcwcomswap"}, + {"1000011"_b, "rcwscasp_c64_rcwcomswappr"}, + {"1010010"_b, "rcwscasl_c64_rcwcomswap"}, + {"1010011"_b, "rcwscaspl_c64_rcwcomswappr"}, + {"1100010"_b, "rcwscasa_c64_rcwcomswap"}, + {"1100011"_b, "rcwscaspa_c64_rcwcomswappr"}, + {"1110010"_b, "rcwscasal_c64_rcwcomswap"}, + {"1110011"_b, "rcwscaspal_c64_rcwcomswappr"}, + }, + }, + + { "_jkvvtp", + {30, 23, 22}, + { {"100"_b, "bcax_vvv16_crypto4"}, + }, + }, + + { "_jkxyvn", + {23}, + { {"0"_b, "fadd_asimdsame_only"}, + {"1"_b, "fsub_asimdsame_only"}, + }, + }, + + { "_jlnjsy", + {23, 22, 20, 19, 18, 17, 16, 13, 12, 11}, + { {"0011111001"_b, "_ssjnph"}, + }, + }, + + { "_jlrrlt", + {11, 10, 4}, + { {"000"_b, "whilege_p_p_rr"}, + {"001"_b, "whilegt_p_p_rr"}, + {"010"_b, "whilelt_p_p_rr"}, + {"011"_b, "whilele_p_p_rr"}, 
+ {"100"_b, "whilehs_p_p_rr"}, + {"101"_b, "whilehi_p_p_rr"}, + {"110"_b, "whilelo_p_p_rr"}, + {"111"_b, "whilels_p_p_rr"}, + }, + }, + + { "_jmvgsp", + {22, 20, 11}, + { {"100"_b, "sqinch_z_zs"}, + {"101"_b, "sqdech_z_zs"}, + {"110"_b, "inch_z_zs"}, + }, + }, + + { "_jmxstz", + {13, 12, 11, 10}, + { {"0000"_b, "sqdecp_z_p_z"}, + {"0010"_b, "sqdecp_r_p_r_sx"}, + {"0011"_b, "sqdecp_r_p_r_x"}, + }, + }, + + { "_jnktqs", + {18, 17}, + { {"00"_b, "ld1_asisdlso_s1_1s"}, + }, + }, + + { "_jnnmjk", + {23, 22, 20, 19, 16, 13, 12}, + { {"0111100"_b, "fcvtas_asisdmiscfp16_r"}, + {"0111101"_b, "scvtf_asisdmiscfp16_r"}, + {"0x00100"_b, "fcvtas_asisdmisc_r"}, + {"0x00101"_b, "scvtf_asisdmisc_r"}, + {"0x10000"_b, "fmaxnmp_asisdpair_only_h"}, + {"0x10001"_b, "faddp_asisdpair_only_h"}, + {"0x10011"_b, "fmaxp_asisdpair_only_h"}, + {"1111000"_b, "fcmgt_asisdmiscfp16_fz"}, + {"1111001"_b, "fcmeq_asisdmiscfp16_fz"}, + {"1111010"_b, "fcmlt_asisdmiscfp16_fz"}, + {"1111101"_b, "frecpe_asisdmiscfp16_r"}, + {"1111111"_b, "frecpx_asisdmiscfp16_r"}, + {"1x00000"_b, "fcmgt_asisdmisc_fz"}, + {"1x00001"_b, "fcmeq_asisdmisc_fz"}, + {"1x00010"_b, "fcmlt_asisdmisc_fz"}, + {"1x00101"_b, "frecpe_asisdmisc_r"}, + {"1x00111"_b, "frecpx_asisdmisc_r"}, + {"1x10000"_b, "fminnmp_asisdpair_only_h"}, + {"1x10011"_b, "fminp_asisdpair_only_h"}, + }, + }, + + { "_jpvmkz", + {18, 17}, + { {"00"_b, "_jnnmjk"}, + }, + }, + + { "_jqhvhn", + {30, 23, 11, 10}, + { {"0000"_b, "_ygtpyl"}, + {"0010"_b, "_hqvhjp"}, + {"0100"_b, "_xkylhh"}, + {"0110"_b, "_mnxgml"}, + {"1000"_b, "_qyyrqq"}, + {"1001"_b, "ldraa_64_ldst_pac"}, + {"1010"_b, "_kpsnsk"}, + {"1011"_b, "ldraa_64w_ldst_pac"}, + {"1100"_b, "_tyzpxk"}, + {"1101"_b, "ldrab_64_ldst_pac"}, + {"1111"_b, "ldrab_64w_ldst_pac"}, + }, + }, + + { "_jqlgts", + {30, 23, 22}, + { {"000"_b, "str_s_ldst_pos"}, + {"001"_b, "ldr_s_ldst_pos"}, + {"100"_b, "str_d_ldst_pos"}, + {"101"_b, "ldr_d_ldst_pos"}, + }, + }, + + { "_jqrmyp", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fabs_h_floatdp1"}, + {"000010"_b, "fsqrt_h_floatdp1"}, + {"000100"_b, "fcvt_dh_floatdp1"}, + {"001000"_b, "frintp_h_floatdp1"}, + {"001010"_b, "frintz_h_floatdp1"}, + {"001110"_b, "frinti_h_floatdp1"}, + }, + }, + + { "_jqsjtj", + {18}, + { {"0"_b, "st2_asisdlse_r2"}, + }, + }, + + { "_jqtksx", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000000"_b, "ldsmaxb_32_memop"}, + {"0000100"_b, "ldsminb_32_memop"}, + {"0000x10"_b, "strb_32b_ldst_regoff"}, + {"0001000"_b, "ldumaxb_32_memop"}, + {"0001100"_b, "lduminb_32_memop"}, + {"0001x10"_b, "strb_32bl_ldst_regoff"}, + {"0010000"_b, "ldsmaxlb_32_memop"}, + {"0010100"_b, "ldsminlb_32_memop"}, + {"0010x10"_b, "ldrb_32b_ldst_regoff"}, + {"0011000"_b, "ldumaxlb_32_memop"}, + {"0011100"_b, "lduminlb_32_memop"}, + {"0011x10"_b, "ldrb_32bl_ldst_regoff"}, + {"0100000"_b, "ldsmaxab_32_memop"}, + {"0100100"_b, "ldsminab_32_memop"}, + {"0100x10"_b, "ldrsb_64b_ldst_regoff"}, + {"0101000"_b, "ldumaxab_32_memop"}, + {"0101100"_b, "lduminab_32_memop"}, + {"0101x10"_b, "ldrsb_64bl_ldst_regoff"}, + {"0110000"_b, "ldsmaxalb_32_memop"}, + {"0110100"_b, "ldsminalb_32_memop"}, + {"0110x10"_b, "ldrsb_32b_ldst_regoff"}, + {"0111000"_b, "ldumaxalb_32_memop"}, + {"0111100"_b, "lduminalb_32_memop"}, + {"0111x10"_b, "ldrsb_32bl_ldst_regoff"}, + {"1000000"_b, "ldsmaxh_32_memop"}, + {"1000100"_b, "ldsminh_32_memop"}, + {"1001000"_b, "ldumaxh_32_memop"}, + {"1001100"_b, "lduminh_32_memop"}, + {"100xx10"_b, "strh_32_ldst_regoff"}, + {"1010000"_b, "ldsmaxlh_32_memop"}, + {"1010100"_b, "ldsminlh_32_memop"}, + {"1011000"_b, 
"ldumaxlh_32_memop"}, + {"1011100"_b, "lduminlh_32_memop"}, + {"101xx10"_b, "ldrh_32_ldst_regoff"}, + {"1100000"_b, "ldsmaxah_32_memop"}, + {"1100100"_b, "ldsminah_32_memop"}, + {"1101000"_b, "ldumaxah_32_memop"}, + {"1101100"_b, "lduminah_32_memop"}, + {"110xx10"_b, "ldrsh_64_ldst_regoff"}, + {"1110000"_b, "ldsmaxalh_32_memop"}, + {"1110100"_b, "ldsminalh_32_memop"}, + {"1111000"_b, "ldumaxalh_32_memop"}, + {"1111100"_b, "lduminalh_32_memop"}, + {"111xx10"_b, "ldrsh_32_ldst_regoff"}, + }, + }, + + { "_jqvpqx", + {23, 22}, + { {"00"_b, "fmlal_asimdsame_f"}, + {"10"_b, "fmlsl_asimdsame_f"}, + }, + }, + + { "_jqxqql", + {22, 20, 11}, + { {"000"_b, "uqincw_z_zs"}, + {"001"_b, "uqdecw_z_zs"}, + {"010"_b, "decw_z_zs"}, + {"100"_b, "uqincd_z_zs"}, + {"101"_b, "uqdecd_z_zs"}, + {"110"_b, "decd_z_zs"}, + }, + }, + + { "_jrlynj", + {11, 10}, + { {"00"_b, "_gzqvnk"}, + }, + }, + + { "_jrnxzh", + {12}, + { {"0"_b, "cmla_z_zzz"}, + {"1"_b, "sqrdcmlah_z_zzz"}, + }, + }, + + { "_jrqxvn", + {23, 22, 13, 12, 11, 10}, + { {"000000"_b, "tbl_asimdtbl_l3_3"}, + {"000100"_b, "tbx_asimdtbl_l3_3"}, + {"001000"_b, "tbl_asimdtbl_l4_4"}, + {"001100"_b, "tbx_asimdtbl_l4_4"}, + {"xx0110"_b, "uzp2_asimdperm_only"}, + {"xx1010"_b, "trn2_asimdperm_only"}, + {"xx1110"_b, "zip2_asimdperm_only"}, + }, + }, + + { "_jrxtzg", + {30, 23, 22, 11, 10}, + { {"10001"_b, "stg_64spost_ldsttags"}, + {"10010"_b, "stg_64soffset_ldsttags"}, + {"10011"_b, "stg_64spre_ldsttags"}, + {"10100"_b, "ldg_64loffset_ldsttags"}, + {"10101"_b, "stzg_64spost_ldsttags"}, + {"10110"_b, "stzg_64soffset_ldsttags"}, + {"10111"_b, "stzg_64spre_ldsttags"}, + {"11001"_b, "st2g_64spost_ldsttags"}, + {"11010"_b, "st2g_64soffset_ldsttags"}, + {"11011"_b, "st2g_64spre_ldsttags"}, + {"11101"_b, "stz2g_64spost_ldsttags"}, + {"11110"_b, "stz2g_64soffset_ldsttags"}, + {"11111"_b, "stz2g_64spre_ldsttags"}, + }, + }, + + { "_jsqvtn", + {23, 22, 11, 10}, + { {"0000"_b, "_lnsjqy"}, + {"0001"_b, "stg_64spost_ldsttags"}, + {"0010"_b, "stg_64soffset_ldsttags"}, + {"0011"_b, "stg_64spre_ldsttags"}, + {"0100"_b, "ldg_64loffset_ldsttags"}, + {"0101"_b, "stzg_64spost_ldsttags"}, + {"0110"_b, "stzg_64soffset_ldsttags"}, + {"0111"_b, "stzg_64spre_ldsttags"}, + {"1000"_b, "_myzhml"}, + {"1001"_b, "st2g_64spost_ldsttags"}, + {"1010"_b, "st2g_64soffset_ldsttags"}, + {"1011"_b, "st2g_64spre_ldsttags"}, + {"1100"_b, "_mjstgz"}, + {"1101"_b, "stz2g_64spost_ldsttags"}, + {"1110"_b, "stz2g_64soffset_ldsttags"}, + {"1111"_b, "stz2g_64spre_ldsttags"}, + }, + }, + + { "_jvkxtj", + {30, 23, 22}, + { {"000"_b, "stnp_q_ldstnapair_offs"}, + {"001"_b, "ldnp_q_ldstnapair_offs"}, + {"010"_b, "stp_q_ldstpair_post"}, + {"011"_b, "ldp_q_ldstpair_post"}, + }, + }, + + { "_jvnsgt", + {18}, + { {"0"_b, "ld4_asisdlsop_bx4_r4b"}, + {"1"_b, "ld4_asisdlsop_b4_i4b"}, + }, + }, + + { "_jvpjsm", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "_xrnqyn"}, + }, + }, + + { "_jxgpgg", + {13, 12}, + { {"00"_b, "udiv_64_dp_2src"}, + {"10"_b, "asrv_64_dp_2src"}, + }, + }, + + { "_jxgqqz", + {30}, + { {"0"_b, "cbz_64_compbranch"}, + }, + }, + + { "_jxltqm", + {13, 12}, + { {"01"_b, "sqdmull_asisddiff_only"}, + }, + }, + + { "_jxszhy", + {23, 22, 11}, + { {"000"_b, "_rqhryp"}, + }, + }, + + { "_jxyskn", + {13, 12, 11, 10}, + { {"0000"_b, "uqincp_z_p_z"}, + {"0010"_b, "uqincp_r_p_r_uw"}, + {"0011"_b, "uqincp_r_p_r_x"}, + }, + }, + + { "_jymnkk", + {23, 22, 12, 11, 10}, + { {"01000"_b, "bfdot_z_zzzi"}, + {"100x0"_b, "fmlalb_z_zzzi_s"}, + {"100x1"_b, "fmlalt_z_zzzi_s"}, + {"110x0"_b, "bfmlalb_z_zzzi"}, + {"110x1"_b, 
"bfmlalt_z_zzzi"}, + }, + }, + + { "_jyzhnh", + {18}, + { {"0"_b, "st1_asisdlsop_hx1_r1h"}, + {"1"_b, "st1_asisdlsop_h1_i1h"}, + }, + }, + + { "_jzjvtv", + {19, 18, 17, 16, 4}, + { {"00000"_b, "brkbs_p_p_p_z"}, + }, + }, + + { "_jzkqhn", + {23, 22, 12, 11, 10}, + { {"10000"_b, "fmlslb_z_zzz"}, + {"10001"_b, "fmlslt_z_zzz"}, + }, + }, + + { "_jztlrz", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtmu_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtmu_asimdmisc_r"}, + {"1111001"_b, "fcvtzu_asimdmiscfp16_r"}, + {"1x00001"_b, "fcvtzu_asimdmisc_r"}, + {"xx00000"_b, "neg_asimdmisc_r"}, + }, + }, + + { "_jztspt", + {18, 17}, + { {"00"_b, "st4_asisdlso_s4_4s"}, + }, + }, + + { "_kgmqkh", + {30, 23, 22, 13}, + { {"0000"_b, "ld1w_z_p_ai_s"}, + {"0001"_b, "ldff1w_z_p_ai_s"}, + {"0010"_b, "ld1rw_z_p_bi_u32"}, + {"0011"_b, "ld1rw_z_p_bi_u64"}, + {"0110"_b, "ld1rsb_z_p_bi_s16"}, + {"0111"_b, "ld1rd_z_p_bi_u64"}, + {"1000"_b, "ld1w_z_p_ai_d"}, + {"1001"_b, "ldff1w_z_p_ai_d"}, + {"1010"_b, "ld1w_z_p_bz_d_64_scaled"}, + {"1011"_b, "ldff1w_z_p_bz_d_64_scaled"}, + {"1100"_b, "ld1d_z_p_ai_d"}, + {"1101"_b, "ldff1d_z_p_ai_d"}, + {"1110"_b, "ld1d_z_p_bz_d_64_scaled"}, + {"1111"_b, "ldff1d_z_p_bz_d_64_scaled"}, + }, + }, + + { "_kgpgly", + {23, 22, 10}, + { {"100"_b, "smlslb_z_zzzi_s"}, + {"101"_b, "smlslt_z_zzzi_s"}, + {"110"_b, "smlslb_z_zzzi_d"}, + {"111"_b, "smlslt_z_zzzi_d"}, + }, + }, + + { "_kgpsjz", + {13, 12, 11, 10}, + { {"0000"_b, "saddl_asimddiff_l"}, + {"0001"_b, "shadd_asimdsame_only"}, + {"0010"_b, "_rkrlsy"}, + {"0011"_b, "sqadd_asimdsame_only"}, + {"0100"_b, "saddw_asimddiff_w"}, + {"0101"_b, "srhadd_asimdsame_only"}, + {"0110"_b, "_vypgrt"}, + {"0111"_b, "_xygvjp"}, + {"1000"_b, "ssubl_asimddiff_l"}, + {"1001"_b, "shsub_asimdsame_only"}, + {"1010"_b, "_pjhmvy"}, + {"1011"_b, "sqsub_asimdsame_only"}, + {"1100"_b, "ssubw_asimddiff_w"}, + {"1101"_b, "cmgt_asimdsame_only"}, + {"1110"_b, "_ygghnn"}, + {"1111"_b, "cmge_asimdsame_only"}, + }, + }, + + { "_kgygky", + {30, 23, 22}, + { {"000"_b, "sbfm_32m_bitfield"}, + {"100"_b, "ubfm_32m_bitfield"}, + }, + }, + + { "_khjvqq", + {22, 11}, + { {"00"_b, "sqrdmulh_z_zzi_s"}, + {"10"_b, "sqrdmulh_z_zzi_d"}, + }, + }, + + { "_khrsgv", + {22, 20, 19, 13, 12}, + { {"0x100"_b, "sri_asisdshf_r"}, + {"0x101"_b, "sli_asisdshf_r"}, + {"0x110"_b, "sqshlu_asisdshf_r"}, + {"0x111"_b, "uqshl_asisdshf_r"}, + {"10x00"_b, "sri_asisdshf_r"}, + {"10x01"_b, "sli_asisdshf_r"}, + {"10x10"_b, "sqshlu_asisdshf_r"}, + {"10x11"_b, "uqshl_asisdshf_r"}, + {"11100"_b, "sri_asisdshf_r"}, + {"11101"_b, "sli_asisdshf_r"}, + {"11110"_b, "sqshlu_asisdshf_r"}, + {"11111"_b, "uqshl_asisdshf_r"}, + {"x1000"_b, "sri_asisdshf_r"}, + {"x1001"_b, "sli_asisdshf_r"}, + {"x1010"_b, "sqshlu_asisdshf_r"}, + {"x1011"_b, "uqshl_asisdshf_r"}, + }, + }, + + { "_khtsmx", + {18}, + { {"0"_b, "ld4_asisdlsop_hx4_r4h"}, + {"1"_b, "ld4_asisdlsop_h4_i4h"}, + }, + }, + + { "_khvvtr", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "rev16_32_dp_1src"}, + {"0000001"_b, "cls_32_dp_1src"}, + }, + }, + + { "_kjpxvh", + {20, 19, 18}, + { {"000"_b, "_yyrkmn"}, + }, + }, + + { "_kjqynn", + {4}, + { {"0"_b, "cmphs_p_p_zi"}, + {"1"_b, "cmphi_p_p_zi"}, + }, + }, + + { "_kjsrkm", + {18, 17, 16, 13, 12, 11, 10, 9, 8, 7, 4, 3, 2, 1, 0}, + { {"000000000011111"_b, "_zztypv"}, + }, + }, + + { "_kkkltp", + {30}, + { {"1"_b, "_sqkkqy"}, + }, + }, + + { "_kkpxth", + {18}, + { {"0"_b, "ld1_asisdlsop_bx1_r1b"}, + {"1"_b, "ld1_asisdlsop_b1_i1b"}, + }, + }, + + { "_kktglv", + {30, 13, 12}, + { {"000"_b, "_njvkjq"}, + {"001"_b, 
"_rpzykx"}, + {"010"_b, "_zzvxvh"}, + {"011"_b, "_yqxnzl"}, + {"100"_b, "_gxmnkl"}, + {"110"_b, "_lkxgjy"}, + {"111"_b, "_vjmklj"}, + }, + }, + + { "_kktzst", + {13, 12, 11, 10}, + { {"1111"_b, "frsqrts_asisdsamefp16_only"}, + }, + }, + + { "_kkvrzq", + {23, 22, 9, 8, 7, 6, 5}, + { {"0000000"_b, "pfalse_p"}, + }, + }, + + { "_klrksl", + {30, 23, 22, 19, 16}, + { {"10010"_b, "aesmc_b_cryptoaes"}, + {"x0x01"_b, "fcvtn_asimdmisc_n"}, + {"x1001"_b, "bfcvtn_asimdmisc_4s"}, + {"xxx00"_b, "sadalp_asimdmisc_p"}, + }, + }, + + { "_klsmsv", + {30, 23, 22, 10}, + { {"1001"_b, "ins_asimdins_iv_v"}, + {"x000"_b, "ext_asimdext_only"}, + }, + }, + + { "_kltlmp", + {22, 20, 19, 13, 12}, + { {"0x100"_b, "ushr_asisdshf_r"}, + {"0x101"_b, "usra_asisdshf_r"}, + {"0x110"_b, "urshr_asisdshf_r"}, + {"0x111"_b, "ursra_asisdshf_r"}, + {"10x00"_b, "ushr_asisdshf_r"}, + {"10x01"_b, "usra_asisdshf_r"}, + {"10x10"_b, "urshr_asisdshf_r"}, + {"10x11"_b, "ursra_asisdshf_r"}, + {"11100"_b, "ushr_asisdshf_r"}, + {"11101"_b, "usra_asisdshf_r"}, + {"11110"_b, "urshr_asisdshf_r"}, + {"11111"_b, "ursra_asisdshf_r"}, + {"x1000"_b, "ushr_asisdshf_r"}, + {"x1001"_b, "usra_asisdshf_r"}, + {"x1010"_b, "urshr_asisdshf_r"}, + {"x1011"_b, "ursra_asisdshf_r"}, + }, + }, + + { "_klxxgx", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fmov_s_floatdp1"}, + {"000010"_b, "fneg_s_floatdp1"}, + {"001000"_b, "frintn_s_floatdp1"}, + {"001010"_b, "frintm_s_floatdp1"}, + {"001100"_b, "frinta_s_floatdp1"}, + {"001110"_b, "frintx_s_floatdp1"}, + {"010000"_b, "frint32z_s_floatdp1"}, + {"010010"_b, "frint64z_s_floatdp1"}, + }, + }, + + { "_kmqlmz", + {18}, + { {"0"_b, "st1_asisdlso_b1_1b"}, + }, + }, + + { "_knkjnz", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1sh_z_p_bi_s32"}, + {"00011"_b, "ldnf1sh_z_p_bi_s32"}, + {"00101"_b, "ld1w_z_p_bi_u64"}, + {"00111"_b, "ldnf1w_z_p_bi_u64"}, + {"01001"_b, "ld1sb_z_p_bi_s32"}, + {"01011"_b, "ldnf1sb_z_p_bi_s32"}, + {"01101"_b, "ld1d_z_p_bi_u64"}, + {"01111"_b, "ldnf1d_z_p_bi_u64"}, + {"100x0"_b, "st1w_z_p_bz_d_x32_scaled"}, + {"100x1"_b, "st1w_z_p_bz_d_64_scaled"}, + {"101x0"_b, "st1w_z_p_bz_s_x32_scaled"}, + {"101x1"_b, "st1w_z_p_ai_s"}, + {"110x0"_b, "st1d_z_p_bz_d_x32_scaled"}, + {"110x1"_b, "st1d_z_p_bz_d_64_scaled"}, + }, + }, + + { "_knpjtt", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxrh_lr32_ldstexcl"}, + }, + }, + + { "_kpgghm", + {22, 20, 19, 18, 17, 16, 13, 12}, + { {"01111100"_b, "ldapr_64l_memop"}, + }, + }, + + { "_kpnlmr", + {20, 19, 18, 17, 16}, + { {"00000"_b, "clz_asimdmisc_r"}, + {"00001"_b, "uqxtn_asimdmisc_n"}, + }, + }, + + { "_kppzvh", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_s_ldapstl_simd"}, + {"001xx10"_b, "ldapur_s_ldapstl_simd"}, + {"100xx10"_b, "stlur_d_ldapstl_simd"}, + {"101xx10"_b, "ldapur_d_ldapstl_simd"}, + {"x000001"_b, "cpyprn_cpy_memcms"}, + {"x000101"_b, "cpypwtrn_cpy_memcms"}, + {"x001001"_b, "cpyprtrn_cpy_memcms"}, + {"x001101"_b, "cpyptrn_cpy_memcms"}, + {"x010001"_b, "cpymrn_cpy_memcms"}, + {"x010101"_b, "cpymwtrn_cpy_memcms"}, + {"x011001"_b, "cpymrtrn_cpy_memcms"}, + {"x011101"_b, "cpymtrn_cpy_memcms"}, + {"x100001"_b, "cpyern_cpy_memcms"}, + {"x100101"_b, "cpyewtrn_cpy_memcms"}, + {"x101001"_b, "cpyertrn_cpy_memcms"}, + {"x101101"_b, "cpyetrn_cpy_memcms"}, + {"x110001"_b, "setge_set_memcms"}, + {"x110101"_b, "setget_set_memcms"}, + {"x111001"_b, "setgen_set_memcms"}, + {"x111101"_b, "setgetn_set_memcms"}, + }, + }, + + { "_kpsnsk", + {22}, + { {"0"_b, "str_64_ldst_regoff"}, + {"1"_b, "ldr_64_ldst_regoff"}, + }, + }, + + { "_kqsqly", + {18}, + { {"0"_b, 
"st1_asisdlsep_r2_r2"}, + {"1"_b, "st1_asisdlsep_i2_i2"}, + }, + }, + + { "_kqstrr", + {18, 17, 12}, + { {"000"_b, "st3_asisdlso_d3_3d"}, + }, + }, + + { "_kqvljp", + {18, 17, 16}, + { {"000"_b, "fabd_z_p_zz"}, + {"001"_b, "fscale_z_p_zz"}, + {"010"_b, "fmulx_z_p_zz"}, + {"100"_b, "fdivr_z_p_zz"}, + {"101"_b, "fdiv_z_p_zz"}, + }, + }, + + { "_kqzmtr", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1b_z_p_bi_u16"}, + {"00011"_b, "ldnf1b_z_p_bi_u16"}, + {"00101"_b, "ld1b_z_p_bi_u64"}, + {"00111"_b, "ldnf1b_z_p_bi_u64"}, + {"01001"_b, "ld1h_z_p_bi_u16"}, + {"01011"_b, "ldnf1h_z_p_bi_u16"}, + {"01101"_b, "ld1h_z_p_bi_u64"}, + {"01111"_b, "ldnf1h_z_p_bi_u64"}, + {"101x1"_b, "st1b_z_p_ai_s"}, + {"110x0"_b, "st1h_z_p_bz_d_x32_scaled"}, + {"110x1"_b, "st1h_z_p_bz_d_64_scaled"}, + {"111x0"_b, "st1h_z_p_bz_s_x32_scaled"}, + {"111x1"_b, "st1h_z_p_ai_s"}, + }, + }, + + { "_krllsy", + {19}, + { {"1"_b, "sysp_cr_syspairinstrs"}, + }, + }, + + { "_krtvhr", + {12, 10}, + { {"00"_b, "_xvmxrg"}, + {"01"_b, "_mvvngm"}, + {"10"_b, "_mkyyng"}, + {"11"_b, "_vvzsmg"}, + }, + }, + + { "_krvxxx", + {12, 9, 8, 7, 6, 5}, + { {"100000"_b, "_skjqrx"}, + }, + }, + + { "_ksgpqz", + {30}, + { {"1"_b, "_trjmmn"}, + }, + }, + + { "_kshtnj", + {23, 22, 13, 12, 11, 10}, + { {"01x1x0"_b, "fcmla_asimdelem_c_h"}, + {"0x0001"_b, "sri_asimdshf_r"}, + {"0x0101"_b, "sli_asimdshf_r"}, + {"0x1001"_b, "sqshlu_asimdshf_r"}, + {"0x1101"_b, "uqshl_asimdshf_r"}, + {"10x1x0"_b, "fcmla_asimdelem_c_s"}, + {"xx00x0"_b, "mls_asimdelem_r"}, + {"xx10x0"_b, "umlsl_asimdelem_l"}, + }, + }, + + { "_kskqmz", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000011"_b, "sqabs_asisdmisc_r"}, + {"0000100"_b, "sqxtn_asisdmisc_n"}, + }, + }, + + { "_ksrkkn", + {22}, + { {"0"_b, "str_32_ldst_regoff"}, + {"1"_b, "ldr_32_ldst_regoff"}, + }, + }, + + { "_kssltr", + {13, 12, 11, 10}, + { {"0000"_b, "smull_asimddiff_l"}, + {"0001"_b, "_pstgvl"}, + {"0010"_b, "_ztlysk"}, + {"0011"_b, "_hxxqks"}, + {"0100"_b, "sqdmull_asimddiff_l"}, + {"0101"_b, "_jkxyvn"}, + {"0110"_b, "_lvsrnj"}, + {"0111"_b, "_vvgnhm"}, + {"1000"_b, "pmull_asimddiff_l"}, + {"1001"_b, "_skqzyg"}, + {"1010"_b, "_szqlsn"}, + {"1011"_b, "_jqvpqx"}, + {"1101"_b, "_yyvjqv"}, + {"1110"_b, "_xlyppq"}, + {"1111"_b, "_mhljkp"}, + }, + }, + + { "_ktngnm", + {12, 10}, + { {"00"_b, "_hxgngr"}, + {"01"_b, "_ngkgsg"}, + {"10"_b, "_plrggq"}, + {"11"_b, "_kxztps"}, + }, + }, + + { "_ktpxrr", + {30, 23, 22, 13, 12, 11, 10}, + { {"0001111"_b, "casp_cp32_ldstexcl"}, + {"0011111"_b, "caspa_cp32_ldstexcl"}, + {"0101111"_b, "casb_c32_ldstexcl"}, + {"0111111"_b, "casab_c32_ldstexcl"}, + {"1001111"_b, "casp_cp64_ldstexcl"}, + {"1011111"_b, "caspa_cp64_ldstexcl"}, + {"1101111"_b, "cash_c32_ldstexcl"}, + {"1111111"_b, "casah_c32_ldstexcl"}, + }, + }, + + { "_ktsgth", + {23, 22}, + { {"00"_b, "fcsel_s_floatsel"}, + {"01"_b, "fcsel_d_floatsel"}, + {"11"_b, "fcsel_h_floatsel"}, + }, + }, + + { "_ktyppm", + {11, 10}, + { {"00"_b, "asr_z_zw"}, + {"01"_b, "lsr_z_zw"}, + {"11"_b, "lsl_z_zw"}, + }, + }, + + { "_ktyrgy", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_s_floatimm"}, + }, + }, + + { "_kvnqhn", + {22, 20, 11}, + { {"000"_b, "sqincw_r_rs_sx"}, + {"001"_b, "sqdecw_r_rs_sx"}, + {"010"_b, "sqincw_r_rs_x"}, + {"011"_b, "sqdecw_r_rs_x"}, + {"100"_b, "sqincd_r_rs_sx"}, + {"101"_b, "sqdecd_r_rs_sx"}, + {"110"_b, "sqincd_r_rs_x"}, + {"111"_b, "sqdecd_r_rs_x"}, + }, + }, + + { "_kxhmlx", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtnu_asisdmiscfp16_r"}, + {"0x00001"_b, "fcvtnu_asisdmisc_r"}, + {"1111001"_b, "fcvtpu_asisdmiscfp16_r"}, + 
{"1x00001"_b, "fcvtpu_asisdmisc_r"}, + }, + }, + + { "_kxmjsh", + {20, 19, 18, 17, 16}, + { {"10000"_b, "fmaxp_asisdpair_only_sd"}, + }, + }, + + { "_kxmxxm", + {23}, + { {"0"_b, "fcmge_asimdsame_only"}, + {"1"_b, "fcmgt_asimdsame_only"}, + }, + }, + + { "_kxpqhv", + {30, 23, 22, 11, 10}, + { {"10001"_b, "stg_64spost_ldsttags"}, + {"10010"_b, "stg_64soffset_ldsttags"}, + {"10011"_b, "stg_64spre_ldsttags"}, + {"10100"_b, "ldg_64loffset_ldsttags"}, + {"10101"_b, "stzg_64spost_ldsttags"}, + {"10110"_b, "stzg_64soffset_ldsttags"}, + {"10111"_b, "stzg_64spre_ldsttags"}, + {"11001"_b, "st2g_64spost_ldsttags"}, + {"11010"_b, "st2g_64soffset_ldsttags"}, + {"11011"_b, "st2g_64spre_ldsttags"}, + {"11101"_b, "stz2g_64spost_ldsttags"}, + {"11110"_b, "stz2g_64soffset_ldsttags"}, + {"11111"_b, "stz2g_64spre_ldsttags"}, + }, + }, + + { "_kxtqjh", + {23, 22}, + { {"01"_b, "_mhnlsy"}, + {"10"_b, "xar_vvv2_crypto3_imm6"}, + {"11"_b, "_spxvlt"}, + }, + }, + + { "_kxvvkq", + {30, 23, 13}, + { {"000"_b, "ld1b_z_p_bz_s_x32_unscaled"}, + {"001"_b, "ldff1b_z_p_bz_s_x32_unscaled"}, + {"010"_b, "ld1h_z_p_bz_s_x32_unscaled"}, + {"011"_b, "ldff1h_z_p_bz_s_x32_unscaled"}, + {"100"_b, "ld1b_z_p_bz_d_x32_unscaled"}, + {"101"_b, "ldff1b_z_p_bz_d_x32_unscaled"}, + {"110"_b, "ld1h_z_p_bz_d_x32_unscaled"}, + {"111"_b, "ldff1h_z_p_bz_d_x32_unscaled"}, + }, + }, + + { "_kxztps", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "orr_asimdimm_l_sl"}, + {"00x100"_b, "shl_asimdshf_r"}, + {"00x110"_b, "sqshl_asimdshf_r"}, + {"010x00"_b, "shl_asimdshf_r"}, + {"010x10"_b, "sqshl_asimdshf_r"}, + {"011100"_b, "shl_asimdshf_r"}, + {"011110"_b, "sqshl_asimdshf_r"}, + {"0x1000"_b, "shl_asimdshf_r"}, + {"0x1010"_b, "sqshl_asimdshf_r"}, + }, + }, + + { "_kyhhqt", + {23, 20, 19, 18, 17, 16, 13}, + { {"0000000"_b, "ld1r_asisdlso_r1"}, + {"0000001"_b, "ld3r_asisdlso_r3"}, + {"10xxxx0"_b, "ld1r_asisdlsop_rx1_r"}, + {"10xxxx1"_b, "ld3r_asisdlsop_rx3_r"}, + {"110xxx0"_b, "ld1r_asisdlsop_rx1_r"}, + {"110xxx1"_b, "ld3r_asisdlsop_rx3_r"}, + {"1110xx0"_b, "ld1r_asisdlsop_rx1_r"}, + {"1110xx1"_b, "ld3r_asisdlsop_rx3_r"}, + {"11110x0"_b, "ld1r_asisdlsop_rx1_r"}, + {"11110x1"_b, "ld3r_asisdlsop_rx3_r"}, + {"1111100"_b, "ld1r_asisdlsop_rx1_r"}, + {"1111101"_b, "ld3r_asisdlsop_rx3_r"}, + {"1111110"_b, "ld1r_asisdlsop_r1_i"}, + {"1111111"_b, "ld3r_asisdlsop_r3_i"}, + }, + }, + + { "_kyjxrr", + {30, 13}, + { {"00"_b, "_qtxpky"}, + {"01"_b, "_hnjrmp"}, + {"11"_b, "_vzjvtv"}, + }, + }, + + { "_kynxnz", + {30, 23, 22, 20, 19}, + { {"0xxxx"_b, "bl_only_branch_imm"}, + {"10001"_b, "sysl_rc_systeminstrs"}, + {"1001x"_b, "mrs_rs_systemmove"}, + {"1011x"_b, "mrrs_rs_systemmovepr"}, + }, + }, + + { "_kyspnn", + {22}, + { {"0"_b, "sqdmullb_z_zzi_s"}, + {"1"_b, "sqdmullb_z_zzi_d"}, + }, + }, + + { "_kyxrqg", + {10}, + { {"0"_b, "uabalb_z_zzz"}, + {"1"_b, "uabalt_z_zzz"}, + }, + }, + + { "_kzjxxk", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "rbit_32_dp_1src"}, + {"0000001"_b, "clz_32_dp_1src"}, + {"0000010"_b, "abs_32_dp_1src"}, + }, + }, + + { "_kzksnv", + {13, 12}, + { {"00"_b, "sqshl_asisdsame_only"}, + {"01"_b, "sqrshl_asisdsame_only"}, + }, + }, + + { "_kzmvpk", + {23, 22, 10}, + { {"100"_b, "smlalb_z_zzzi_s"}, + {"101"_b, "smlalt_z_zzzi_s"}, + {"110"_b, "smlalb_z_zzzi_d"}, + {"111"_b, "smlalt_z_zzzi_d"}, + }, + }, + + { "_kzprzt", + {9, 8, 7, 6, 5, 2, 1}, + { {"1111111"_b, "retaa_64e_branch_reg"}, + }, + }, + + { "_kzpyzy", + {30, 23, 22, 13}, + { {"0000"_b, "ld1sh_z_p_br_s64"}, + {"0001"_b, "ldff1sh_z_p_br_s64"}, + {"0010"_b, "ld1w_z_p_br_u32"}, + {"0011"_b, 
"ldff1w_z_p_br_u32"}, + {"0100"_b, "ld1sb_z_p_br_s64"}, + {"0101"_b, "ldff1sb_z_p_br_s64"}, + {"0110"_b, "ld1sb_z_p_br_s16"}, + {"0111"_b, "ldff1sb_z_p_br_s16"}, + {"1001"_b, "stnt1w_z_p_br_contiguous"}, + {"1010"_b, "st1w_z_p_br"}, + {"1011"_b, "st3w_z_p_br_contiguous"}, + {"1100"_b, "str_z_bi"}, + {"1101"_b, "stnt1d_z_p_br_contiguous"}, + {"1111"_b, "st3d_z_p_br_contiguous"}, + }, + }, + + { "_kzyzrh", + {16, 13, 12}, + { {"000"_b, "rev16_64_dp_1src"}, + {"001"_b, "cls_64_dp_1src"}, + {"100"_b, "pacib_64p_dp_1src"}, + {"101"_b, "autib_64p_dp_1src"}, + {"110"_b, "_vpyvjr"}, + {"111"_b, "_sntnsm"}, + }, + }, + + { "_lgmlmt", + {18, 17}, + { {"00"_b, "ld3_asisdlse_r3"}, + }, + }, + + { "_lgyqpk", + {18, 17}, + { {"0x"_b, "st2_asisdlsop_sx2_r2s"}, + {"10"_b, "st2_asisdlsop_sx2_r2s"}, + {"11"_b, "st2_asisdlsop_s2_i2s"}, + }, + }, + + { "_lgzlyq", + {30, 23, 11, 10}, + { {"1001"_b, "_kltlmp"}, + }, + }, + + { "_ljljkv", + {30, 23, 22, 13, 12, 11, 10}, + { {"0001100"_b, "and_z_zz"}, + {"0001110"_b, "eor3_z_zzz"}, + {"0001111"_b, "bsl_z_zzz"}, + {"0011100"_b, "orr_z_zz"}, + {"0011110"_b, "bcax_z_zzz"}, + {"0011111"_b, "bsl1n_z_zzz"}, + {"0101100"_b, "eor_z_zz"}, + {"0101111"_b, "bsl2n_z_zzz"}, + {"0111100"_b, "bic_z_zz"}, + {"0111111"_b, "nbsl_z_zzz"}, + {"0xx0000"_b, "add_z_zz"}, + {"0xx0001"_b, "sub_z_zz"}, + {"0xx0100"_b, "sqadd_z_zz"}, + {"0xx0101"_b, "uqadd_z_zz"}, + {"0xx0110"_b, "sqsub_z_zz"}, + {"0xx0111"_b, "uqsub_z_zz"}, + {"0xx1101"_b, "xar_z_zzi"}, + {"10x0010"_b, "mla_z_zzzi_h"}, + {"10x0011"_b, "mls_z_zzzi_h"}, + {"10x0100"_b, "sqrdmlah_z_zzzi_h"}, + {"10x0101"_b, "sqrdmlsh_z_zzzi_h"}, + {"1100000"_b, "sdot_z_zzzi_s"}, + {"1100001"_b, "udot_z_zzzi_s"}, + {"1100010"_b, "mla_z_zzzi_s"}, + {"1100011"_b, "mls_z_zzzi_s"}, + {"1100100"_b, "sqrdmlah_z_zzzi_s"}, + {"1100101"_b, "sqrdmlsh_z_zzzi_s"}, + {"1100110"_b, "usdot_z_zzzi_s"}, + {"1100111"_b, "sudot_z_zzzi_s"}, + {"11010x0"_b, "sqdmlalb_z_zzzi_s"}, + {"11010x1"_b, "sqdmlalt_z_zzzi_s"}, + {"11011x0"_b, "sqdmlslb_z_zzzi_s"}, + {"11011x1"_b, "sqdmlslt_z_zzzi_s"}, + {"1110000"_b, "sdot_z_zzzi_d"}, + {"1110001"_b, "udot_z_zzzi_d"}, + {"1110010"_b, "mla_z_zzzi_d"}, + {"1110011"_b, "mls_z_zzzi_d"}, + {"1110100"_b, "sqrdmlah_z_zzzi_d"}, + {"1110101"_b, "sqrdmlsh_z_zzzi_d"}, + {"11110x0"_b, "sqdmlalb_z_zzzi_d"}, + {"11110x1"_b, "sqdmlalt_z_zzzi_d"}, + {"11111x0"_b, "sqdmlslb_z_zzzi_d"}, + {"11111x1"_b, "sqdmlslt_z_zzzi_d"}, + }, + }, + + { "_ljtvgz", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "ucvtf_asimdmiscfp16_r"}, + {"0x00001"_b, "ucvtf_asimdmisc_r"}, + {"1111000"_b, "fcmle_asimdmiscfp16_fz"}, + {"1111001"_b, "frsqrte_asimdmiscfp16_r"}, + {"1x00000"_b, "fcmle_asimdmisc_fz"}, + {"1x00001"_b, "frsqrte_asimdmisc_r"}, + }, + }, + + { "_lkpprr", + {30, 23, 22}, + { {"000"_b, "sbfm_32m_bitfield"}, + {"100"_b, "ubfm_32m_bitfield"}, + }, + }, + + { "_lkttgy", + {10}, + { {"0"_b, "saba_z_zzz"}, + {"1"_b, "uaba_z_zzz"}, + }, + }, + + { "_lkxgjy", + {23, 22}, + { {"10"_b, "cmla_z_zzzi_h"}, + {"11"_b, "cmla_z_zzzi_s"}, + }, + }, + + { "_lkzyzv", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_vgxtvy"}, + }, + }, + + { "_lljxgp", + {1}, + { {"1"_b, "blrabz_64_branch_reg"}, + }, + }, + + { "_llpsqq", + {13, 12, 10}, + { {"001"_b, "_zjjxjl"}, + {"100"_b, "ptrues_p_s"}, + {"110"_b, "_njngkk"}, + }, + }, + + { "_llqtkj", + {18, 17}, + { {"00"_b, "ld2_asisdlso_s2_2s"}, + }, + }, + + { "_lltzjg", + {18, 17, 12}, + { {"0x0"_b, "ld2_asisdlsop_dx2_r2d"}, + {"100"_b, "ld2_asisdlsop_dx2_r2d"}, + {"110"_b, "ld2_asisdlsop_d2_i2d"}, + }, + }, + + { 
"_llvrrk", + {23, 18, 17, 16}, + { {"0000"_b, "sqxtnb_z_zz"}, + }, + }, + + { "_lmmjvx", + {4}, + { {"0"_b, "ccmn_64_condcmp_reg"}, + }, + }, + + { "_lmmkzh", + {4, 3, 2, 1, 0}, + { {"11111"_b, "_nntvzj"}, + }, + }, + + { "_lmyxhr", + {9, 4}, + { {"00"_b, "_gnqhsl"}, + }, + }, + + { "_lnkrzt", + {18, 4}, + { {"00"_b, "fcmne_p_p_z0"}, + }, + }, + + { "_lnmhqq", + {22, 13, 12}, + { {"000"_b, "ldsmaxa_64_memop"}, + {"001"_b, "ldsmina_64_memop"}, + {"010"_b, "ldumaxa_64_memop"}, + {"011"_b, "ldumina_64_memop"}, + {"100"_b, "ldsmaxal_64_memop"}, + {"101"_b, "ldsminal_64_memop"}, + {"110"_b, "ldumaxal_64_memop"}, + {"111"_b, "lduminal_64_memop"}, + }, + }, + + { "_lnntps", + {30, 11, 10}, + { {"000"_b, "_gvxjvz"}, + {"001"_b, "_ypzllm"}, + {"011"_b, "_gslmjl"}, + {"100"_b, "_jxltqm"}, + {"101"_b, "_shqyqv"}, + {"110"_b, "_jpvmkz"}, + {"111"_b, "_pxnyvl"}, + }, + }, + + { "_lnsjqy", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "stzgm_64bulk_ldsttags"}, + }, + }, + + { "_lplpkk", + {30, 23, 22, 13, 12, 11, 10}, + { {"1101001"_b, "smmla_asimdsame2_g"}, + {"1101011"_b, "usmmla_asimdsame2_g"}, + {"x100111"_b, "usdot_asimdsame2_d"}, + {"xxx0101"_b, "sdot_asimdsame2_d"}, + }, + }, + + { "_lplzxv", + {13, 12, 11, 10}, + { {"0000"_b, "umull_asimddiff_l"}, + {"0001"_b, "_yxgmrs"}, + {"0010"_b, "_vyqxyz"}, + {"0011"_b, "_snzvtt"}, + {"0101"_b, "_svgvjm"}, + {"0110"_b, "_ljtvgz"}, + {"0111"_b, "_snhmgn"}, + {"1001"_b, "_kxmxxm"}, + {"1010"_b, "_nkpyjg"}, + {"1011"_b, "_gmsqqz"}, + {"1101"_b, "_gzgpjp"}, + {"1110"_b, "_nzmqhv"}, + {"1111"_b, "_xgxtlr"}, + }, + }, + + { "_lptrlg", + {13, 12}, + { {"00"_b, "sqadd_asisdsame_only"}, + {"10"_b, "sqsub_asisdsame_only"}, + {"11"_b, "cmge_asisdsame_only"}, + }, + }, + + { "_lpzgvs", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stllr_sl32_ldstexcl"}, + }, + }, + + { "_lqjlkj", + {13, 12}, + { {"00"_b, "cpyfp_cpy_memcms"}, + {"01"_b, "cpyfpwt_cpy_memcms"}, + {"10"_b, "cpyfprt_cpy_memcms"}, + {"11"_b, "cpyfpt_cpy_memcms"}, + }, + }, + + { "_lqknkn", + {18, 17}, + { {"0x"_b, "st4_asisdlsop_sx4_r4s"}, + {"10"_b, "st4_asisdlsop_sx4_r4s"}, + {"11"_b, "st4_asisdlsop_s4_i4s"}, + }, + }, + + { "_lqlrxp", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stlrb_sl32_ldstexcl"}, + }, + }, + + { "_lqmksm", + {30, 23, 22, 20, 13, 4}, + { {"00001x"_b, "ld1row_z_p_bi_u32"}, + {"000x0x"_b, "ld1row_z_p_br_contiguous"}, + {"01001x"_b, "ld1rod_z_p_bi_u64"}, + {"010x0x"_b, "ld1rod_z_p_br_contiguous"}, + {"110x00"_b, "str_p_bi"}, + }, + }, + + { "_lrmgmq", + {30, 23, 22}, + { {"00x"_b, "add_64_addsub_imm"}, + {"010"_b, "addg_64_addsub_immtags"}, + {"10x"_b, "sub_64_addsub_imm"}, + {"110"_b, "subg_64_addsub_immtags"}, + }, + }, + + { "_lrntmz", + {13, 12, 11, 10}, + { {"0000"_b, "saddlb_z_zz"}, + {"0001"_b, "saddlt_z_zz"}, + {"0010"_b, "uaddlb_z_zz"}, + {"0011"_b, "uaddlt_z_zz"}, + {"0100"_b, "ssublb_z_zz"}, + {"0101"_b, "ssublt_z_zz"}, + {"0110"_b, "usublb_z_zz"}, + {"0111"_b, "usublt_z_zz"}, + {"1100"_b, "sabdlb_z_zz"}, + {"1101"_b, "sabdlt_z_zz"}, + {"1110"_b, "uabdlb_z_zz"}, + {"1111"_b, "uabdlt_z_zz"}, + }, + }, + + { "_lrptrn", + {30, 23, 13, 12, 11, 10}, + { {"100001"_b, "sri_asisdshf_r"}, + {"100101"_b, "sli_asisdshf_r"}, + {"101001"_b, "sqshlu_asisdshf_r"}, + {"101101"_b, "uqshl_asisdshf_r"}, + }, + }, + + { "_lrqlrg", + {30}, + { {"1"_b, "_ylhgrh"}, + }, + }, + + { "_lspzrv", + {30, 23, 13}, + { {"000"_b, "ld1sb_z_p_bz_s_x32_unscaled"}, + {"001"_b, "ldff1sb_z_p_bz_s_x32_unscaled"}, + {"010"_b, "ld1sh_z_p_bz_s_x32_unscaled"}, + {"011"_b, "ldff1sh_z_p_bz_s_x32_unscaled"}, + {"100"_b, 
"ld1sb_z_p_bz_d_x32_unscaled"}, + {"101"_b, "ldff1sb_z_p_bz_d_x32_unscaled"}, + {"110"_b, "ld1sh_z_p_bz_d_x32_unscaled"}, + {"111"_b, "ldff1sh_z_p_bz_d_x32_unscaled"}, + }, + }, + + { "_lsqgkk", + {30}, + { {"1"_b, "_jsqvtn"}, + }, + }, + + { "_lssjyz", + {30}, + { {"1"_b, "_kxtqjh"}, + }, + }, + + { "_lszlkq", + {22, 20, 19, 18, 17, 16, 13, 12}, + { {"01111100"_b, "_xtgmvr"}, + }, + }, + + { "_ltrntg", + {12}, + { {"0"_b, "udot_asimdelem_d"}, + {"1"_b, "sqrdmlsh_asimdelem_r"}, + }, + }, + + { "_lvjtlg", + {30, 11, 10}, + { {"000"_b, "_krvxxx"}, + {"001"_b, "_rpjrhs"}, + {"010"_b, "_tsypsz"}, + {"011"_b, "_ktsgth"}, + {"100"_b, "_yhnqyy"}, + {"101"_b, "_xzqmkv"}, + {"110"_b, "_vxqtkl"}, + {"111"_b, "_jggxjz"}, + }, + }, + + { "_lvryvp", + {30}, + { {"0"_b, "_gkqhyz"}, + {"1"_b, "_nzqxrj"}, + }, + }, + + { "_lvsrnj", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "scvtf_asimdmiscfp16_r"}, + {"0x00001"_b, "scvtf_asimdmisc_r"}, + {"1111000"_b, "fcmeq_asimdmiscfp16_fz"}, + {"1111001"_b, "frecpe_asimdmiscfp16_r"}, + {"1x00000"_b, "fcmeq_asimdmisc_fz"}, + {"1x00001"_b, "frecpe_asimdmisc_r"}, + }, + }, + + { "_lvszgj", + {2, 1}, + { {"11"_b, "brabz_64_branch_reg"}, + }, + }, + + { "_lxggmz", + {30}, + { {"0"_b, "b_only_branch_imm"}, + }, + }, + + { "_lxhlkx", + {12, 11, 10}, + { {"000"_b, "ftmad_z_zzi"}, + }, + }, + + { "_lxlqks", + {19}, + { {"1"_b, "sysp_cr_syspairinstrs"}, + }, + }, + + { "_lylpyx", + {10}, + { {"0"_b, "sabalb_z_zzz"}, + {"1"_b, "sabalt_z_zzz"}, + }, + }, + + { "_lymhlk", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_vpgxgk"}, + }, + }, + + { "_lynsgm", + {13}, + { {"0"_b, "_ttplgp"}, + }, + }, + + { "_lytkrx", + {12, 11, 10}, + { {"000"_b, "dup_z_zi"}, + {"010"_b, "tbl_z_zz_2"}, + {"011"_b, "tbx_z_zz"}, + {"100"_b, "tbl_z_zz_1"}, + {"110"_b, "_ylnsvy"}, + }, + }, + + { "_lyzhrq", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtms_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtms_asimdmisc_r"}, + {"1111001"_b, "fcvtzs_asimdmiscfp16_r"}, + {"1x00001"_b, "fcvtzs_asimdmisc_r"}, + {"xx00000"_b, "abs_asimdmisc_r"}, + {"xx10001"_b, "addv_asimdall_only"}, + }, + }, + + { "_lzjyhm", + {30}, + { {"0"_b, "ldapursw_64_ldapstl_unscaled"}, + }, + }, + + { "_lzqxgt", + {13, 12}, + { {"00"_b, "sbcs_32_addsub_carry"}, + }, + }, + + { "_lzvxxj", + {23, 22}, + { {"01"_b, "fcmeq_asimdsamefp16_only"}, + }, + }, + + { "_lzzsyj", + {18, 17}, + { {"0x"_b, "st3_asisdlsep_r3_r"}, + {"10"_b, "st3_asisdlsep_r3_r"}, + {"11"_b, "st3_asisdlsep_i3_i"}, + }, + }, + + { "_mgjhts", + {13, 12, 10}, + { {"001"_b, "_rvtxys"}, + {"010"_b, "_ppyynh"}, + {"011"_b, "_vvyjmh"}, + {"101"_b, "_rpplns"}, + {"110"_b, "sqdmlal_asisdelem_l"}, + {"111"_b, "_ymmhtq"}, + }, + }, + + { "_mgspnm", + {30, 23}, + { {"00"_b, "orr_64_log_imm"}, + {"10"_b, "ands_64s_log_imm"}, + {"11"_b, "movk_64_movewide"}, + }, + }, + + { "_mgtxyt", + {13, 12}, + { {"00"_b, "sbcs_64_addsub_carry"}, + }, + }, + + { "_mhksnq", + {23, 22, 20, 19, 11}, + { {"00010"_b, "ucvtf_asisdshf_c"}, + {"001x0"_b, "ucvtf_asisdshf_c"}, + {"01xx0"_b, "ucvtf_asisdshf_c"}, + }, + }, + + { "_mhljkp", + {23}, + { {"0"_b, "frecps_asimdsame_only"}, + {"1"_b, "frsqrts_asimdsame_only"}, + }, + }, + + { "_mhnlsy", + {11, 10}, + { {"00"_b, "sm3tt1a_vvv4_crypto3_imm2"}, + {"01"_b, "sm3tt1b_vvv4_crypto3_imm2"}, + {"10"_b, "sm3tt2a_vvv4_crypto3_imm2"}, + {"11"_b, "sm3tt2b_vvv_crypto3_imm2"}, + }, + }, + + { "_mhpgjx", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stlr_sl64_ldstexcl"}, + }, + }, + + { "_mhrjvp", + {30, 13}, + { {"00"_b, "_vxhgzz"}, + {"01"_b, "_lytkrx"}, + 
{"10"_b, "_rlyvpn"}, + {"11"_b, "_yvptvx"}, + }, + }, + + { "_mjjhqj", + {30, 23, 22, 19, 16}, + { {"10010"_b, "aesimc_b_cryptoaes"}, + {"x0x01"_b, "fcvtl_asimdmisc_l"}, + {"xxx00"_b, "sqabs_asimdmisc_r"}, + }, + }, + + { "_mjrlkp", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "movi_asimdimm_l_hl"}, + {"00x100"_b, "shrn_asimdshf_n"}, + {"00x101"_b, "rshrn_asimdshf_n"}, + {"00x110"_b, "sshll_asimdshf_l"}, + {"010x00"_b, "shrn_asimdshf_n"}, + {"010x01"_b, "rshrn_asimdshf_n"}, + {"010x10"_b, "sshll_asimdshf_l"}, + {"011100"_b, "shrn_asimdshf_n"}, + {"011101"_b, "rshrn_asimdshf_n"}, + {"011110"_b, "sshll_asimdshf_l"}, + {"0x1000"_b, "shrn_asimdshf_n"}, + {"0x1001"_b, "rshrn_asimdshf_n"}, + {"0x1010"_b, "sshll_asimdshf_l"}, + }, + }, + + { "_mjrqhl", + {18, 17}, + { {"0x"_b, "st3_asisdlsop_sx3_r3s"}, + {"10"_b, "st3_asisdlsop_sx3_r3s"}, + {"11"_b, "st3_asisdlsop_s3_i3s"}, + }, + }, + + { "_mjstgz", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "ldgm_64bulk_ldsttags"}, + }, + }, + + { "_mjyhsl", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxp_lp32_ldstexcl"}, + }, + }, + + { "_mkgsly", + {19, 18, 17, 16, 4}, + { {"00000"_b, "brkas_p_p_p_z"}, + {"10000"_b, "brkns_p_p_pp"}, + }, + }, + + { "_mkrgxr", + {23, 4}, + { {"00"_b, "_hptkrj"}, + }, + }, + + { "_mkyyng", + {23, 22}, + { {"01"_b, "fcmla_asimdelem_c_h"}, + {"10"_b, "fcmla_asimdelem_c_s"}, + }, + }, + + { "_mkzysy", + {30, 23, 22}, + { {"000"_b, "str_b_ldst_pos"}, + {"001"_b, "ldr_b_ldst_pos"}, + {"010"_b, "str_q_ldst_pos"}, + {"011"_b, "ldr_q_ldst_pos"}, + {"100"_b, "str_h_ldst_pos"}, + {"101"_b, "ldr_h_ldst_pos"}, + }, + }, + + { "_mlgmqm", + {18, 17}, + { {"00"_b, "st2_asisdlso_s2_2s"}, + }, + }, + + { "_mlxtxs", + {10}, + { {"0"_b, "ssra_z_zi"}, + {"1"_b, "usra_z_zi"}, + }, + }, + + { "_mmgpkx", + {13, 12}, + { {"11"_b, "cmgt_asisdsame_only"}, + }, + }, + + { "_mmxgrt", + {20, 19, 18, 17, 16}, + { {"00000"_b, "rev32_asimdmisc_r"}, + }, + }, + + { "_mnmtql", + {10}, + { {"0"_b, "srsra_z_zi"}, + {"1"_b, "ursra_z_zi"}, + }, + }, + + { "_mntnlr", + {18}, + { {"0"_b, "ld1_asisdlse_r4_4v"}, + }, + }, + + { "_mnxgml", + {22}, + { {"0"_b, "ldrsw_64_ldst_regoff"}, + }, + }, + + { "_mnxgqm", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xxxx"_b, "fnmadd_s_floatdp3"}, + {"001xxxx"_b, "fnmadd_d_floatdp3"}, + {"011xxxx"_b, "fnmadd_h_floatdp3"}, + {"10001x0"_b, "fmls_asisdelem_rh_h"}, + {"10x0101"_b, "shl_asisdshf_r"}, + {"10x1101"_b, "sqshl_asisdshf_r"}, + {"11x01x0"_b, "fmls_asisdelem_r_sd"}, + {"1xx11x0"_b, "sqdmlsl_asisdelem_l"}, + }, + }, + + { "_mnzgkx", + {12}, + { {"0"_b, "st1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_mnzzhk", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stlr_sl32_ldstexcl"}, + }, + }, + + { "_mphkpq", + {12}, + { {"0"_b, "st1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_mpstrr", + {23, 22, 8, 7, 6, 5, 4, 3, 2, 1, 0}, + { {"00000000000"_b, "setffr_f"}, + }, + }, + + { "_mpvsng", + {30}, + { {"0"_b, "_vvtnrv"}, + {"1"_b, "_yykhjv"}, + }, + }, + + { "_mpytmv", + {23, 22, 20, 19, 11}, + { {"00011"_b, "fcvtzu_asisdshf_c"}, + {"001x1"_b, "fcvtzu_asisdshf_c"}, + {"01xx1"_b, "fcvtzu_asisdshf_c"}, + }, + }, + + { "_mqljmr", + {2, 1, 0}, + { {"000"_b, "_rnphqp"}, + }, + }, + + { "_mqmrng", + {9, 8, 7, 6, 5, 2, 1}, + { {"1111100"_b, "eret_64e_branch_reg"}, + }, + }, + + { "_mqrzzk", + {22, 20, 11}, + { {"000"_b, "sqincw_z_zs"}, + {"001"_b, "sqdecw_z_zs"}, + {"010"_b, "incw_z_zs"}, + {"100"_b, "sqincd_z_zs"}, + {"101"_b, "sqdecd_z_zs"}, + {"110"_b, "incd_z_zs"}, + }, + }, + + { "_mqssgy", + {30}, + { {"0"_b, "_slzrtr"}, + {"1"_b, "_nsgxlz"}, + }, + }, + 
+ { "_mqtgvk", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlurb_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapurb_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursb_64_ldapstl_unscaled"}, + {"011xx00"_b, "ldapursb_32_ldapstl_unscaled"}, + {"100xx00"_b, "stlurh_32_ldapstl_unscaled"}, + {"101xx00"_b, "ldapurh_32_ldapstl_unscaled"}, + {"110xx00"_b, "ldapursh_64_ldapstl_unscaled"}, + {"111xx00"_b, "ldapursh_32_ldapstl_unscaled"}, + {"x000001"_b, "cpyfp_cpy_memcms"}, + {"x000101"_b, "cpyfpwt_cpy_memcms"}, + {"x001001"_b, "cpyfprt_cpy_memcms"}, + {"x001101"_b, "cpyfpt_cpy_memcms"}, + {"x010001"_b, "cpyfm_cpy_memcms"}, + {"x010101"_b, "cpyfmwt_cpy_memcms"}, + {"x011001"_b, "cpyfmrt_cpy_memcms"}, + {"x011101"_b, "cpyfmt_cpy_memcms"}, + {"x100001"_b, "cpyfe_cpy_memcms"}, + {"x100101"_b, "cpyfewt_cpy_memcms"}, + {"x101001"_b, "cpyfert_cpy_memcms"}, + {"x101101"_b, "cpyfet_cpy_memcms"}, + {"x110001"_b, "setp_set_memcms"}, + {"x110101"_b, "setpt_set_memcms"}, + {"x111001"_b, "setpn_set_memcms"}, + {"x111101"_b, "setptn_set_memcms"}, + }, + }, + + { "_mrhtxt", + {23, 22, 20, 9}, + { {"0000"_b, "brkpb_p_p_pp"}, + {"0100"_b, "brkpbs_p_p_pp"}, + }, + }, + + { "_mrlpxr", + {30, 23, 22}, + { {"000"_b, "_vqzsgg"}, + {"001"_b, "_tzjyhy"}, + {"011"_b, "_grsnms"}, + {"100"_b, "_sknvhk"}, + {"101"_b, "_ptqtmp"}, + {"111"_b, "_kktzst"}, + }, + }, + + { "_msnshr", + {23, 22, 13, 12, 11, 10}, + { {"0001x0"_b, "fmls_asimdelem_rh_h"}, + {"0x0101"_b, "shl_asimdshf_r"}, + {"0x1101"_b, "sqshl_asimdshf_r"}, + {"1000x0"_b, "fmlsl_asimdelem_lh"}, + {"1x01x0"_b, "fmls_asimdelem_r_sd"}, + {"xx10x0"_b, "smlsl_asimdelem_l"}, + {"xx11x0"_b, "sqdmlsl_asimdelem_l"}, + }, + }, + + { "_msvhjv", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_d_floatimm"}, + }, + }, + + { "_msvjxq", + {20, 19, 18, 17, 16}, + { {"00001"_b, "sqxtun_asisdmisc_n"}, + }, + }, + + { "_msyrjz", + {13, 12, 11, 10}, + { {"1111"_b, "casal_c64_ldstexcl"}, + }, + }, + + { "_mthlnv", + {18}, + { {"0"_b, "ld1_asisdlsep_r4_r4"}, + {"1"_b, "ld1_asisdlsep_i4_i4"}, + }, + }, + + { "_mtkhgz", + {10}, + { {"0"_b, "sha512su0_vv2_cryptosha512_2"}, + {"1"_b, "sm4e_vv4_cryptosha512_2"}, + }, + }, + + { "_mtlxqp", + {30, 23, 22}, + { {"000"_b, "stnp_64_ldstnapair_offs"}, + {"001"_b, "ldnp_64_ldstnapair_offs"}, + {"010"_b, "stp_64_ldstpair_post"}, + {"011"_b, "ldp_64_ldstpair_post"}, + }, + }, + + { "_mtshvn", + {18}, + { {"0"_b, "ld1_asisdlso_b1_1b"}, + }, + }, + + { "_mtzhrn", + {30, 23, 22, 11, 10, 4}, + { {"001000"_b, "ccmn_64_condcmp_reg"}, + {"001100"_b, "ccmn_64_condcmp_imm"}, + {"101000"_b, "ccmp_64_condcmp_reg"}, + {"101100"_b, "ccmp_64_condcmp_imm"}, + }, + }, + + { "_mvqkzv", + {18, 17, 12}, + { {"000"_b, "st2_asisdlso_d2_2d"}, + }, + }, + + { "_mvvngm", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "mvni_asimdimm_l_sl"}, + {"00x100"_b, "ushr_asimdshf_r"}, + {"00x110"_b, "urshr_asimdshf_r"}, + {"010x00"_b, "ushr_asimdshf_r"}, + {"010x10"_b, "urshr_asimdshf_r"}, + {"011100"_b, "ushr_asimdshf_r"}, + {"011110"_b, "urshr_asimdshf_r"}, + {"0x1000"_b, "ushr_asimdshf_r"}, + {"0x1010"_b, "urshr_asimdshf_r"}, + }, + }, + + { "_mxgykv", + {19, 18, 17, 16}, + { {"0000"_b, "cntp_r_p_p"}, + {"1000"_b, "_lynsgm"}, + {"1001"_b, "_jxyskn"}, + {"1010"_b, "_jmxstz"}, + {"1011"_b, "_yjzknm"}, + {"1100"_b, "_zmtkvx"}, + {"1101"_b, "_yhmlxk"}, + }, + }, + + { "_mxnzst", + {30}, + { {"0"_b, "_vghjnt"}, + {"1"_b, "_pkqvxk"}, + }, + }, + + { "_mxnzyr", + {19, 16}, + { {"00"_b, "_nhxxmh"}, + {"10"_b, "_qgymsy"}, + {"11"_b, "_gjprmg"}, + }, + }, + + { "_mxplnn", + {30, 23, 22}, + { {"000"_b, 
"stnp_s_ldstnapair_offs"}, + {"001"_b, "ldnp_s_ldstnapair_offs"}, + {"010"_b, "stp_s_ldstpair_post"}, + {"011"_b, "ldp_s_ldstpair_post"}, + {"100"_b, "stnp_d_ldstnapair_offs"}, + {"101"_b, "ldnp_d_ldstnapair_offs"}, + {"110"_b, "stp_d_ldstpair_post"}, + {"111"_b, "ldp_d_ldstpair_post"}, + }, + }, + + { "_mxvjxx", + {20, 19, 18, 16}, + { {"0000"_b, "_nshjhk"}, + }, + }, + + { "_mylphg", + {30, 13, 4}, + { {"000"_b, "cmpge_p_p_zw"}, + {"001"_b, "cmpgt_p_p_zw"}, + {"010"_b, "cmplt_p_p_zw"}, + {"011"_b, "cmple_p_p_zw"}, + {"1xx"_b, "fcmla_z_p_zzz"}, + }, + }, + + { "_myrkmk", + {16, 13, 12}, + { {"000"_b, "rev32_64_dp_1src"}, + {"001"_b, "ctz_64_dp_1src"}, + {"100"_b, "pacda_64p_dp_1src"}, + {"101"_b, "autda_64p_dp_1src"}, + {"110"_b, "_tnjhxp"}, + {"111"_b, "_qqjtpm"}, + }, + }, + + { "_myvqtn", + {12}, + { {"0"_b, "_yrgzqr"}, + }, + }, + + { "_myzhml", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "stgm_64bulk_ldsttags"}, + }, + }, + + { "_mzhsrq", + {4}, + { {"0"_b, "cmplt_p_p_zi"}, + {"1"_b, "cmple_p_p_zi"}, + }, + }, + + { "_mzkxzm", + {1}, + { {"0"_b, "blr_64_branch_reg"}, + }, + }, + + { "_nghmrp", + {13, 12, 11, 10}, + { {"1111"_b, "casal_c32_ldstexcl"}, + }, + }, + + { "_ngkgsg", + {23, 22, 20, 19, 11}, + { {"00000"_b, "movi_asimdimm_l_sl"}, + }, + }, + + { "_ngnxrx", + {18}, + { {"0"_b, "ld1_asisdlse_r2_2v"}, + }, + }, + + { "_ngtlpz", + {18, 17, 12}, + { {"0x0"_b, "st3_asisdlsop_dx3_r3d"}, + {"100"_b, "st3_asisdlsop_dx3_r3d"}, + {"110"_b, "st3_asisdlsop_d3_i3d"}, + }, + }, + + { "_ngttyj", + {30, 23, 22, 13}, + { {"0000"_b, "ld1b_z_p_br_u16"}, + {"0001"_b, "ldff1b_z_p_br_u16"}, + {"0010"_b, "ld1b_z_p_br_u64"}, + {"0011"_b, "ldff1b_z_p_br_u64"}, + {"0100"_b, "ld1h_z_p_br_u16"}, + {"0101"_b, "ldff1h_z_p_br_u16"}, + {"0110"_b, "ld1h_z_p_br_u64"}, + {"0111"_b, "ldff1h_z_p_br_u64"}, + {"1001"_b, "st2b_z_p_br_contiguous"}, + {"1011"_b, "st4b_z_p_br_contiguous"}, + {"10x0"_b, "st1b_z_p_br"}, + {"1101"_b, "st2h_z_p_br_contiguous"}, + {"1111"_b, "st4h_z_p_br_contiguous"}, + {"11x0"_b, "st1h_z_p_br"}, + }, + }, + + { "_ngvqhs", + {13, 12, 11, 10}, + { {"0001"_b, "ushl_asisdsame_only"}, + {"0010"_b, "_vrxhss"}, + {"0011"_b, "uqshl_asisdsame_only"}, + {"0101"_b, "urshl_asisdsame_only"}, + {"0111"_b, "uqrshl_asisdsame_only"}, + {"1010"_b, "_xprqgs"}, + {"1110"_b, "_yskyrg"}, + }, + }, + + { "_ngzyqj", + {11, 10}, + { {"00"_b, "asr_z_zi"}, + {"01"_b, "lsr_z_zi"}, + {"11"_b, "lsl_z_zi"}, + }, + }, + + { "_nhnhzp", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_hrymnk"}, + {"0000001"_b, "_hmgzjl"}, + {"0100000"_b, "_nxmgqz"}, + {"0100001"_b, "_ssjrxs"}, + {"100xxx0"_b, "st1_asisdlsop_hx1_r1h"}, + {"100xxx1"_b, "st3_asisdlsop_hx3_r3h"}, + {"1010xx0"_b, "st1_asisdlsop_hx1_r1h"}, + {"1010xx1"_b, "st3_asisdlsop_hx3_r3h"}, + {"10110x0"_b, "st1_asisdlsop_hx1_r1h"}, + {"10110x1"_b, "st3_asisdlsop_hx3_r3h"}, + {"1011100"_b, "st1_asisdlsop_hx1_r1h"}, + {"1011101"_b, "st3_asisdlsop_hx3_r3h"}, + {"1011110"_b, "_jyzhnh"}, + {"1011111"_b, "_qzlvkm"}, + {"110xxx0"_b, "ld1_asisdlsop_hx1_r1h"}, + {"110xxx1"_b, "ld3_asisdlsop_hx3_r3h"}, + {"1110xx0"_b, "ld1_asisdlsop_hx1_r1h"}, + {"1110xx1"_b, "ld3_asisdlsop_hx3_r3h"}, + {"11110x0"_b, "ld1_asisdlsop_hx1_r1h"}, + {"11110x1"_b, "ld3_asisdlsop_hx3_r3h"}, + {"1111100"_b, "ld1_asisdlsop_hx1_r1h"}, + {"1111101"_b, "ld3_asisdlsop_hx3_r3h"}, + {"1111110"_b, "_zmkntq"}, + {"1111111"_b, "_rxhssh"}, + }, + }, + + { "_nhrkqm", + {22, 20, 19, 18, 17, 16}, + { {"111001"_b, "ucvtf_asisdmiscfp16_r"}, + {"x00001"_b, "ucvtf_asisdmisc_r"}, + {"x10000"_b, 
"faddp_asisdpair_only_sd"}, + }, + }, + + { "_nhxxmh", + {23, 22, 9, 3, 2, 1, 0}, + { {"0100000"_b, "ptest_p_p"}, + }, + }, + + { "_njjlxy", + {30, 23, 22}, + { {"000"_b, "stlxp_sp32_ldstexcl"}, + {"001"_b, "_ymvzyh"}, + {"010"_b, "_nxttqn"}, + {"011"_b, "_nghmrp"}, + {"100"_b, "stlxp_sp64_ldstexcl"}, + {"101"_b, "_hpqkhv"}, + {"110"_b, "_xspjzn"}, + {"111"_b, "_msyrjz"}, + }, + }, + + { "_njngkk", + {23, 22, 9, 8, 7, 6, 5}, + { {"0000000"_b, "rdffr_p_f"}, + }, + }, + + { "_njnsqm", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "frintn_asimdmiscfp16_r"}, + {"0x00001"_b, "frintn_asimdmisc_r"}, + {"1111001"_b, "frintp_asimdmiscfp16_r"}, + {"1x00001"_b, "frintp_asimdmisc_r"}, + {"xx00000"_b, "cmgt_asimdmisc_z"}, + }, + }, + + { "_njvkjq", + {11, 10}, + { {"00"_b, "index_z_ii"}, + {"01"_b, "index_z_ri"}, + {"10"_b, "index_z_ir"}, + {"11"_b, "index_z_rr"}, + }, + }, + + { "_nklqly", + {13, 12, 11, 10}, + { {"0000"_b, "sha256h_qqv_cryptosha3"}, + {"0100"_b, "sha256h2_qqv_cryptosha3"}, + {"1000"_b, "sha256su1_vvv_cryptosha3"}, + }, + }, + + { "_nklvmv", + {30, 23, 22, 13, 12, 11, 10}, + { {"1011001"_b, "fcmge_asisdsamefp16_only"}, + {"1011011"_b, "facge_asisdsamefp16_only"}, + {"1110101"_b, "fabd_asisdsamefp16_only"}, + {"1111001"_b, "fcmgt_asisdsamefp16_only"}, + {"1111011"_b, "facgt_asisdsamefp16_only"}, + }, + }, + + { "_nklyky", + {18, 17, 12}, + { {"000"_b, "st1_asisdlso_d1_1d"}, + }, + }, + + { "_nkmkvz", + {18}, + { {"0"_b, "st3_asisdlsop_bx3_r3b"}, + {"1"_b, "st3_asisdlsop_b3_i3b"}, + }, + }, + + { "_nknntn", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtns_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtns_asimdmisc_r"}, + {"1111001"_b, "fcvtps_asimdmiscfp16_r"}, + {"1x00001"_b, "fcvtps_asimdmisc_r"}, + {"xx00000"_b, "cmlt_asimdmisc_z"}, + {"xx10000"_b, "smaxv_asimdall_only"}, + {"xx10001"_b, "sminv_asimdall_only"}, + }, + }, + + { "_nkpyjg", + {23, 20, 19, 18, 17, 16}, + { {"000001"_b, "frint32x_asimdmisc_r"}, + }, + }, + + { "_nktrpj", + {23, 22, 12}, + { {"001"_b, "sudot_asimdelem_d"}, + {"011"_b, "bfdot_asimdelem_e"}, + {"101"_b, "usdot_asimdelem_d"}, + {"111"_b, "bfmlal_asimdelem_f"}, + {"xx0"_b, "sdot_asimdelem_d"}, + }, + }, + + { "_nkxhsy", + {22, 20, 11}, + { {"000"_b, "cntb_r_s"}, + {"010"_b, "incb_r_rs"}, + {"100"_b, "cnth_r_s"}, + {"110"_b, "inch_r_rs"}, + }, + }, + + { "_nkyrpv", + {30, 23, 13, 12, 11, 10}, + { {"101001"_b, "ucvtf_asisdshf_c"}, + {"101111"_b, "fcvtzu_asisdshf_c"}, + {"1x01x0"_b, "sqrdmlah_asisdelem_r"}, + {"1x11x0"_b, "sqrdmlsh_asisdelem_r"}, + }, + }, + + { "_nkyynq", + {23, 22, 20, 19, 17, 16}, + { {"000010"_b, "scvtf_s32_float2fix"}, + {"000011"_b, "ucvtf_s32_float2fix"}, + {"001100"_b, "fcvtzs_32s_float2fix"}, + {"001101"_b, "fcvtzu_32s_float2fix"}, + {"010010"_b, "scvtf_d32_float2fix"}, + {"010011"_b, "ucvtf_d32_float2fix"}, + {"011100"_b, "fcvtzs_32d_float2fix"}, + {"011101"_b, "fcvtzu_32d_float2fix"}, + {"110010"_b, "scvtf_h32_float2fix"}, + {"110011"_b, "ucvtf_h32_float2fix"}, + {"111100"_b, "fcvtzs_32h_float2fix"}, + {"111101"_b, "fcvtzu_32h_float2fix"}, + }, + }, + + { "_nlpmvl", + {30, 13}, + { {"00"_b, "mad_z_p_zzz"}, + {"01"_b, "msb_z_p_zzz"}, + }, + }, + + { "_nlrjsj", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_s_ldapstl_simd"}, + {"001xx10"_b, "ldapur_s_ldapstl_simd"}, + {"100xx10"_b, "stlur_d_ldapstl_simd"}, + {"101xx10"_b, "ldapur_d_ldapstl_simd"}, + {"x000001"_b, "cpypn_cpy_memcms"}, + {"x000101"_b, "cpypwtn_cpy_memcms"}, + {"x001001"_b, "cpyprtn_cpy_memcms"}, + {"x001101"_b, "cpyptn_cpy_memcms"}, + {"x010001"_b, 
"cpymn_cpy_memcms"}, + {"x010101"_b, "cpymwtn_cpy_memcms"}, + {"x011001"_b, "cpymrtn_cpy_memcms"}, + {"x011101"_b, "cpymtn_cpy_memcms"}, + {"x100001"_b, "cpyen_cpy_memcms"}, + {"x100101"_b, "cpyewtn_cpy_memcms"}, + {"x101001"_b, "cpyertn_cpy_memcms"}, + {"x101101"_b, "cpyetn_cpy_memcms"}, + }, + }, + + { "_nmqrtr", + {23, 22, 13, 12, 11, 10}, + { {"0001x0"_b, "fmul_asimdelem_rh_h"}, + {"0x0001"_b, "shrn_asimdshf_n"}, + {"0x0011"_b, "rshrn_asimdshf_n"}, + {"0x0101"_b, "sqshrn_asimdshf_n"}, + {"0x0111"_b, "sqrshrn_asimdshf_n"}, + {"0x1001"_b, "sshll_asimdshf_l"}, + {"1x01x0"_b, "fmul_asimdelem_r_sd"}, + {"xx00x0"_b, "mul_asimdelem_r"}, + {"xx10x0"_b, "smull_asimdelem_l"}, + {"xx11x0"_b, "sqdmull_asimdelem_l"}, + }, + }, + + { "_nmqskh", + {23, 22, 20, 19, 16, 13, 12}, + { {"0000000"_b, "_xkznrh"}, + {"0000010"_b, "_svlrvy"}, + {"0000011"_b, "_prmjlz"}, + {"0100000"_b, "_lgmlmt"}, + {"0100010"_b, "_qhpkhm"}, + {"0100011"_b, "_sqlsyr"}, + {"100xx00"_b, "st3_asisdlsep_r3_r"}, + {"100xx10"_b, "st1_asisdlsep_r3_r3"}, + {"100xx11"_b, "st1_asisdlsep_r1_r1"}, + {"1010x00"_b, "st3_asisdlsep_r3_r"}, + {"1010x10"_b, "st1_asisdlsep_r3_r3"}, + {"1010x11"_b, "st1_asisdlsep_r1_r1"}, + {"1011000"_b, "st3_asisdlsep_r3_r"}, + {"1011010"_b, "st1_asisdlsep_r3_r3"}, + {"1011011"_b, "st1_asisdlsep_r1_r1"}, + {"1011100"_b, "_lzzsyj"}, + {"1011110"_b, "_xqvzvl"}, + {"1011111"_b, "_vxrnyh"}, + {"110xx00"_b, "ld3_asisdlsep_r3_r"}, + {"110xx10"_b, "ld1_asisdlsep_r3_r3"}, + {"110xx11"_b, "ld1_asisdlsep_r1_r1"}, + {"1110x00"_b, "ld3_asisdlsep_r3_r"}, + {"1110x10"_b, "ld1_asisdlsep_r3_r3"}, + {"1110x11"_b, "ld1_asisdlsep_r1_r1"}, + {"1111000"_b, "ld3_asisdlsep_r3_r"}, + {"1111010"_b, "ld1_asisdlsep_r3_r3"}, + {"1111011"_b, "ld1_asisdlsep_r1_r1"}, + {"1111100"_b, "_ntxnpq"}, + {"1111110"_b, "_ghmtnl"}, + {"1111111"_b, "_gzrtkk"}, + }, + }, + + { "_nnkxgr", + {11, 10}, + { {"00"_b, "ftssel_z_zz"}, + {"10"_b, "_yhlntp"}, + {"11"_b, "_rsqmgk"}, + }, + }, + + { "_nnrtpm", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stllrb_sl32_ldstexcl"}, + }, + }, + + { "_nntvzj", + {11, 10, 9, 8, 7, 6}, + { {"000000"_b, "nop_hi_hints"}, + {"000001"_b, "wfe_hi_hints"}, + {"000010"_b, "sev_hi_hints"}, + {"000011"_b, "dgh_hi_hints"}, + {"000100"_b, "pacia1716_hi_hints"}, + {"000101"_b, "pacib1716_hi_hints"}, + {"000110"_b, "autia1716_hi_hints"}, + {"000111"_b, "autib1716_hi_hints"}, + {"001000"_b, "esb_hi_hints"}, + {"001001"_b, "tsb_hc_hints"}, + {"001010"_b, "csdb_hi_hints"}, + {"001011"_b, "clrbhb_hi_hints"}, + {"001100"_b, "paciaz_hi_hints"}, + {"001101"_b, "pacibz_hi_hints"}, + {"001110"_b, "autiaz_hi_hints"}, + {"001111"_b, "autibz_hi_hints"}, + {"0100xx"_b, "bti_hb_hints"}, + {"010100"_b, "chkfeat_hi_hints"}, + {"0101x1"_b, "hint_hm_hints"}, + {"01x110"_b, "hint_hm_hints"}, + {"10xxxx"_b, "hint_hm_hints"}, + {"110xxx"_b, "hint_hm_hints"}, + {"111110"_b, "hint_hm_hints"}, + {"x110xx"_b, "hint_hm_hints"}, + {"x1110x"_b, "hint_hm_hints"}, + {"x11111"_b, "hint_hm_hints"}, + }, + }, + + { "_nnzhgm", + {19, 18, 17, 16, 4}, + { {"0000x"_b, "brka_p_p_p"}, + {"10000"_b, "brkn_p_p_pp"}, + }, + }, + + { "_npjnlv", + {20, 19, 18, 17}, + { {"0000"_b, "_kzyzrh"}, + }, + }, + + { "_npxkzq", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000000"_b, "_tykvnx"}, + }, + }, + + { "_nqjtqn", + {23, 22}, + { {"00"_b, "dup_asimdins_dv_v"}, + {"01"_b, "fmaxnm_asimdsamefp16_only"}, + {"11"_b, "fminnm_asimdsamefp16_only"}, + }, + }, + + { "_nqjvmr", + {13, 12}, + { {"00"_b, "adcs_32_addsub_carry"}, + }, + }, + + { "_nqkhrv", + {30, 13}, + { {"10"_b, "fnmla_z_p_zzz"}, + 
{"11"_b, "fnmls_z_p_zzz"}, + }, + }, + + { "_nqlrmv", + {30, 23, 22}, + { {"000"_b, "bfm_32m_bitfield"}, + }, + }, + + { "_nqmnzp", + {30, 23, 22, 20, 19, 18, 17, 16}, + { {"00000000"_b, "udf_only_perm_undef"}, + }, + }, + + { "_nrmlqv", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "orr_asimdimm_l_sl"}, + {"00x100"_b, "ssra_asimdshf_r"}, + {"00x110"_b, "srsra_asimdshf_r"}, + {"010x00"_b, "ssra_asimdshf_r"}, + {"010x10"_b, "srsra_asimdshf_r"}, + {"011100"_b, "ssra_asimdshf_r"}, + {"011110"_b, "srsra_asimdshf_r"}, + {"0x1000"_b, "ssra_asimdshf_r"}, + {"0x1010"_b, "srsra_asimdshf_r"}, + }, + }, + + { "_nsgvsv", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_h_floatimm"}, + }, + }, + + { "_nsgxlz", + {13, 12, 10}, + { {"000"_b, "sqdmulh_asisdelem_r"}, + {"010"_b, "sqrdmulh_asisdelem_r"}, + {"101"_b, "_rkjjtp"}, + {"111"_b, "_pzpxxv"}, + }, + }, + + { "_nshjhk", + {17, 9, 8, 7, 6, 5}, + { {"000000"_b, "aesimc_z_z"}, + {"1xxxxx"_b, "aesd_z_zz"}, + }, + }, + + { "_nsjhhg", + {30, 13}, + { {"00"_b, "_jhllmn"}, + {"01"_b, "_htplsj"}, + {"10"_b, "_rztvnl"}, + {"11"_b, "_vgtnjh"}, + }, + }, + + { "_ntjpsx", + {22, 20, 11}, + { {"000"_b, "uqincb_r_rs_uw"}, + {"001"_b, "uqdecb_r_rs_uw"}, + {"010"_b, "uqincb_r_rs_x"}, + {"011"_b, "uqdecb_r_rs_x"}, + {"100"_b, "uqinch_r_rs_uw"}, + {"101"_b, "uqdech_r_rs_uw"}, + {"110"_b, "uqinch_r_rs_x"}, + {"111"_b, "uqdech_r_rs_x"}, + }, + }, + + { "_ntjrlg", + {18, 17, 16, 13, 12, 11, 10, 9, 7, 6, 5}, + { {"01111000011"_b, "_vsslrs"}, + }, + }, + + { "_ntxnpq", + {18, 17}, + { {"0x"_b, "ld3_asisdlsep_r3_r"}, + {"10"_b, "ld3_asisdlsep_r3_r"}, + {"11"_b, "ld3_asisdlsep_i3_i"}, + }, + }, + + { "_nvkthr", + {30, 13}, + { {"00"_b, "_kjqynn"}, + {"01"_b, "_jgyhrh"}, + {"10"_b, "_jymnkk"}, + {"11"_b, "_pqjjsh"}, + }, + }, + + { "_nvkxzs", + {12}, + { {"0"_b, "gcsstr_64_ldst_gcs"}, + {"1"_b, "gcssttr_64_ldst_gcs"}, + }, + }, + + { "_nvnjyp", + {23, 22, 20, 19, 11}, + { {"00000"_b, "mvni_asimdimm_m_sm"}, + }, + }, + + { "_nvyxmh", + {20, 19, 18, 17, 16}, + { {"00000"_b, "add_z_p_zz"}, + {"00001"_b, "sub_z_p_zz"}, + {"00011"_b, "subr_z_p_zz"}, + {"01000"_b, "smax_z_p_zz"}, + {"01001"_b, "umax_z_p_zz"}, + {"01010"_b, "smin_z_p_zz"}, + {"01011"_b, "umin_z_p_zz"}, + {"01100"_b, "sabd_z_p_zz"}, + {"01101"_b, "uabd_z_p_zz"}, + {"10000"_b, "mul_z_p_zz"}, + {"10010"_b, "smulh_z_p_zz"}, + {"10011"_b, "umulh_z_p_zz"}, + {"10100"_b, "sdiv_z_p_zz"}, + {"10101"_b, "udiv_z_p_zz"}, + {"10110"_b, "sdivr_z_p_zz"}, + {"10111"_b, "udivr_z_p_zz"}, + {"11000"_b, "orr_z_p_zz"}, + {"11001"_b, "eor_z_p_zz"}, + {"11010"_b, "and_z_p_zz"}, + {"11011"_b, "bic_z_p_zz"}, + }, + }, + + { "_nvzsxn", + {18, 17, 12}, + { {"000"_b, "stl1_asisdlso_d1"}, + }, + }, + + { "_nxjkqs", + {23, 22, 12, 11, 10}, + { {"0x000"_b, "fmla_z_zzzi_h"}, + {"0x001"_b, "fmls_z_zzzi_h"}, + {"10000"_b, "fmla_z_zzzi_s"}, + {"10001"_b, "fmls_z_zzzi_s"}, + {"101xx"_b, "fcmla_z_zzzi_h"}, + {"11000"_b, "fmla_z_zzzi_d"}, + {"11001"_b, "fmls_z_zzzi_d"}, + {"111xx"_b, "fcmla_z_zzzi_s"}, + }, + }, + + { "_nxlmhz", + {30, 23}, + { {"00"_b, "add_32_addsub_imm"}, + {"10"_b, "sub_32_addsub_imm"}, + }, + }, + + { "_nxlsjm", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxrb_lr32_ldstexcl"}, + }, + }, + + { "_nxmgqz", + {18}, + { {"0"_b, "ld1_asisdlso_h1_1h"}, + }, + }, + + { "_nxrqmg", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlur_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapur_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursw_64_ldapstl_unscaled"}, + {"100xx00"_b, "stlur_64_ldapstl_unscaled"}, + {"101xx00"_b, "ldapur_64_ldapstl_unscaled"}, + 
{"x000001"_b, "cpyfpn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtn_cpy_memcms"}, + {"x001001"_b, "cpyfprtn_cpy_memcms"}, + {"x001101"_b, "cpyfptn_cpy_memcms"}, + {"x010001"_b, "cpyfmn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtn_cpy_memcms"}, + {"x011001"_b, "cpyfmrtn_cpy_memcms"}, + {"x011101"_b, "cpyfmtn_cpy_memcms"}, + {"x100001"_b, "cpyfen_cpy_memcms"}, + {"x100101"_b, "cpyfewtn_cpy_memcms"}, + {"x101001"_b, "cpyfertn_cpy_memcms"}, + {"x101101"_b, "cpyfetn_cpy_memcms"}, + }, + }, + + { "_nxttqn", + {13, 12, 11, 10}, + { {"1111"_b, "casl_c32_ldstexcl"}, + }, + }, + + { "_nygsjm", + {18}, + { {"0"_b, "st2_asisdlso_b2_2b"}, + }, + }, + + { "_nyjtng", + {23, 22}, + { {"01"_b, "fmls_z_p_zzz"}, + {"1x"_b, "fmls_z_p_zzz"}, + }, + }, + + { "_nynrns", + {23, 22, 12}, + { {"000"_b, "_klxxgx"}, + {"001"_b, "_pglvnj"}, + {"010"_b, "_pzttrn"}, + {"011"_b, "_svyszp"}, + {"110"_b, "_prrkzv"}, + {"111"_b, "_nsgvsv"}, + }, + }, + + { "_nzmqhv", + {23, 22, 20, 19, 18, 17, 16}, + { {"0x00001"_b, "frint64x_asimdmisc_r"}, + {"0x10000"_b, "fmaxv_asimdall_only_sd"}, + {"1111000"_b, "fneg_asimdmiscfp16_r"}, + {"1111001"_b, "fsqrt_asimdmiscfp16_r"}, + {"1x00000"_b, "fneg_asimdmisc_r"}, + {"1x00001"_b, "fsqrt_asimdmisc_r"}, + {"1x10000"_b, "fminv_asimdall_only_sd"}, + }, + }, + + { "_nzqxrj", + {12}, + { {"1"_b, "_qgvtrn"}, + }, + }, + + { "_nzskzl", + {13, 12, 11, 10}, + { {"0000"_b, "uaddl_asimddiff_l"}, + {"0001"_b, "uhadd_asimdsame_only"}, + {"0010"_b, "_mmxgrt"}, + {"0011"_b, "uqadd_asimdsame_only"}, + {"0100"_b, "uaddw_asimddiff_w"}, + {"0101"_b, "urhadd_asimdsame_only"}, + {"0111"_b, "_yyvnrp"}, + {"1000"_b, "usubl_asimddiff_l"}, + {"1001"_b, "uhsub_asimdsame_only"}, + {"1010"_b, "_vlhkgr"}, + {"1011"_b, "uqsub_asimdsame_only"}, + {"1100"_b, "usubw_asimddiff_w"}, + {"1101"_b, "cmhi_asimdsame_only"}, + {"1110"_b, "_srpptk"}, + {"1111"_b, "cmhs_asimdsame_only"}, + }, + }, + + { "_nzvlzt", + {18}, + { {"0"_b, "st1_asisdlse_r4_4v"}, + }, + }, + + { "_pgjjsz", + {30, 13, 12, 11, 10}, + { {"00000"_b, "_lmyxhr"}, + {"00001"_b, "_tmhlvh"}, + {"00010"_b, "_qvtxpr"}, + {"00011"_b, "_ymkthj"}, + {"00100"_b, "_rhmxyp"}, + {"00101"_b, "_zryvjk"}, + {"01000"_b, "zip1_z_zz"}, + {"01001"_b, "zip2_z_zz"}, + {"01010"_b, "uzp1_z_zz"}, + {"01011"_b, "uzp2_z_zz"}, + {"01100"_b, "trn1_z_zz"}, + {"01101"_b, "trn2_z_zz"}, + {"10000"_b, "_llvrrk"}, + {"10001"_b, "_qyjvqr"}, + {"10010"_b, "_tmtnkq"}, + {"10011"_b, "_gpxltv"}, + {"10100"_b, "_pnlnzt"}, + {"10101"_b, "_pygvrr"}, + {"11000"_b, "addhnb_z_zz"}, + {"11001"_b, "addhnt_z_zz"}, + {"11010"_b, "raddhnb_z_zz"}, + {"11011"_b, "raddhnt_z_zz"}, + {"11100"_b, "subhnb_z_zz"}, + {"11101"_b, "subhnt_z_zz"}, + {"11110"_b, "rsubhnb_z_zz"}, + {"11111"_b, "rsubhnt_z_zz"}, + }, + }, + + { "_pglvnj", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_s_floatimm"}, + }, + }, + + { "_pgmlrt", + {30, 23, 22}, + { {"000"_b, "stxrb_sr32_ldstexcl"}, + {"001"_b, "_nxlsjm"}, + {"010"_b, "_nnrtpm"}, + {"011"_b, "_sksvrn"}, + {"100"_b, "stxrh_sr32_ldstexcl"}, + {"101"_b, "_knpjtt"}, + {"110"_b, "_zqhhlq"}, + {"111"_b, "_xtzykp"}, + }, + }, + + { "_pgvjgs", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_s_ldapstl_simd"}, + {"001xx10"_b, "ldapur_s_ldapstl_simd"}, + {"100xx10"_b, "stlur_d_ldapstl_simd"}, + {"101xx10"_b, "ldapur_d_ldapstl_simd"}, + {"x000001"_b, "cpypwn_cpy_memcms"}, + {"x000101"_b, "cpypwtwn_cpy_memcms"}, + {"x001001"_b, "cpyprtwn_cpy_memcms"}, + {"x001101"_b, "cpyptwn_cpy_memcms"}, + {"x010001"_b, "cpymwn_cpy_memcms"}, + {"x010101"_b, "cpymwtwn_cpy_memcms"}, + {"x011001"_b, 
"cpymrtwn_cpy_memcms"}, + {"x011101"_b, "cpymtwn_cpy_memcms"}, + {"x100001"_b, "cpyewn_cpy_memcms"}, + {"x100101"_b, "cpyewtwn_cpy_memcms"}, + {"x101001"_b, "cpyertwn_cpy_memcms"}, + {"x101101"_b, "cpyetwn_cpy_memcms"}, + {"x110001"_b, "setgm_set_memcms"}, + {"x110101"_b, "setgmt_set_memcms"}, + {"x111001"_b, "setgmn_set_memcms"}, + {"x111101"_b, "setgmtn_set_memcms"}, + }, + }, + + { "_phjkhr", + {9, 8, 7, 6, 5}, + { {"11111"_b, "autdzb_64z_dp_1src"}, + }, + }, + + { "_phktvp", + {7, 6, 4, 3, 2, 1, 0}, + { {"0111111"_b, "clrex_bn_barriers"}, + {"1011111"_b, "dsb_bo_barriers"}, + {"1111111"_b, "isb_bi_barriers"}, + }, + }, + + { "_phpphm", + {18}, + { {"0"_b, "st4_asisdlso_h4_4h"}, + }, + }, + + { "_phrqqx", + {23, 22, 13}, + { {"100"_b, "fmlal_asimdelem_lh"}, + {"xx1"_b, "smlal_asimdelem_l"}, + }, + }, + + { "_phsrlk", + {23, 22, 13}, + { {"000"_b, "fmla_asimdelem_rh_h"}, + {"1x0"_b, "fmla_asimdelem_r_sd"}, + {"xx1"_b, "sqdmlal_asimdelem_l"}, + }, + }, + + { "_phthqj", + {30, 13}, + { {"00"_b, "_sntyqy"}, + {"01"_b, "_xhlhmh"}, + {"10"_b, "_rtrlts"}, + {"11"_b, "_jzkqhn"}, + }, + }, + + { "_phtxqg", + {13, 10}, + { {"00"_b, "_vrjhtm"}, + {"01"_b, "_spktyg"}, + {"10"_b, "_nktrpj"}, + {"11"_b, "_vzvstm"}, + }, + }, + + { "_pjgkjs", + {18, 17}, + { {"00"_b, "_mxnzyr"}, + }, + }, + + { "_pjhmvy", + {20, 19, 18, 17, 16}, + { {"00000"_b, "saddlp_asimdmisc_p"}, + {"00001"_b, "xtn_asimdmisc_n"}, + }, + }, + + { "_pjlnhh", + {30, 23}, + { {"00"_b, "and_64_log_imm"}, + {"01"_b, "movn_64_movewide"}, + {"10"_b, "eor_64_log_imm"}, + {"11"_b, "movz_64_movewide"}, + }, + }, + + { "_pjskhr", + {18, 17}, + { {"00"_b, "st3_asisdlso_s3_3s"}, + }, + }, + + { "_pjvkjz", + {13, 12}, + { {"00"_b, "sbc_64_addsub_carry"}, + }, + }, + + { "_pkjqsy", + {20, 19, 18, 17, 16, 13, 12}, + { {"0000001"_b, "cnt_32_dp_1src"}, + }, + }, + + { "_pkpvmj", + {13, 12, 11, 10}, + { {"1111"_b, "casa_c64_ldstexcl"}, + }, + }, + + { "_pkqvxk", + {12}, + { {"1"_b, "_ynsytg"}, + }, + }, + + { "_pkskpp", + {30, 23}, + { {"00"_b, "adds_64s_addsub_imm"}, + {"10"_b, "subs_64s_addsub_imm"}, + }, + }, + + { "_plgrmv", + {13, 12}, + { {"00"_b, "adcs_64_addsub_carry"}, + }, + }, + + { "_plrggq", + {23, 22, 13}, + { {"000"_b, "fmls_asimdelem_rh_h"}, + {"1x0"_b, "fmls_asimdelem_r_sd"}, + {"xx1"_b, "sqdmlsl_asimdelem_l"}, + }, + }, + + { "_plyhhz", + {20, 19, 18, 17, 16}, + { {"00000"_b, "cmge_asisdmisc_z"}, + }, + }, + + { "_plymgg", + {18}, + { {"1"_b, "frsqrte_z_z"}, + }, + }, + + { "_plytvr", + {22}, + { {"0"_b, "str_32_ldst_regoff"}, + {"1"_b, "ldr_32_ldst_regoff"}, + }, + }, + + { "_plyxlq", + {30, 18}, + { {"00"_b, "_nkyynq"}, + }, + }, + + { "_plzqrv", + {23, 22, 20, 19, 12, 11, 10}, + { {"00x1001"_b, "sqshrun_asisdshf_n"}, + {"00x1011"_b, "sqrshrun_asisdshf_n"}, + {"00x1101"_b, "uqshrn_asisdshf_n"}, + {"00x1111"_b, "uqrshrn_asisdshf_n"}, + {"00xx1x0"_b, "fmulx_asisdelem_rh_h"}, + {"010x001"_b, "sqshrun_asisdshf_n"}, + {"010x011"_b, "sqrshrun_asisdshf_n"}, + {"010x101"_b, "uqshrn_asisdshf_n"}, + {"010x111"_b, "uqrshrn_asisdshf_n"}, + {"0111001"_b, "sqshrun_asisdshf_n"}, + {"0111011"_b, "sqrshrun_asisdshf_n"}, + {"0111101"_b, "uqshrn_asisdshf_n"}, + {"0111111"_b, "uqrshrn_asisdshf_n"}, + {"0x10001"_b, "sqshrun_asisdshf_n"}, + {"0x10011"_b, "sqrshrun_asisdshf_n"}, + {"0x10101"_b, "uqshrn_asisdshf_n"}, + {"0x10111"_b, "uqrshrn_asisdshf_n"}, + {"1xxx1x0"_b, "fmulx_asisdelem_r_sd"}, + }, + }, + + { "_pmpsvs", + {18, 17, 12}, + { {"000"_b, "ld2_asisdlso_d2_2d"}, + }, + }, + + { "_pnkxsr", + {22, 20}, + { {"00"_b, "_hnsvjh"}, + {"01"_b, 
"mrs_rs_systemmove"}, + {"11"_b, "mrrs_rs_systemmovepr"}, + }, + }, + + { "_pnlnzt", + {23, 18, 17, 16}, + { {"0000"_b, "sqxtunb_z_zz"}, + }, + }, + + { "_ppnssm", + {30, 13, 12}, + { {"000"_b, "_ktyppm"}, + {"001"_b, "_ngzyqj"}, + {"010"_b, "_yxnslx"}, + {"011"_b, "_nnkxgr"}, + {"100"_b, "_kzmvpk"}, + {"101"_b, "_thrxph"}, + {"110"_b, "_kgpgly"}, + {"111"_b, "_yppszx"}, + }, + }, + + { "_pppsmg", + {30}, + { {"0"_b, "_xyhmgh"}, + {"1"_b, "_rlrjxp"}, + }, + }, + + { "_ppvnly", + {18, 17}, + { {"0x"_b, "ld2_asisdlsop_sx2_r2s"}, + {"10"_b, "ld2_asisdlsop_sx2_r2s"}, + {"11"_b, "ld2_asisdlsop_s2_i2s"}, + }, + }, + + { "_ppyynh", + {23, 22}, + { {"00"_b, "fmla_asisdelem_rh_h"}, + {"1x"_b, "fmla_asisdelem_r_sd"}, + }, + }, + + { "_pqjjsh", + {23, 22, 12, 10}, + { {"1000"_b, "fmlslb_z_zzzi_s"}, + {"1001"_b, "fmlslt_z_zzzi_s"}, + }, + }, + + { "_pqmqrg", + {30, 23, 22}, + { {"000"_b, "stp_s_ldstpair_off"}, + {"001"_b, "ldp_s_ldstpair_off"}, + {"010"_b, "stp_s_ldstpair_pre"}, + {"011"_b, "ldp_s_ldstpair_pre"}, + {"100"_b, "stp_d_ldstpair_off"}, + {"101"_b, "ldp_d_ldstpair_off"}, + {"110"_b, "stp_d_ldstpair_pre"}, + {"111"_b, "ldp_d_ldstpair_pre"}, + }, + }, + + { "_pqsvty", + {13}, + { {"0"_b, "_qqslmv"}, + {"1"_b, "_gjxsrn"}, + }, + }, + + { "_prgrzz", + {30}, + { {"0"_b, "cbnz_32_compbranch"}, + }, + }, + + { "_prjzxs", + {12}, + { {"0"_b, "ld2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_prkmty", + {23, 22, 9}, + { {"000"_b, "brkpa_p_p_pp"}, + {"010"_b, "brkpas_p_p_pp"}, + }, + }, + + { "_prmjlz", + {18, 17}, + { {"00"_b, "st1_asisdlse_r1_1v"}, + }, + }, + + { "_prrkzv", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fmov_h_floatdp1"}, + {"000010"_b, "fneg_h_floatdp1"}, + {"000100"_b, "fcvt_sh_floatdp1"}, + {"001000"_b, "frintn_h_floatdp1"}, + {"001010"_b, "frintm_h_floatdp1"}, + {"001100"_b, "frinta_h_floatdp1"}, + {"001110"_b, "frintx_h_floatdp1"}, + }, + }, + + { "_prtvjm", + {23, 22, 12, 11, 10}, + { {"10000"_b, "fadd_z_zz"}, + {"10001"_b, "fsub_z_zz"}, + {"10010"_b, "fmul_z_zz"}, + {"x1000"_b, "fadd_z_zz"}, + {"x1001"_b, "fsub_z_zz"}, + {"x1010"_b, "fmul_z_zz"}, + {"xx011"_b, "ftsmul_z_zz"}, + {"xx110"_b, "frecps_z_zz"}, + {"xx111"_b, "frsqrts_z_zz"}, + }, + }, + + { "_prxyhr", + {9, 8, 7, 6, 5}, + { {"11111"_b, "autiza_64z_dp_1src"}, + }, + }, + + { "_prytjs", + {18, 4}, + { {"00"_b, "fcmge_p_p_z0"}, + {"01"_b, "fcmgt_p_p_z0"}, + }, + }, + + { "_pstgvl", + {23}, + { {"0"_b, "fmaxnm_asimdsame_only"}, + {"1"_b, "fminnm_asimdsame_only"}, + }, + }, + + { "_ptjyqx", + {13}, + { {"0"_b, "fcmuo_p_p_zz"}, + }, + }, + + { "_ptkgrz", + {22}, + { {"0"_b, "ldrsw_64_ldst_regoff"}, + }, + }, + + { "_ptqtmp", + {13, 12, 11, 10}, + { {"0111"_b, "fmulx_asisdsamefp16_only"}, + {"1001"_b, "fcmeq_asisdsamefp16_only"}, + {"1111"_b, "frecps_asisdsamefp16_only"}, + }, + }, + + { "_ptsjnr", + {30, 20, 19, 18, 17, 16, 13}, + { {"0000000"_b, "asr_z_p_zi"}, + {"0000010"_b, "lsr_z_p_zi"}, + {"0000110"_b, "lsl_z_p_zi"}, + {"0001000"_b, "asrd_z_p_zi"}, + {"0001100"_b, "sqshl_z_p_zi"}, + {"0001110"_b, "uqshl_z_p_zi"}, + {"0011000"_b, "srshr_z_p_zi"}, + {"0011010"_b, "urshr_z_p_zi"}, + {"0011110"_b, "sqshlu_z_p_zi"}, + {"0100000"_b, "asr_z_p_zz"}, + {"0100001"_b, "sxtb_z_p_z"}, + {"0100010"_b, "lsr_z_p_zz"}, + {"0100011"_b, "uxtb_z_p_z"}, + {"0100101"_b, "sxth_z_p_z"}, + {"0100110"_b, "lsl_z_p_zz"}, + {"0100111"_b, "uxth_z_p_z"}, + {"0101000"_b, "asrr_z_p_zz"}, + {"0101001"_b, "sxtw_z_p_z"}, + {"0101010"_b, "lsrr_z_p_zz"}, + {"0101011"_b, "uxtw_z_p_z"}, + {"0101101"_b, "abs_z_p_z"}, + {"0101110"_b, "lslr_z_p_zz"}, + 
{"0101111"_b, "neg_z_p_z"}, + {"0110000"_b, "asr_z_p_zw"}, + {"0110001"_b, "cls_z_p_z"}, + {"0110010"_b, "lsr_z_p_zw"}, + {"0110011"_b, "clz_z_p_z"}, + {"0110101"_b, "cnt_z_p_z"}, + {"0110110"_b, "lsl_z_p_zw"}, + {"0110111"_b, "cnot_z_p_z"}, + {"0111001"_b, "fabs_z_p_z"}, + {"0111011"_b, "fneg_z_p_z"}, + {"0111101"_b, "not_z_p_z"}, + {"1000001"_b, "urecpe_z_p_z"}, + {"1000011"_b, "ursqrte_z_p_z"}, + {"1000100"_b, "srshl_z_p_zz"}, + {"1000110"_b, "urshl_z_p_zz"}, + {"1001001"_b, "sadalp_z_p_z"}, + {"1001011"_b, "uadalp_z_p_z"}, + {"1001100"_b, "srshlr_z_p_zz"}, + {"1001110"_b, "urshlr_z_p_zz"}, + {"1010000"_b, "sqshl_z_p_zz"}, + {"1010001"_b, "sqabs_z_p_z"}, + {"1010010"_b, "uqshl_z_p_zz"}, + {"1010011"_b, "sqneg_z_p_z"}, + {"1010100"_b, "sqrshl_z_p_zz"}, + {"1010110"_b, "uqrshl_z_p_zz"}, + {"1011000"_b, "sqshlr_z_p_zz"}, + {"1011010"_b, "uqshlr_z_p_zz"}, + {"1011100"_b, "sqrshlr_z_p_zz"}, + {"1011110"_b, "uqrshlr_z_p_zz"}, + {"1100000"_b, "shadd_z_p_zz"}, + {"1100010"_b, "uhadd_z_p_zz"}, + {"1100011"_b, "addp_z_p_zz"}, + {"1100100"_b, "shsub_z_p_zz"}, + {"1100110"_b, "uhsub_z_p_zz"}, + {"1101000"_b, "srhadd_z_p_zz"}, + {"1101001"_b, "smaxp_z_p_zz"}, + {"1101010"_b, "urhadd_z_p_zz"}, + {"1101011"_b, "umaxp_z_p_zz"}, + {"1101100"_b, "shsubr_z_p_zz"}, + {"1101101"_b, "sminp_z_p_zz"}, + {"1101110"_b, "uhsubr_z_p_zz"}, + {"1101111"_b, "uminp_z_p_zz"}, + {"1110000"_b, "sqadd_z_p_zz"}, + {"1110010"_b, "uqadd_z_p_zz"}, + {"1110100"_b, "sqsub_z_p_zz"}, + {"1110110"_b, "uqsub_z_p_zz"}, + {"1111000"_b, "suqadd_z_p_zz"}, + {"1111010"_b, "usqadd_z_p_zz"}, + {"1111100"_b, "sqsubr_z_p_zz"}, + {"1111110"_b, "uqsubr_z_p_zz"}, + }, + }, + + { "_ptslzg", + {30, 23, 22, 13, 4}, + { {"01000"_b, "ldr_p_bi"}, + {"01100"_b, "prfb_i_p_bi_s"}, + {"01110"_b, "prfh_i_p_bi_s"}, + {"10x0x"_b, "ld1sw_z_p_bz_d_x32_scaled"}, + {"10x1x"_b, "ldff1sw_z_p_bz_d_x32_scaled"}, + }, + }, + + { "_ptstkz", + {4}, + { {"0"_b, "ccmp_32_condcmp_imm"}, + }, + }, + + { "_ptyynt", + {13, 12, 11, 10}, + { {"1111"_b, "_stmtkr"}, + }, + }, + + { "_pvtyjz", + {30}, + { {"0"_b, "ldapur_32_ldapstl_unscaled"}, + {"1"_b, "ldapur_64_ldapstl_unscaled"}, + }, + }, + + { "_pxnyvl", + {23, 13, 12}, + { {"001"_b, "fmulx_asisdsame_only"}, + {"011"_b, "frecps_asisdsame_only"}, + {"111"_b, "frsqrts_asisdsame_only"}, + }, + }, + + { "_pxvjkp", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_rmkpsk"}, + }, + }, + + { "_pxyrpm", + {22, 11}, + { {"00"_b, "sqdmulh_z_zzi_s"}, + {"01"_b, "mul_z_zzi_s"}, + {"10"_b, "sqdmulh_z_zzi_d"}, + {"11"_b, "mul_z_zzi_d"}, + }, + }, + + { "_pxzvjl", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xxxx"_b, "fnmadd_s_floatdp3"}, + {"001xxxx"_b, "fnmadd_d_floatdp3"}, + {"011xxxx"_b, "fnmadd_h_floatdp3"}, + {"10001x0"_b, "fmla_asisdelem_rh_h"}, + {"10x0001"_b, "sshr_asisdshf_r"}, + {"10x0101"_b, "ssra_asisdshf_r"}, + {"10x1001"_b, "srshr_asisdshf_r"}, + {"10x1101"_b, "srsra_asisdshf_r"}, + {"11x01x0"_b, "fmla_asisdelem_r_sd"}, + {"1xx11x0"_b, "sqdmlal_asisdelem_l"}, + }, + }, + + { "_pygvrr", + {23, 18, 17, 16}, + { {"0000"_b, "sqxtunt_z_zz"}, + }, + }, + + { "_pyhrrt", + {30, 23, 22, 13, 12, 11, 10}, + { {"10001x0"_b, "fmulx_asisdelem_rh_h"}, + {"10x0001"_b, "sqshrun_asisdshf_n"}, + {"10x0011"_b, "sqrshrun_asisdshf_n"}, + {"10x0101"_b, "uqshrn_asisdshf_n"}, + {"10x0111"_b, "uqrshrn_asisdshf_n"}, + {"11x01x0"_b, "fmulx_asisdelem_r_sd"}, + }, + }, + + { "_pyjnpz", + {30, 13}, + { {"00"_b, "_xpqglq"}, + {"10"_b, "_ryrkqt"}, + {"11"_b, "_zjzmvh"}, + }, + }, + + { "_pyjtyn", + {22, 20, 19, 18, 17, 16}, + { {"111001"_b, 
"fcvtau_asisdmiscfp16_r"}, + {"x00001"_b, "fcvtau_asisdmisc_r"}, + {"x10000"_b, "fmaxnmp_asisdpair_only_sd"}, + }, + }, + + { "_pyttkp", + {30, 13, 12, 11, 10}, + { {"10001"_b, "sqrdmlah_asisdsame2_only"}, + {"10011"_b, "sqrdmlsh_asisdsame2_only"}, + }, + }, + + { "_pyvvqx", + {10}, + { {"0"_b, "_rkrntt"}, + }, + }, + + { "_pzpxxv", + {23, 22, 20, 19, 11}, + { {"00011"_b, "fcvtzs_asisdshf_c"}, + {"001x1"_b, "fcvtzs_asisdshf_c"}, + {"01xx1"_b, "fcvtzs_asisdshf_c"}, + }, + }, + + { "_pzttrn", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fmov_d_floatdp1"}, + {"000010"_b, "fneg_d_floatdp1"}, + {"000100"_b, "fcvt_sd_floatdp1"}, + {"000110"_b, "bfcvt_bs_floatdp1"}, + {"001000"_b, "frintn_d_floatdp1"}, + {"001010"_b, "frintm_d_floatdp1"}, + {"001100"_b, "frinta_d_floatdp1"}, + {"001110"_b, "frintx_d_floatdp1"}, + {"010000"_b, "frint32z_d_floatdp1"}, + {"010010"_b, "frint64z_d_floatdp1"}, + }, + }, + + { "_pzzgts", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_s_ldapstl_simd"}, + {"001xx10"_b, "ldapur_s_ldapstl_simd"}, + {"100xx10"_b, "stlur_d_ldapstl_simd"}, + {"101xx10"_b, "ldapur_d_ldapstl_simd"}, + {"x000001"_b, "cpyp_cpy_memcms"}, + {"x000101"_b, "cpypwt_cpy_memcms"}, + {"x001001"_b, "cpyprt_cpy_memcms"}, + {"x001101"_b, "cpypt_cpy_memcms"}, + {"x010001"_b, "cpym_cpy_memcms"}, + {"x010101"_b, "cpymwt_cpy_memcms"}, + {"x011001"_b, "cpymrt_cpy_memcms"}, + {"x011101"_b, "cpymt_cpy_memcms"}, + {"x100001"_b, "cpye_cpy_memcms"}, + {"x100101"_b, "cpyewt_cpy_memcms"}, + {"x101001"_b, "cpyert_cpy_memcms"}, + {"x101101"_b, "cpyet_cpy_memcms"}, + {"x110001"_b, "setgp_set_memcms"}, + {"x110101"_b, "setgpt_set_memcms"}, + {"x111001"_b, "setgpn_set_memcms"}, + {"x111101"_b, "setgptn_set_memcms"}, + }, + }, + + { "_qgqgkx", + {30, 23, 22}, + { {"000"_b, "adds_32s_addsub_ext"}, + {"100"_b, "subs_32s_addsub_ext"}, + }, + }, + + { "_qgshrr", + {30, 22, 20, 19, 18, 17, 16}, + { {"00xxxxx"_b, "stlxp_sp32_ldstexcl"}, + {"0111111"_b, "ldaxp_lp32_ldstexcl"}, + {"10xxxxx"_b, "stlxp_sp64_ldstexcl"}, + {"1111111"_b, "ldaxp_lp64_ldstexcl"}, + }, + }, + + { "_qgsrqq", + {23, 22}, + { {"00"_b, "fmadd_s_floatdp3"}, + {"01"_b, "fmadd_d_floatdp3"}, + {"11"_b, "fmadd_h_floatdp3"}, + }, + }, + + { "_qgvrqy", + {1}, + { {"1"_b, "blraaz_64_branch_reg"}, + }, + }, + + { "_qgvtrn", + {23, 22, 20, 19, 13, 11, 10}, + { {"00x1001"_b, "sqshrn_asisdshf_n"}, + {"00x1011"_b, "sqrshrn_asisdshf_n"}, + {"00xx0x0"_b, "fmul_asisdelem_rh_h"}, + {"010x001"_b, "sqshrn_asisdshf_n"}, + {"010x011"_b, "sqrshrn_asisdshf_n"}, + {"0111001"_b, "sqshrn_asisdshf_n"}, + {"0111011"_b, "sqrshrn_asisdshf_n"}, + {"0x10001"_b, "sqshrn_asisdshf_n"}, + {"0x10011"_b, "sqrshrn_asisdshf_n"}, + {"1xxx0x0"_b, "fmul_asisdelem_r_sd"}, + {"xxxx1x0"_b, "sqdmull_asisdelem_l"}, + }, + }, + + { "_qgymsy", + {11}, + { {"0"_b, "_hmsgpj"}, + }, + }, + + { "_qgyppr", + {23, 13, 12, 11, 10}, + { {"00010"_b, "_pyjtyn"}, + {"00110"_b, "_nhrkqm"}, + {"01001"_b, "fcmge_asisdsame_only"}, + {"01011"_b, "facge_asisdsame_only"}, + {"01110"_b, "_kxmjsh"}, + {"10010"_b, "_rpjgkh"}, + {"10101"_b, "fabd_asisdsame_only"}, + {"10110"_b, "_hmpzzg"}, + {"11001"_b, "fcmgt_asisdsame_only"}, + {"11011"_b, "facgt_asisdsame_only"}, + {"11110"_b, "_sxsxxt"}, + }, + }, + + { "_qhpkhm", + {18, 17}, + { {"00"_b, "ld1_asisdlse_r3_3v"}, + }, + }, + + { "_qhzvvh", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + }, + }, + + { "_qjqrgz", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldarh_lr32_ldstexcl"}, + }, + }, + + { "_qjrllr", + {23, 22, 12}, + { {"000"_b, "_pqsvty"}, + {"001"_b, "_rjrqxt"}, + 
{"010"_b, "_rnsmjq"}, + {"011"_b, "_msvhjv"}, + {"110"_b, "_rnlxtv"}, + {"111"_b, "_jjgpxz"}, + }, + }, + + { "_qjstll", + {18, 17}, + { {"0x"_b, "ld3_asisdlsop_sx3_r3s"}, + {"10"_b, "ld3_asisdlsop_sx3_r3s"}, + {"11"_b, "ld3_asisdlsop_s3_i3s"}, + }, + }, + + { "_qkhrkh", + {20, 19, 18, 17, 16}, + { {"00000"_b, "cmle_asisdmisc_z"}, + }, + }, + + { "_qkrnms", + {30}, + { {"0"_b, "orr_32_log_shift"}, + {"1"_b, "ands_32_log_shift"}, + }, + }, + + { "_qkxmvp", + {13, 12}, + { {"10"_b, "smin_64_dp_2src"}, + }, + }, + + { "_qkzjxm", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ldnt1w_z_p_bi_contiguous"}, + {"000x0"_b, "ldnt1w_z_p_br_contiguous"}, + {"00101"_b, "ld3w_z_p_bi_contiguous"}, + {"001x0"_b, "ld3w_z_p_br_contiguous"}, + {"01001"_b, "ldnt1d_z_p_bi_contiguous"}, + {"010x0"_b, "ldnt1d_z_p_br_contiguous"}, + {"01101"_b, "ld3d_z_p_bi_contiguous"}, + {"011x0"_b, "ld3d_z_p_br_contiguous"}, + {"10011"_b, "stnt1w_z_p_bi_contiguous"}, + {"100x0"_b, "st1w_z_p_bz_d_x32_unscaled"}, + {"10101"_b, "st1w_z_p_bi"}, + {"10111"_b, "st3w_z_p_bi_contiguous"}, + {"101x0"_b, "st1w_z_p_bz_s_x32_unscaled"}, + {"11011"_b, "stnt1d_z_p_bi_contiguous"}, + {"110x0"_b, "st1d_z_p_bz_d_x32_unscaled"}, + {"11111"_b, "st3d_z_p_bi_contiguous"}, + }, + }, + + { "_qljhnp", + {22}, + { {"0"_b, "sqdmullt_z_zzi_s"}, + {"1"_b, "sqdmullt_z_zzi_d"}, + }, + }, + + { "_qlmqyx", + {18, 17, 12}, + { {"0x0"_b, "ld3_asisdlsop_dx3_r3d"}, + {"100"_b, "ld3_asisdlsop_dx3_r3d"}, + {"110"_b, "ld3_asisdlsop_d3_i3d"}, + }, + }, + + { "_qlpnnn", + {23, 10, 4}, + { {"000"_b, "_vryrnh"}, + }, + }, + + { "_qlxksl", + {30}, + { {"0"_b, "_hrxyts"}, + {"1"_b, "_tytvjk"}, + }, + }, + + { "_qlxlxk", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldar_lr32_ldstexcl"}, + }, + }, + + { "_qlzvpg", + {13, 12, 11, 10}, + { {"0000"_b, "raddhn_asimddiff_n"}, + {"0001"_b, "ushl_asimdsame_only"}, + {"0010"_b, "_kpnlmr"}, + {"0011"_b, "uqshl_asimdsame_only"}, + {"0100"_b, "uabal_asimddiff_l"}, + {"0101"_b, "urshl_asimdsame_only"}, + {"0110"_b, "_ssqyrk"}, + {"0111"_b, "uqrshl_asimdsame_only"}, + {"1000"_b, "rsubhn_asimddiff_n"}, + {"1001"_b, "umax_asimdsame_only"}, + {"1010"_b, "_sjlqvg"}, + {"1011"_b, "umin_asimdsame_only"}, + {"1100"_b, "uabdl_asimddiff_l"}, + {"1101"_b, "uabd_asimdsame_only"}, + {"1110"_b, "_gplkxy"}, + {"1111"_b, "uaba_asimdsame_only"}, + }, + }, + + { "_qnprqt", + {4}, + { {"0"_b, "eor_p_p_pp_z"}, + {"1"_b, "sel_p_p_pp"}, + }, + }, + + { "_qntrvk", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xxxx"_b, "fnmsub_s_floatdp3"}, + {"001xxxx"_b, "fnmsub_d_floatdp3"}, + {"011xxxx"_b, "fnmsub_h_floatdp3"}, + {"10001x0"_b, "fmul_asisdelem_rh_h"}, + {"10x0101"_b, "sqshrn_asisdshf_n"}, + {"10x0111"_b, "sqrshrn_asisdshf_n"}, + {"11x01x0"_b, "fmul_asisdelem_r_sd"}, + {"1xx11x0"_b, "sqdmull_asisdelem_l"}, + }, + }, + + { "_qnysqv", + {30}, + { {"0"_b, "cbnz_64_compbranch"}, + }, + }, + + { "_qpgxxr", + {23, 22}, + { {"01"_b, "fadd_asimdsamefp16_only"}, + {"11"_b, "fsub_asimdsamefp16_only"}, + }, + }, + + { "_qpsryx", + {30, 23, 22, 11, 10}, + { {"01000"_b, "csel_64_condsel"}, + {"01001"_b, "csinc_64_condsel"}, + {"11000"_b, "csinv_64_condsel"}, + {"11001"_b, "csneg_64_condsel"}, + }, + }, + + { "_qpvgnh", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld2b_z_p_bi_contiguous"}, + {"000x0"_b, "ld2b_z_p_br_contiguous"}, + {"00101"_b, "ld4b_z_p_bi_contiguous"}, + {"001x0"_b, "ld4b_z_p_br_contiguous"}, + {"01001"_b, "ld2h_z_p_bi_contiguous"}, + {"010x0"_b, "ld2h_z_p_br_contiguous"}, + {"01101"_b, "ld4h_z_p_bi_contiguous"}, + {"011x0"_b, "ld4h_z_p_br_contiguous"}, + {"10011"_b, 
"st2b_z_p_bi_contiguous"}, + {"10111"_b, "st4b_z_p_bi_contiguous"}, + {"10x01"_b, "st1b_z_p_bi"}, + {"11011"_b, "st2h_z_p_bi_contiguous"}, + {"110x0"_b, "st1h_z_p_bz_d_x32_scaled"}, + {"11111"_b, "st4h_z_p_bi_contiguous"}, + {"111x0"_b, "st1h_z_p_bz_s_x32_scaled"}, + {"11x01"_b, "st1h_z_p_bi"}, + }, + }, + + { "_qpyxsv", + {18}, + { {"0"_b, "ld4_asisdlso_h4_4h"}, + }, + }, + + { "_qqjtpm", + {9, 8, 7, 6, 5}, + { {"11111"_b, "autdza_64z_dp_1src"}, + }, + }, + + { "_qqslmv", + {20, 19, 18, 17, 16}, + { {"00000"_b, "fcvtns_32s_float2int"}, + {"00001"_b, "fcvtnu_32s_float2int"}, + {"00010"_b, "scvtf_s32_float2int"}, + {"00011"_b, "ucvtf_s32_float2int"}, + {"00100"_b, "fcvtas_32s_float2int"}, + {"00101"_b, "fcvtau_32s_float2int"}, + {"00110"_b, "fmov_32s_float2int"}, + {"00111"_b, "fmov_s32_float2int"}, + {"01000"_b, "fcvtps_32s_float2int"}, + {"01001"_b, "fcvtpu_32s_float2int"}, + {"10000"_b, "fcvtms_32s_float2int"}, + {"10001"_b, "fcvtmu_32s_float2int"}, + {"11000"_b, "fcvtzs_32s_float2int"}, + {"11001"_b, "fcvtzu_32s_float2int"}, + }, + }, + + { "_qqvgql", + {4, 3, 2, 1, 0}, + { {"11111"_b, "_gtsglj"}, + }, + }, + + { "_qqyryl", + {30, 23, 22, 13, 4}, + { {"00x0x"_b, "ld1w_z_p_bz_s_x32_unscaled"}, + {"00x1x"_b, "ldff1w_z_p_bz_s_x32_unscaled"}, + {"0100x"_b, "ldr_z_bi"}, + {"01100"_b, "prfw_i_p_bi_s"}, + {"01110"_b, "prfd_i_p_bi_s"}, + {"10x0x"_b, "ld1w_z_p_bz_d_x32_unscaled"}, + {"10x1x"_b, "ldff1w_z_p_bz_d_x32_unscaled"}, + {"11x0x"_b, "ld1d_z_p_bz_d_x32_unscaled"}, + {"11x1x"_b, "ldff1d_z_p_bz_d_x32_unscaled"}, + }, + }, + + { "_qrsxzp", + {23, 22, 20, 19, 16, 13, 10}, + { {"0000000"_b, "_tjnzjl"}, + {"0000001"_b, "_nklyky"}, + {"0000010"_b, "_pjskhr"}, + {"0000011"_b, "_kqstrr"}, + {"0000101"_b, "_nvzsxn"}, + {"0100000"_b, "_jnktqs"}, + {"0100001"_b, "_ttzlqn"}, + {"0100010"_b, "_sxgnmg"}, + {"0100011"_b, "_yqzxvr"}, + {"0100101"_b, "_tvtvkt"}, + {"100xx00"_b, "st1_asisdlsop_sx1_r1s"}, + {"100xx01"_b, "_mnzgkx"}, + {"100xx10"_b, "st3_asisdlsop_sx3_r3s"}, + {"100xx11"_b, "_tjxyky"}, + {"1010x00"_b, "st1_asisdlsop_sx1_r1s"}, + {"1010x01"_b, "_mphkpq"}, + {"1010x10"_b, "st3_asisdlsop_sx3_r3s"}, + {"1010x11"_b, "_hqkhsy"}, + {"1011000"_b, "st1_asisdlsop_sx1_r1s"}, + {"1011001"_b, "_qsszkx"}, + {"1011010"_b, "st3_asisdlsop_sx3_r3s"}, + {"1011011"_b, "_gsjvmx"}, + {"1011100"_b, "_gqmjys"}, + {"1011101"_b, "_qtqrmn"}, + {"1011110"_b, "_mjrqhl"}, + {"1011111"_b, "_ngtlpz"}, + {"110xx00"_b, "ld1_asisdlsop_sx1_r1s"}, + {"110xx01"_b, "_hkjjsr"}, + {"110xx10"_b, "ld3_asisdlsop_sx3_r3s"}, + {"110xx11"_b, "_yryygq"}, + {"1110x00"_b, "ld1_asisdlsop_sx1_r1s"}, + {"1110x01"_b, "_tptqjs"}, + {"1110x10"_b, "ld3_asisdlsop_sx3_r3s"}, + {"1110x11"_b, "_szmyzt"}, + {"1111000"_b, "ld1_asisdlsop_sx1_r1s"}, + {"1111001"_b, "_zxklzp"}, + {"1111010"_b, "ld3_asisdlsop_sx3_r3s"}, + {"1111011"_b, "_qzxgqh"}, + {"1111100"_b, "_yzgthp"}, + {"1111101"_b, "_rgnryt"}, + {"1111110"_b, "_qjstll"}, + {"1111111"_b, "_qlmqyx"}, + }, + }, + + { "_qrtjvn", + {30, 23, 22, 20, 19, 12, 11}, + { {"0000000"_b, "movi_asimdimm_d_ds"}, + {"1000000"_b, "movi_asimdimm_d2_d"}, + {"1000010"_b, "fmov_asimdimm_d2_d"}, + {"x00x100"_b, "ucvtf_asimdshf_c"}, + {"x00x111"_b, "fcvtzu_asimdshf_c"}, + {"x010x00"_b, "ucvtf_asimdshf_c"}, + {"x010x11"_b, "fcvtzu_asimdshf_c"}, + {"x011100"_b, "ucvtf_asimdshf_c"}, + {"x011111"_b, "fcvtzu_asimdshf_c"}, + {"x0x1000"_b, "ucvtf_asimdshf_c"}, + {"x0x1011"_b, "fcvtzu_asimdshf_c"}, + }, + }, + + { "_qrygny", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1b_z_p_bi_u8"}, + {"00011"_b, "ldnf1b_z_p_bi_u8"}, + {"00101"_b, 
"ld1b_z_p_bi_u32"}, + {"00111"_b, "ldnf1b_z_p_bi_u32"}, + {"01001"_b, "ld1sw_z_p_bi_s64"}, + {"01011"_b, "ldnf1sw_z_p_bi_s64"}, + {"01101"_b, "ld1h_z_p_bi_u32"}, + {"01111"_b, "ldnf1h_z_p_bi_u32"}, + {"100x0"_b, "st1b_z_p_bz_d_x32_unscaled"}, + {"100x1"_b, "st1b_z_p_bz_d_64_unscaled"}, + {"101x0"_b, "st1b_z_p_bz_s_x32_unscaled"}, + {"101x1"_b, "st1b_z_p_ai_d"}, + {"110x0"_b, "st1h_z_p_bz_d_x32_unscaled"}, + {"110x1"_b, "st1h_z_p_bz_d_64_unscaled"}, + {"111x0"_b, "st1h_z_p_bz_s_x32_unscaled"}, + {"111x1"_b, "st1h_z_p_ai_d"}, + }, + }, + + { "_qsszkx", + {12}, + { {"0"_b, "st1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_qtghgs", + {22}, + { {"0"_b, "ldrsw_64_ldst_regoff"}, + }, + }, + + { "_qtgrzv", + {20, 18, 17}, + { {"000"_b, "_gznrjv"}, + }, + }, + + { "_qtgvlx", + {23, 22, 20, 19, 17, 16}, + { {"000010"_b, "scvtf_s64_float2fix"}, + {"000011"_b, "ucvtf_s64_float2fix"}, + {"001100"_b, "fcvtzs_64s_float2fix"}, + {"001101"_b, "fcvtzu_64s_float2fix"}, + {"010010"_b, "scvtf_d64_float2fix"}, + {"010011"_b, "ucvtf_d64_float2fix"}, + {"011100"_b, "fcvtzs_64d_float2fix"}, + {"011101"_b, "fcvtzu_64d_float2fix"}, + {"110010"_b, "scvtf_h64_float2fix"}, + {"110011"_b, "ucvtf_h64_float2fix"}, + {"111100"_b, "fcvtzs_64h_float2fix"}, + {"111101"_b, "fcvtzu_64h_float2fix"}, + }, + }, + + { "_qtqrmn", + {18, 17, 12}, + { {"0x0"_b, "st1_asisdlsop_dx1_r1d"}, + {"100"_b, "st1_asisdlsop_dx1_r1d"}, + {"110"_b, "st1_asisdlsop_d1_i1d"}, + }, + }, + + { "_qtxlsr", + {13, 12, 11, 10}, + { {"1111"_b, "cas_c64_ldstexcl"}, + }, + }, + + { "_qtxpky", + {4}, + { {"0"_b, "cmphs_p_p_zi"}, + {"1"_b, "cmphi_p_p_zi"}, + }, + }, + + { "_qvgtlh", + {30, 23, 22, 11}, + { {"0001"_b, "strb_32b_ldst_regoff"}, + {"0011"_b, "ldrb_32b_ldst_regoff"}, + {"0100"_b, "_hjplhs"}, + {"0101"_b, "ldrsb_64b_ldst_regoff"}, + {"0111"_b, "ldrsb_32b_ldst_regoff"}, + {"1001"_b, "strh_32_ldst_regoff"}, + {"1011"_b, "ldrh_32_ldst_regoff"}, + {"1100"_b, "_vrzksz"}, + {"1101"_b, "ldrsh_64_ldst_regoff"}, + {"1111"_b, "ldrsh_32_ldst_regoff"}, + }, + }, + + { "_qvjmmq", + {30}, + { {"0"_b, "b_only_branch_imm"}, + }, + }, + + { "_qvlnll", + {22, 20, 11}, + { {"010"_b, "decw_r_rs"}, + {"110"_b, "decd_r_rs"}, + }, + }, + + { "_qvtxpr", + {20, 9, 4}, + { {"000"_b, "uzp1_p_pp"}, + }, + }, + + { "_qvzvmq", + {30, 23, 22}, + { {"000"_b, "stlxrb_sr32_ldstexcl"}, + {"001"_b, "_ynznxv"}, + {"010"_b, "_lqlrxp"}, + {"011"_b, "_grprpj"}, + {"100"_b, "stlxrh_sr32_ldstexcl"}, + {"101"_b, "_jgsryt"}, + {"110"_b, "_qyrqxp"}, + {"111"_b, "_qjqrgz"}, + }, + }, + + { "_qyjvqr", + {23, 18, 17, 16}, + { {"0000"_b, "sqxtnt_z_zz"}, + }, + }, + + { "_qyrqxp", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stlrh_sl32_ldstexcl"}, + }, + }, + + { "_qyyrqq", + {22, 13, 12}, + { {"000"_b, "swp_64_memop"}, + {"001"_b, "_ymghnh"}, + {"010"_b, "st64bv0_64_memop"}, + {"011"_b, "st64bv_64_memop"}, + {"100"_b, "swpl_64_memop"}, + }, + }, + + { "_qzlvkm", + {18}, + { {"0"_b, "st3_asisdlsop_hx3_r3h"}, + {"1"_b, "st3_asisdlsop_h3_i3h"}, + }, + }, + + { "_qzmrnj", + {23, 22}, + { {"00"_b, "dup_asimdins_dr_r"}, + {"01"_b, "fmla_asimdsamefp16_only"}, + {"11"_b, "fmls_asimdsamefp16_only"}, + }, + }, + + { "_qzsyvx", + {30, 23, 22, 11, 10}, + { {"00010"_b, "str_s_ldst_regoff"}, + {"00110"_b, "ldr_s_ldst_regoff"}, + {"10010"_b, "str_d_ldst_regoff"}, + {"10110"_b, "ldr_d_ldst_regoff"}, + }, + }, + + { "_qzxgqh", + {12}, + { {"0"_b, "ld3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_rgnryt", + {18, 17, 12}, + { {"0x0"_b, "ld1_asisdlsop_dx1_r1d"}, + {"100"_b, "ld1_asisdlsop_dx1_r1d"}, + {"110"_b, 
"ld1_asisdlsop_d1_i1d"}, + }, + }, + + { "_rgxthl", + {30, 23, 22}, + { {"000"_b, "stxp_sp32_ldstexcl"}, + {"001"_b, "_mjyhsl"}, + {"010"_b, "_vrsjnp"}, + {"011"_b, "_zyxnpz"}, + {"100"_b, "stxp_sp64_ldstexcl"}, + {"101"_b, "_snrzky"}, + {"110"_b, "_qtxlsr"}, + {"111"_b, "_pkpvmj"}, + }, + }, + + { "_rgztgm", + {20, 18, 17}, + { {"000"_b, "_klrksl"}, + }, + }, + + { "_rhhrhg", + {30, 13, 4}, + { {"000"_b, "cmphs_p_p_zw"}, + {"001"_b, "cmphi_p_p_zw"}, + {"010"_b, "cmplo_p_p_zw"}, + {"011"_b, "cmpls_p_p_zw"}, + }, + }, + + { "_rhmxyp", + {20, 9, 4}, + { {"000"_b, "trn1_p_pp"}, + }, + }, + + { "_rhpmjz", + {12, 11}, + { {"00"_b, "incp_z_p_z"}, + {"01"_b, "incp_r_p_r"}, + {"10"_b, "_mpstrr"}, + }, + }, + + { "_rjmhxr", + {30}, + { {"0"_b, "adds_64_addsub_shift"}, + {"1"_b, "subs_64_addsub_shift"}, + }, + }, + + { "_rjrqxt", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_s_floatimm"}, + }, + }, + + { "_rjspzr", + {13, 12}, + { {"00"_b, "udiv_32_dp_2src"}, + {"10"_b, "asrv_32_dp_2src"}, + }, + }, + + { "_rjthsm", + {30, 23, 22}, + { {"001"_b, "sbfm_64m_bitfield"}, + {"101"_b, "ubfm_64m_bitfield"}, + }, + }, + + { "_rjvgkl", + {30, 23, 22, 19, 18, 17, 16}, + { {"000xxxx"_b, "umov_asimdins_w_w"}, + {"1001000"_b, "umov_asimdins_x_x"}, + {"x01xxxx"_b, "frecps_asimdsamefp16_only"}, + {"x11xxxx"_b, "frsqrts_asimdsamefp16_only"}, + }, + }, + + { "_rjyrnt", + {4}, + { {"0"_b, "cmpge_p_p_zi"}, + {"1"_b, "cmpgt_p_p_zi"}, + }, + }, + + { "_rjysnh", + {18, 17, 16, 9, 8, 7, 6}, + { {"0000000"_b, "fadd_z_p_zs"}, + {"0010000"_b, "fsub_z_p_zs"}, + {"0100000"_b, "fmul_z_p_zs"}, + {"0110000"_b, "fsubr_z_p_zs"}, + {"1000000"_b, "fmaxnm_z_p_zs"}, + {"1010000"_b, "fminnm_z_p_zs"}, + {"1100000"_b, "fmax_z_p_zs"}, + {"1110000"_b, "fmin_z_p_zs"}, + }, + }, + + { "_rkjjtp", + {23, 22, 20, 19, 11}, + { {"00010"_b, "scvtf_asisdshf_c"}, + {"001x0"_b, "scvtf_asisdshf_c"}, + {"01xx0"_b, "scvtf_asisdshf_c"}, + }, + }, + + { "_rknxlg", + {12}, + { {"0"_b, "ld4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_rkpylh", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_d32_float2fix"}, + {"00011"_b, "ucvtf_d32_float2fix"}, + {"11000"_b, "fcvtzs_32d_float2fix"}, + {"11001"_b, "fcvtzu_32d_float2fix"}, + }, + }, + + { "_rkrlsy", + {20, 19, 18, 17, 16}, + { {"00000"_b, "rev64_asimdmisc_r"}, + }, + }, + + { "_rkrntt", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_hynprk"}, + {"0000001"_b, "_phpphm"}, + {"0100000"_b, "_tlvmlq"}, + {"0100001"_b, "_qpyxsv"}, + {"100xxx0"_b, "st2_asisdlsop_hx2_r2h"}, + {"100xxx1"_b, "st4_asisdlsop_hx4_r4h"}, + {"1010xx0"_b, "st2_asisdlsop_hx2_r2h"}, + {"1010xx1"_b, "st4_asisdlsop_hx4_r4h"}, + {"10110x0"_b, "st2_asisdlsop_hx2_r2h"}, + {"10110x1"_b, "st4_asisdlsop_hx4_r4h"}, + {"1011100"_b, "st2_asisdlsop_hx2_r2h"}, + {"1011101"_b, "st4_asisdlsop_hx4_r4h"}, + {"1011110"_b, "_skmzll"}, + {"1011111"_b, "_hkxlsm"}, + {"110xxx0"_b, "ld2_asisdlsop_hx2_r2h"}, + {"110xxx1"_b, "ld4_asisdlsop_hx4_r4h"}, + {"1110xx0"_b, "ld2_asisdlsop_hx2_r2h"}, + {"1110xx1"_b, "ld4_asisdlsop_hx4_r4h"}, + {"11110x0"_b, "ld2_asisdlsop_hx2_r2h"}, + {"11110x1"_b, "ld4_asisdlsop_hx4_r4h"}, + {"1111100"_b, "ld2_asisdlsop_hx2_r2h"}, + {"1111101"_b, "ld4_asisdlsop_hx4_r4h"}, + {"1111110"_b, "_ykhhqq"}, + {"1111111"_b, "_khtsmx"}, + }, + }, + + { "_rkskkv", + {18}, + { {"1"_b, "fminv_v_p_z"}, + }, + }, + + { "_rktqym", + {30, 23, 22, 13, 12, 11, 10}, + { {"010xx00"_b, "csel_32_condsel"}, + {"010xx01"_b, "csinc_32_condsel"}, + {"0110000"_b, "crc32b_32c_dp_2src"}, + {"0110001"_b, "crc32h_32c_dp_2src"}, + {"0110010"_b, "crc32w_32c_dp_2src"}, + {"0110100"_b, 
"crc32cb_32c_dp_2src"}, + {"0110101"_b, "crc32ch_32c_dp_2src"}, + {"0110110"_b, "crc32cw_32c_dp_2src"}, + {"0111000"_b, "smax_32_dp_2src"}, + {"0111001"_b, "umax_32_dp_2src"}, + {"0111010"_b, "smin_32_dp_2src"}, + {"0111011"_b, "umin_32_dp_2src"}, + {"110xx00"_b, "csinv_32_condsel"}, + {"110xx01"_b, "csneg_32_condsel"}, + }, + }, + + { "_rkxlyj", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_b_ldapstl_simd"}, + {"001xx10"_b, "ldapur_b_ldapstl_simd"}, + {"010xx10"_b, "stlur_q_ldapstl_simd"}, + {"011xx10"_b, "ldapur_q_ldapstl_simd"}, + {"100xx10"_b, "stlur_h_ldapstl_simd"}, + {"101xx10"_b, "ldapur_h_ldapstl_simd"}, + {"x000001"_b, "cpyprn_cpy_memcms"}, + {"x000101"_b, "cpypwtrn_cpy_memcms"}, + {"x001001"_b, "cpyprtrn_cpy_memcms"}, + {"x001101"_b, "cpyptrn_cpy_memcms"}, + {"x010001"_b, "cpymrn_cpy_memcms"}, + {"x010101"_b, "cpymwtrn_cpy_memcms"}, + {"x011001"_b, "cpymrtrn_cpy_memcms"}, + {"x011101"_b, "cpymtrn_cpy_memcms"}, + {"x100001"_b, "cpyern_cpy_memcms"}, + {"x100101"_b, "cpyewtrn_cpy_memcms"}, + {"x101001"_b, "cpyertrn_cpy_memcms"}, + {"x101101"_b, "cpyetrn_cpy_memcms"}, + {"x110001"_b, "setge_set_memcms"}, + {"x110101"_b, "setget_set_memcms"}, + {"x111001"_b, "setgen_set_memcms"}, + {"x111101"_b, "setgetn_set_memcms"}, + }, + }, + + { "_rkzlpp", + {4}, + { {"0"_b, "ccmp_64_condcmp_reg"}, + }, + }, + + { "_rlgtnn", + {23}, + { {"0"_b, "_sxsgmq"}, + }, + }, + + { "_rlpmrx", + {30}, + { {"0"_b, "_txzxzs"}, + {"1"_b, "_htsjxj"}, + }, + }, + + { "_rlrjxp", + {13, 4}, + { {"00"_b, "fcmge_p_p_zz"}, + {"01"_b, "fcmgt_p_p_zz"}, + {"10"_b, "fcmeq_p_p_zz"}, + {"11"_b, "fcmne_p_p_zz"}, + }, + }, + + { "_rlxhxz", + {9, 8, 7, 6, 5}, + { {"11111"_b, "pacdzb_64z_dp_1src"}, + }, + }, + + { "_rlylxh", + {18}, + { {"0"_b, "ld3_asisdlsop_bx3_r3b"}, + {"1"_b, "ld3_asisdlsop_b3_i3b"}, + }, + }, + + { "_rlyvpn", + {23, 12, 11, 10}, + { {"0000"_b, "sqshrunb_z_zi"}, + {"0001"_b, "sqshrunt_z_zi"}, + {"0010"_b, "sqrshrunb_z_zi"}, + {"0011"_b, "sqrshrunt_z_zi"}, + {"0100"_b, "shrnb_z_zi"}, + {"0101"_b, "shrnt_z_zi"}, + {"0110"_b, "rshrnb_z_zi"}, + {"0111"_b, "rshrnt_z_zi"}, + }, + }, + + { "_rmkpsk", + {23}, + { {"0"_b, "_srkslp"}, + }, + }, + + { "_rmmpym", + {2, 1, 0}, + { {"000"_b, "_glgznt"}, + }, + }, + + { "_rmyzpp", + {20, 19, 18, 17}, + { {"0000"_b, "_gnhjkl"}, + }, + }, + + { "_rnlxtv", + {13}, + { {"0"_b, "_vvgpzq"}, + {"1"_b, "_mqljmr"}, + }, + }, + + { "_rnphqp", + {20, 19, 18, 17, 16, 4, 3}, + { {"0000001"_b, "fcmp_hz_floatcmp"}, + {"0000011"_b, "fcmpe_hz_floatcmp"}, + {"xxxxx00"_b, "fcmp_h_floatcmp"}, + {"xxxxx10"_b, "fcmpe_h_floatcmp"}, + }, + }, + + { "_rnqmyp", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "mvni_asimdimm_l_sl"}, + {"00x100"_b, "sri_asimdshf_r"}, + {"00x110"_b, "sqshlu_asimdshf_r"}, + {"010x00"_b, "sri_asimdshf_r"}, + {"010x10"_b, "sqshlu_asimdshf_r"}, + {"011100"_b, "sri_asimdshf_r"}, + {"011110"_b, "sqshlu_asimdshf_r"}, + {"0x1000"_b, "sri_asimdshf_r"}, + {"0x1010"_b, "sqshlu_asimdshf_r"}, + }, + }, + + { "_rnqtmt", + {30}, + { {"0"_b, "_zyjjgs"}, + {"1"_b, "_lrntmz"}, + }, + }, + + { "_rnsmjq", + {13}, + { {"0"_b, "_xxqzvy"}, + {"1"_b, "_rmmpym"}, + }, + }, + + { "_rpjgkh", + {22, 20, 19, 18, 17, 16}, + { {"111000"_b, "fcmge_asisdmiscfp16_fz"}, + {"x00000"_b, "fcmge_asisdmisc_fz"}, + {"x10000"_b, "fminnmp_asisdpair_only_sd"}, + }, + }, + + { "_rpjrhs", + {23, 22, 4}, + { {"000"_b, "fccmp_s_floatccmp"}, + {"001"_b, "fccmpe_s_floatccmp"}, + {"010"_b, "fccmp_d_floatccmp"}, + {"011"_b, "fccmpe_d_floatccmp"}, + {"110"_b, "fccmp_h_floatccmp"}, + {"111"_b, 
"fccmpe_h_floatccmp"}, + }, + }, + + { "_rpplns", + {23, 22, 20, 19, 11}, + { {"00010"_b, "srshr_asisdshf_r"}, + {"001x0"_b, "srshr_asisdshf_r"}, + {"01xx0"_b, "srshr_asisdshf_r"}, + }, + }, + + { "_rpzykx", + {11}, + { {"0"_b, "_svvyrz"}, + }, + }, + + { "_rqghyv", + {30, 23, 22, 11, 10}, + { {"00000"_b, "stur_32_ldst_unscaled"}, + {"00001"_b, "str_32_ldst_immpost"}, + {"00010"_b, "sttr_32_ldst_unpriv"}, + {"00011"_b, "str_32_ldst_immpre"}, + {"00100"_b, "ldur_32_ldst_unscaled"}, + {"00101"_b, "ldr_32_ldst_immpost"}, + {"00110"_b, "ldtr_32_ldst_unpriv"}, + {"00111"_b, "ldr_32_ldst_immpre"}, + {"01000"_b, "ldursw_64_ldst_unscaled"}, + {"01001"_b, "ldrsw_64_ldst_immpost"}, + {"01010"_b, "ldtrsw_64_ldst_unpriv"}, + {"01011"_b, "ldrsw_64_ldst_immpre"}, + {"10000"_b, "stur_64_ldst_unscaled"}, + {"10001"_b, "str_64_ldst_immpost"}, + {"10010"_b, "sttr_64_ldst_unpriv"}, + {"10011"_b, "str_64_ldst_immpre"}, + {"10100"_b, "ldur_64_ldst_unscaled"}, + {"10101"_b, "ldr_64_ldst_immpost"}, + {"10110"_b, "ldtr_64_ldst_unpriv"}, + {"10111"_b, "ldr_64_ldst_immpre"}, + {"11000"_b, "prfum_p_ldst_unscaled"}, + }, + }, + + { "_rqhryp", + {12, 10}, + { {"00"_b, "_kjpxvh"}, + {"01"_b, "_mxvjxx"}, + {"10"_b, "sm4ekey_z_zz"}, + {"11"_b, "rax1_z_zz"}, + }, + }, + + { "_rqpjjs", + {30, 11, 10}, + { {"000"_b, "_qjrllr"}, + {"001"_b, "_xlgxhn"}, + {"010"_b, "_hxrnns"}, + {"011"_b, "_xnhkpk"}, + {"101"_b, "_mmgpkx"}, + {"110"_b, "_vxhjgg"}, + {"111"_b, "_lptrlg"}, + }, + }, + + { "_rqzpzq", + {23, 22, 11, 10, 4, 3, 0}, + { {"0000000"_b, "_hkxzqg"}, + {"0010111"_b, "_zqlzzp"}, + {"0011111"_b, "_lvszgj"}, + {"0100000"_b, "_tmsjzg"}, + {"0110111"_b, "_kzprzt"}, + {"0111111"_b, "_tzsnmy"}, + {"1000000"_b, "_mqmrng"}, + {"1010111"_b, "_hrmsnk"}, + {"1011111"_b, "_tqlrzh"}, + }, + }, + + { "_rrkmyl", + {23, 22, 4}, + { {"000"_b, "fccmp_s_floatccmp"}, + {"001"_b, "fccmpe_s_floatccmp"}, + {"010"_b, "fccmp_d_floatccmp"}, + {"011"_b, "fccmpe_d_floatccmp"}, + {"110"_b, "fccmp_h_floatccmp"}, + {"111"_b, "fccmpe_h_floatccmp"}, + }, + }, + + { "_rrvltp", + {18, 4}, + { {"00"_b, "fcmlt_p_p_z0"}, + {"01"_b, "fcmle_p_p_z0"}, + }, + }, + + { "_rshyht", + {13}, + { {"0"_b, "facge_p_p_zz"}, + {"1"_b, "facgt_p_p_zz"}, + }, + }, + + { "_rsjgyk", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld2w_z_p_bi_contiguous"}, + {"000x0"_b, "ld2w_z_p_br_contiguous"}, + {"00101"_b, "ld4w_z_p_bi_contiguous"}, + {"001x0"_b, "ld4w_z_p_br_contiguous"}, + {"01001"_b, "ld2d_z_p_bi_contiguous"}, + {"010x0"_b, "ld2d_z_p_br_contiguous"}, + {"01101"_b, "ld4d_z_p_bi_contiguous"}, + {"011x0"_b, "ld4d_z_p_br_contiguous"}, + {"10011"_b, "st2w_z_p_bi_contiguous"}, + {"100x0"_b, "st1w_z_p_bz_d_x32_scaled"}, + {"10101"_b, "st1w_z_p_bi"}, + {"10111"_b, "st4w_z_p_bi_contiguous"}, + {"101x0"_b, "st1w_z_p_bz_s_x32_scaled"}, + {"11011"_b, "st2d_z_p_bi_contiguous"}, + {"110x0"_b, "st1d_z_p_bz_d_x32_scaled"}, + {"11101"_b, "st1d_z_p_bi"}, + {"11111"_b, "st4d_z_p_bi_contiguous"}, + }, + }, + + { "_rsmyth", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stllr_sl64_ldstexcl"}, + }, + }, + + { "_rsnvnr", + {30, 23, 22}, + { {"100"_b, "ins_asimdins_ir_r"}, + {"x01"_b, "fmulx_asimdsamefp16_only"}, + }, + }, + + { "_rspmth", + {18}, + { {"0"_b, "st1_asisdlse_r2_2v"}, + }, + }, + + { "_rsqmgk", + {23, 22, 20, 19, 18, 17, 16}, + { {"0000000"_b, "movprfx_z_z"}, + }, + }, + + { "_rsqxrs", + {30, 23, 22, 11, 10}, + { {"00000"_b, "_ggvlym"}, + {"01000"_b, "csel_32_condsel"}, + {"01001"_b, "csinc_32_condsel"}, + {"01100"_b, "_svvylr"}, + {"01101"_b, "_zmhqmr"}, + {"01110"_b, "_rjspzr"}, + {"01111"_b, 
"_vpknjg"}, + {"10000"_b, "_rzymmk"}, + {"11000"_b, "csinv_32_condsel"}, + {"11001"_b, "csneg_32_condsel"}, + {"11100"_b, "_kzjxxk"}, + {"11101"_b, "_khvvtr"}, + {"11110"_b, "_gvpvjn"}, + {"11111"_b, "_pkjqsy"}, + }, + }, + + { "_rssrty", + {30, 23, 22, 13, 12, 11, 10}, + { {"1011011"_b, "bfmmla_asimdsame2_e"}, + {"x011111"_b, "bfdot_asimdsame2_d"}, + {"x111111"_b, "bfmlal_asimdsame2_f"}, + {"xxx0xx1"_b, "fcmla_asimdsame2_c"}, + {"xxx1x01"_b, "fcadd_asimdsame2_c"}, + }, + }, + + { "_rszgzl", + {30, 23, 22}, + { {"000"_b, "smsubl_64wa_dp_3src"}, + {"010"_b, "umsubl_64wa_dp_3src"}, + }, + }, + + { "_rtlvxq", + {30, 23, 22}, + { {"000"_b, "madd_32a_dp_3src"}, + }, + }, + + { "_rtpztp", + {22}, + { {"0"_b, "umullb_z_zzi_s"}, + {"1"_b, "umullb_z_zzi_d"}, + }, + }, + + { "_rtrlts", + {23, 22, 12, 11, 10}, + { {"01000"_b, "bfdot_z_zzz"}, + {"10000"_b, "fmlalb_z_zzz"}, + {"10001"_b, "fmlalt_z_zzz"}, + {"11000"_b, "bfmlalb_z_zzz"}, + {"11001"_b, "bfmlalt_z_zzz"}, + }, + }, + + { "_rvjkyp", + {13, 12}, + { {"01"_b, "gmi_64g_dp_2src"}, + {"10"_b, "lsrv_64_dp_2src"}, + }, + }, + + { "_rvsylx", + {18}, + { {"1"_b, "frecpe_z_z"}, + }, + }, + + { "_rvtxys", + {23, 22, 20, 19, 11}, + { {"00010"_b, "sshr_asisdshf_r"}, + {"001x0"_b, "sshr_asisdshf_r"}, + {"01xx0"_b, "sshr_asisdshf_r"}, + }, + }, + + { "_rvvshx", + {23, 22, 13, 12}, + { {"0000"_b, "fmax_s_floatdp2"}, + {"0001"_b, "fmin_s_floatdp2"}, + {"0010"_b, "fmaxnm_s_floatdp2"}, + {"0011"_b, "fminnm_s_floatdp2"}, + {"0100"_b, "fmax_d_floatdp2"}, + {"0101"_b, "fmin_d_floatdp2"}, + {"0110"_b, "fmaxnm_d_floatdp2"}, + {"0111"_b, "fminnm_d_floatdp2"}, + {"1100"_b, "fmax_h_floatdp2"}, + {"1101"_b, "fmin_h_floatdp2"}, + {"1110"_b, "fmaxnm_h_floatdp2"}, + {"1111"_b, "fminnm_h_floatdp2"}, + }, + }, + + { "_rxgkjn", + {30, 23, 22}, + { {"000"_b, "adds_64s_addsub_ext"}, + {"100"_b, "subs_64s_addsub_ext"}, + }, + }, + + { "_rxhssh", + {18}, + { {"0"_b, "ld3_asisdlsop_hx3_r3h"}, + {"1"_b, "ld3_asisdlsop_h3_i3h"}, + }, + }, + + { "_rxnnvv", + {23, 22, 4, 3, 2, 1, 0}, + { {"0000000"_b, "brk_ex_exception"}, + {"0100000"_b, "tcancel_ex_exception"}, + {"1000001"_b, "dcps1_dc_exception"}, + {"1000010"_b, "dcps2_dc_exception"}, + {"1000011"_b, "dcps3_dc_exception"}, + }, + }, + + { "_rxsqhv", + {13, 12}, + { {"00"_b, "adc_64_addsub_carry"}, + }, + }, + + { "_rxtklv", + {30, 18}, + { {"00"_b, "_qtgvlx"}, + }, + }, + + { "_rxytqg", + {30, 23, 22, 20, 19, 18}, + { {"00xxxx"_b, "add_64_addsub_imm"}, + {"011000"_b, "smax_64_minmax_imm"}, + {"011001"_b, "umax_64u_minmax_imm"}, + {"011010"_b, "smin_64_minmax_imm"}, + {"011011"_b, "umin_64u_minmax_imm"}, + {"10xxxx"_b, "sub_64_addsub_imm"}, + }, + }, + + { "_ryrkqt", + {20, 19}, + { {"00"_b, "_tsskys"}, + {"01"_b, "_kqvljp"}, + {"10"_b, "_lxhlkx"}, + {"11"_b, "_rjysnh"}, + }, + }, + + { "_rznrqt", + {22}, + { {"0"_b, "umullt_z_zzi_s"}, + {"1"_b, "umullt_z_zzi_d"}, + }, + }, + + { "_rzpqmm", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_nygsjm"}, + {"0000001"_b, "_snjmrt"}, + {"0100000"_b, "_hhxpjz"}, + {"0100001"_b, "_tktgvg"}, + {"100xxx0"_b, "st2_asisdlsop_bx2_r2b"}, + {"100xxx1"_b, "st4_asisdlsop_bx4_r4b"}, + {"1010xx0"_b, "st2_asisdlsop_bx2_r2b"}, + {"1010xx1"_b, "st4_asisdlsop_bx4_r4b"}, + {"10110x0"_b, "st2_asisdlsop_bx2_r2b"}, + {"10110x1"_b, "st4_asisdlsop_bx4_r4b"}, + {"1011100"_b, "st2_asisdlsop_bx2_r2b"}, + {"1011101"_b, "st4_asisdlsop_bx4_r4b"}, + {"1011110"_b, "_szjjgk"}, + {"1011111"_b, "_tvgklq"}, + {"110xxx0"_b, "ld2_asisdlsop_bx2_r2b"}, + {"110xxx1"_b, "ld4_asisdlsop_bx4_r4b"}, + {"1110xx0"_b, 
"ld2_asisdlsop_bx2_r2b"}, + {"1110xx1"_b, "ld4_asisdlsop_bx4_r4b"}, + {"11110x0"_b, "ld2_asisdlsop_bx2_r2b"}, + {"11110x1"_b, "ld4_asisdlsop_bx4_r4b"}, + {"1111100"_b, "ld2_asisdlsop_bx2_r2b"}, + {"1111101"_b, "ld4_asisdlsop_bx4_r4b"}, + {"1111110"_b, "_tzsvyv"}, + {"1111111"_b, "_jvnsgt"}, + }, + }, + + { "_rztvnl", + {20, 19, 18, 17, 16}, + { {"0000x"_b, "fcadd_z_p_zz"}, + {"10000"_b, "faddp_z_p_zz"}, + {"10100"_b, "fmaxnmp_z_p_zz"}, + {"10101"_b, "fminnmp_z_p_zz"}, + {"10110"_b, "fmaxp_z_p_zz"}, + {"10111"_b, "fminp_z_p_zz"}, + }, + }, + + { "_rzymmk", + {13, 12}, + { {"00"_b, "sbc_32_addsub_carry"}, + }, + }, + + { "_rzzxsn", + {30, 13}, + { {"00"_b, "_nvyxmh"}, + {"01"_b, "_hykhmt"}, + {"10"_b, "_yszjsm"}, + {"11"_b, "_jrnxzh"}, + }, + }, + + { "_sghgtk", + {4}, + { {"0"_b, "cmplo_p_p_zi"}, + {"1"_b, "cmpls_p_p_zi"}, + }, + }, + + { "_sgmpvp", + {23, 22, 13}, + { {"000"_b, "fmulx_asimdelem_rh_h"}, + {"1x0"_b, "fmulx_asimdelem_r_sd"}, + }, + }, + + { "_shgktt", + {11}, + { {"0"_b, "_tjjqpx"}, + }, + }, + + { "_shgxyq", + {23, 22, 19, 13, 12}, + { {"00100"_b, "sha1h_ss_cryptosha2"}, + {"00101"_b, "sha1su1_vv_cryptosha2"}, + {"00110"_b, "sha256su0_vv_cryptosha2"}, + {"xx011"_b, "suqadd_asisdmisc_r"}, + }, + }, + + { "_shqygv", + {30, 4}, + { {"00"_b, "_thvxym"}, + {"01"_b, "_mrhtxt"}, + {"10"_b, "_ptjyqx"}, + {"11"_b, "_rshyht"}, + }, + }, + + { "_shqyqv", + {23, 13, 12}, + { {"010"_b, "fcmeq_asisdsame_only"}, + }, + }, + + { "_shvqkt", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldlar_lr32_ldstexcl"}, + }, + }, + + { "_sjlqvg", + {23, 20, 19, 18, 17, 16}, + { {"000001"_b, "fcvtxn_asimdmisc_n"}, + {"x00000"_b, "uadalp_asimdmisc_p"}, + }, + }, + + { "_sjnspg", + {4}, + { {"0"_b, "nors_p_p_pp_z"}, + {"1"_b, "nands_p_p_pp_z"}, + }, + }, + + { "_sjtrhm", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1rqb_z_p_bi_u8"}, + {"000x0"_b, "ld1rqb_z_p_br_contiguous"}, + {"01001"_b, "ld1rqh_z_p_bi_u16"}, + {"010x0"_b, "ld1rqh_z_p_br_contiguous"}, + {"100x1"_b, "stnt1b_z_p_ar_d_64_unscaled"}, + {"101x1"_b, "stnt1b_z_p_ar_s_x32_unscaled"}, + {"110x1"_b, "stnt1h_z_p_ar_d_64_unscaled"}, + {"111x1"_b, "stnt1h_z_p_ar_s_x32_unscaled"}, + }, + }, + + { "_sjvhlq", + {22}, + { {"0"_b, "smullb_z_zzi_s"}, + {"1"_b, "smullb_z_zzi_d"}, + }, + }, + + { "_skjqrx", + {23, 22}, + { {"00"_b, "fmov_s_floatimm"}, + {"01"_b, "fmov_d_floatimm"}, + {"11"_b, "fmov_h_floatimm"}, + }, + }, + + { "_skmzll", + {18}, + { {"0"_b, "st2_asisdlsop_hx2_r2h"}, + {"1"_b, "st2_asisdlsop_h2_i2h"}, + }, + }, + + { "_sknvhk", + {13, 12, 11, 10}, + { {"0000"_b, "sha1c_qsv_cryptosha3"}, + {"0001"_b, "dup_asisdone_only"}, + {"0100"_b, "sha1p_qsv_cryptosha3"}, + {"1000"_b, "sha1m_qsv_cryptosha3"}, + {"1100"_b, "sha1su0_vvv_cryptosha3"}, + }, + }, + + { "_skqzyg", + {23}, + { {"0"_b, "fcmeq_asimdsame_only"}, + }, + }, + + { "_sksvrn", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldlarb_lr32_ldstexcl"}, + }, + }, + + { "_skszgm", + {13, 12, 11, 10}, + { {"1111"_b, "_xzmrlg"}, + }, + }, + + { "_skytvx", + {23, 22}, + { {"00"_b, "tbx_asimdtbl_l2_2"}, + }, + }, + + { "_slzrtr", + {23, 22}, + { {"00"_b, "fmsub_s_floatdp3"}, + {"01"_b, "fmsub_d_floatdp3"}, + {"11"_b, "fmsub_h_floatdp3"}, + }, + }, + + { "_slzvjh", + {30, 23, 22}, + { {"000"_b, "orr_32_log_imm"}, + {"100"_b, "ands_32s_log_imm"}, + {"110"_b, "movk_32_movewide"}, + }, + }, + + { "_smmrpj", + {18}, + { {"0"_b, "fadda_v_p_z"}, + }, + }, + + { "_smptxh", + {23, 22}, + { {"01"_b, "fmax_asimdsamefp16_only"}, + {"11"_b, "fmin_asimdsamefp16_only"}, + }, + }, + + { "_smsytm", + {13}, + { {"0"_b, 
"mul_asimdelem_r"}, + {"1"_b, "smull_asimdelem_l"}, + }, + }, + + { "_snhmgn", + {23}, + { {"0"_b, "fmul_asimdsame_only"}, + }, + }, + + { "_snhzxr", + {30, 23, 22}, + { {"001"_b, "bfm_64m_bitfield"}, + }, + }, + + { "_snjmrt", + {18}, + { {"0"_b, "st4_asisdlso_b4_4b"}, + }, + }, + + { "_snnlgr", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "movi_asimdimm_l_sl"}, + {"00x100"_b, "sshr_asimdshf_r"}, + {"00x110"_b, "srshr_asimdshf_r"}, + {"010x00"_b, "sshr_asimdshf_r"}, + {"010x10"_b, "srshr_asimdshf_r"}, + {"011100"_b, "sshr_asimdshf_r"}, + {"011110"_b, "srshr_asimdshf_r"}, + {"0x1000"_b, "sshr_asimdshf_r"}, + {"0x1010"_b, "srshr_asimdshf_r"}, + }, + }, + + { "_snrzky", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxp_lp64_ldstexcl"}, + }, + }, + + { "_sntnsm", + {9, 8, 7, 6, 5}, + { {"11111"_b, "autizb_64z_dp_1src"}, + }, + }, + + { "_sntyqy", + {4}, + { {"0"_b, "cmphs_p_p_zi"}, + {"1"_b, "cmphi_p_p_zi"}, + }, + }, + + { "_snvnjz", + {30, 13}, + { {"10"_b, "_plzqrv"}, + }, + }, + + { "_snvzjr", + {12}, + { {"0"_b, "st2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_snzvtt", + {23, 22}, + { {"00"_b, "fmlal2_asimdsame_f"}, + {"10"_b, "fmlsl2_asimdsame_f"}, + }, + }, + + { "_spktyg", + {23, 22, 20, 19, 11}, + { {"00000"_b, "movi_asimdimm_m_sm"}, + }, + }, + + { "_spxvlt", + {20, 19, 18, 17, 16, 13, 12, 11}, + { {"00000000"_b, "_mtkhgz"}, + }, + }, + + { "_sqgjmn", + {20, 9}, + { {"00"_b, "_mxgykv"}, + }, + }, + + { "_sqhxzj", + {30, 23, 22, 13, 12, 11, 10}, + { {"1010000"_b, "sha512h_qqv_cryptosha512_3"}, + {"1010001"_b, "sha512h2_qqv_cryptosha512_3"}, + {"1010010"_b, "sha512su1_vvv2_cryptosha512_3"}, + {"1010011"_b, "rax1_vvv2_cryptosha512_3"}, + }, + }, + + { "_sqkkqy", + {13, 12, 10}, + { {"010"_b, "sqrdmlah_asisdelem_r"}, + {"101"_b, "_mhksnq"}, + {"110"_b, "sqrdmlsh_asisdelem_r"}, + {"111"_b, "_mpytmv"}, + }, + }, + + { "_sqlsyr", + {18, 17}, + { {"00"_b, "ld1_asisdlse_r1_1v"}, + }, + }, + + { "_sqttsv", + {20, 19, 18, 17, 16, 4, 3}, + { {"0000001"_b, "fcmp_sz_floatcmp"}, + {"0000011"_b, "fcmpe_sz_floatcmp"}, + {"xxxxx00"_b, "fcmp_s_floatcmp"}, + {"xxxxx10"_b, "fcmpe_s_floatcmp"}, + }, + }, + + { "_srkslp", + {22, 20}, + { {"00"_b, "_zvynrg"}, + {"01"_b, "msr_sr_systemmove"}, + {"10"_b, "_lxlqks"}, + {"11"_b, "msrr_sr_systemmovepr"}, + }, + }, + + { "_srnkng", + {18}, + { {"0"_b, "faddv_v_p_z"}, + {"1"_b, "fmaxnmv_v_p_z"}, + }, + }, + + { "_srpptk", + {20, 19, 18, 17, 16}, + { {"00000"_b, "usqadd_asimdmisc_r"}, + {"00001"_b, "shll_asimdmisc_s"}, + {"10000"_b, "uaddlv_asimdall_only"}, + }, + }, + + { "_srpqmk", + {30, 23, 22}, + { {"000"_b, "stp_q_ldstpair_off"}, + {"001"_b, "ldp_q_ldstpair_off"}, + {"010"_b, "stp_q_ldstpair_pre"}, + {"011"_b, "ldp_q_ldstpair_pre"}, + }, + }, + + { "_srsrtk", + {30, 23, 22, 13, 11, 10}, + { {"000010"_b, "str_b_ldst_regoff"}, + {"000110"_b, "str_bl_ldst_regoff"}, + {"001010"_b, "ldr_b_ldst_regoff"}, + {"001110"_b, "ldr_bl_ldst_regoff"}, + {"010x10"_b, "str_q_ldst_regoff"}, + {"011x10"_b, "ldr_q_ldst_regoff"}, + {"100x10"_b, "str_h_ldst_regoff"}, + {"101x10"_b, "ldr_h_ldst_regoff"}, + }, + }, + + { "_srttng", + {23, 22}, + { {"01"_b, "fcmla_asimdelem_c_h"}, + {"10"_b, "fcmla_asimdelem_c_s"}, + }, + }, + + { "_ssjnph", + {10}, + { {"0"_b, "blraa_64p_branch_reg"}, + {"1"_b, "blrab_64p_branch_reg"}, + }, + }, + + { "_ssjrxs", + {18}, + { {"0"_b, "ld3_asisdlso_h3_3h"}, + }, + }, + + { "_ssqyrk", + {23, 22, 20, 19, 18, 17, 16}, + { {"0000000"_b, "not_asimdmisc_r"}, + {"0100000"_b, "rbit_asimdmisc_r"}, + }, + }, + + { "_ssvpxz", + {30, 23, 22}, + { {"000"_b, 
"stnp_32_ldstnapair_offs"}, + {"001"_b, "ldnp_32_ldstnapair_offs"}, + {"010"_b, "stp_32_ldstpair_post"}, + {"011"_b, "ldp_32_ldstpair_post"}, + {"110"_b, "stgp_64_ldstpair_post"}, + {"111"_b, "ldpsw_64_ldstpair_post"}, + }, + }, + + { "_ssypmm", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_h_floatimm"}, + }, + }, + + { "_stlgrr", + {30, 23, 22, 13, 12, 11, 10}, + { {"0001111"_b, "caspl_cp32_ldstexcl"}, + {"0011111"_b, "caspal_cp32_ldstexcl"}, + {"0101111"_b, "caslb_c32_ldstexcl"}, + {"0111111"_b, "casalb_c32_ldstexcl"}, + {"1001111"_b, "caspl_cp64_ldstexcl"}, + {"1011111"_b, "caspal_cp64_ldstexcl"}, + {"1101111"_b, "caslh_c32_ldstexcl"}, + {"1111111"_b, "casalh_c32_ldstexcl"}, + }, + }, + + { "_stmtkr", + {30, 23, 22}, + { {"000"_b, "stxr_sr32_ldstexcl"}, + {"001"_b, "_zlvjrh"}, + {"010"_b, "_lpzgvs"}, + {"011"_b, "_shvqkt"}, + {"100"_b, "stxr_sr64_ldstexcl"}, + {"101"_b, "_jhltlz"}, + {"110"_b, "_rsmyth"}, + {"111"_b, "_vjtgmx"}, + }, + }, + + { "_svgvjm", + {23}, + { {"0"_b, "faddp_asimdsame_only"}, + {"1"_b, "fabd_asimdsame_only"}, + }, + }, + + { "_svlrvy", + {18, 17}, + { {"00"_b, "st1_asisdlse_r3_3v"}, + }, + }, + + { "_svvylr", + {13, 12}, + { {"10"_b, "lslv_32_dp_2src"}, + }, + }, + + { "_svvyrz", + {23, 22, 20, 19, 18, 17, 16}, + { {"00xxxxx"_b, "addvl_r_ri"}, + {"01xxxxx"_b, "addpl_r_ri"}, + {"1011111"_b, "rdvl_r_i"}, + }, + }, + + { "_svyszp", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_d_floatimm"}, + }, + }, + + { "_sxgnmg", + {18, 17}, + { {"00"_b, "ld3_asisdlso_s3_3s"}, + }, + }, + + { "_sxptnh", + {23, 22, 11, 10}, + { {"0000"_b, "_vmtkqp"}, + {"0001"_b, "_lqjlkj"}, + {"0010"_b, "_gyymmx"}, + {"0011"_b, "_gmqyjv"}, + {"0100"_b, "_pvtyjz"}, + {"0101"_b, "_hxxxyy"}, + {"0110"_b, "_xszmjn"}, + {"1000"_b, "_lzjyhm"}, + {"1001"_b, "_zlkygr"}, + {"1010"_b, "_jvpjsm"}, + {"1101"_b, "_vzyklr"}, + {"1110"_b, "_npxkzq"}, + }, + }, + + { "_sxpvym", + {30, 23, 22, 13}, + { {"0000"_b, "ldnt1sb_z_p_ar_s_x32_unscaled"}, + {"0001"_b, "ldnt1b_z_p_ar_s_x32_unscaled"}, + {"0010"_b, "ld1rb_z_p_bi_u8"}, + {"0011"_b, "ld1rb_z_p_bi_u16"}, + {"0100"_b, "ldnt1sh_z_p_ar_s_x32_unscaled"}, + {"0101"_b, "ldnt1h_z_p_ar_s_x32_unscaled"}, + {"0110"_b, "ld1rsw_z_p_bi_s64"}, + {"0111"_b, "ld1rh_z_p_bi_u16"}, + {"1000"_b, "ldnt1sb_z_p_ar_d_64_unscaled"}, + {"1010"_b, "ld1sb_z_p_bz_d_64_unscaled"}, + {"1011"_b, "ldff1sb_z_p_bz_d_64_unscaled"}, + {"1100"_b, "ldnt1sh_z_p_ar_d_64_unscaled"}, + {"1110"_b, "ld1sh_z_p_bz_d_64_unscaled"}, + {"1111"_b, "ldff1sh_z_p_bz_d_64_unscaled"}, + }, + }, + + { "_sxsgmq", + {30, 22, 20, 19, 18, 17, 16}, + { {"00xxxxx"_b, "stxp_sp32_ldstexcl"}, + {"0111111"_b, "ldxp_lp32_ldstexcl"}, + {"10xxxxx"_b, "stxp_sp64_ldstexcl"}, + {"1111111"_b, "ldxp_lp64_ldstexcl"}, + }, + }, + + { "_sxsxxt", + {20, 19, 18, 17, 16}, + { {"10000"_b, "fminp_asisdpair_only_sd"}, + }, + }, + + { "_sylkvm", + {23, 22, 12}, + { {"100"_b, "fmlsl2_asimdelem_lh"}, + {"xx1"_b, "sqrdmlah_asimdelem_r"}, + }, + }, + + { "_syrmmr", + {18, 4}, + { {"00"_b, "fcmeq_p_p_z0"}, + }, + }, + + { "_szgqrr", + {12, 10}, + { {"00"_b, "_xlyjsz"}, + {"01"_b, "_yppmkl"}, + {"10"_b, "_sgmpvp"}, + {"11"_b, "_gjtmjg"}, + }, + }, + + { "_szjjgk", + {18}, + { {"0"_b, "st2_asisdlsop_bx2_r2b"}, + {"1"_b, "st2_asisdlsop_b2_i2b"}, + }, + }, + + { "_szmnhg", + {12}, + { {"0"_b, "ld2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_szmyzt", + {12}, + { {"0"_b, "ld3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_szqlsn", + {23, 22, 20, 19, 18, 17, 16}, + { {"0x00001"_b, "frint32z_asimdmisc_r"}, + {"1111000"_b, "fcmlt_asimdmiscfp16_fz"}, + {"1x00000"_b, 
"fcmlt_asimdmisc_fz"}, + }, + }, + + { "_sztkhs", + {30, 23, 22}, + { {"000"_b, "msub_64a_dp_3src"}, + }, + }, + + { "_szylpy", + {22, 12}, + { {"10"_b, "_hhlmrg"}, + }, + }, + + { "_szysqh", + {22, 13, 12}, + { {"000"_b, "ldsmax_32_memop"}, + {"001"_b, "ldsmin_32_memop"}, + {"010"_b, "ldumax_32_memop"}, + {"011"_b, "ldumin_32_memop"}, + {"100"_b, "ldsmaxl_32_memop"}, + {"101"_b, "ldsminl_32_memop"}, + {"110"_b, "ldumaxl_32_memop"}, + {"111"_b, "lduminl_32_memop"}, + }, + }, + + { "_tgvkhm", + {20, 19, 18, 17, 16, 13}, + { {"000000"_b, "fabs_s_floatdp1"}, + {"000010"_b, "fsqrt_s_floatdp1"}, + {"000100"_b, "fcvt_ds_floatdp1"}, + {"000110"_b, "fcvt_hs_floatdp1"}, + {"001000"_b, "frintp_s_floatdp1"}, + {"001010"_b, "frintz_s_floatdp1"}, + {"001110"_b, "frinti_s_floatdp1"}, + {"010000"_b, "frint32x_s_floatdp1"}, + {"010010"_b, "frint64x_s_floatdp1"}, + }, + }, + + { "_thkkgx", + {18}, + { {"1"_b, "fminnmv_v_p_z"}, + }, + }, + + { "_thqgrq", + {13, 12, 11, 10}, + { {"1111"_b, "_pgmlrt"}, + }, + }, + + { "_thrxph", + {23, 22, 10}, + { {"100"_b, "umlalb_z_zzzi_s"}, + {"101"_b, "umlalt_z_zzzi_s"}, + {"110"_b, "umlalb_z_zzzi_d"}, + {"111"_b, "umlalt_z_zzzi_d"}, + }, + }, + + { "_thvxym", + {20}, + { {"0"_b, "_prkmty"}, + {"1"_b, "_pjgkjs"}, + }, + }, + + { "_tjjqpx", + {23, 22, 20, 19, 16, 13, 10}, + { {"0000000"_b, "_mlgmqm"}, + {"0000001"_b, "_mvqkzv"}, + {"0000010"_b, "_jztspt"}, + {"0000011"_b, "_hrpkqg"}, + {"0100000"_b, "_llqtkj"}, + {"0100001"_b, "_pmpsvs"}, + {"0100010"_b, "_vhrkvk"}, + {"0100011"_b, "_xsvpzx"}, + {"100xx00"_b, "st2_asisdlsop_sx2_r2s"}, + {"100xx01"_b, "_ynyqky"}, + {"100xx10"_b, "st4_asisdlsop_sx4_r4s"}, + {"100xx11"_b, "_grvxrm"}, + {"1010x00"_b, "st2_asisdlsop_sx2_r2s"}, + {"1010x01"_b, "_snvzjr"}, + {"1010x10"_b, "st4_asisdlsop_sx4_r4s"}, + {"1010x11"_b, "_xmkysx"}, + {"1011000"_b, "st2_asisdlsop_sx2_r2s"}, + {"1011001"_b, "_xqhxql"}, + {"1011010"_b, "st4_asisdlsop_sx4_r4s"}, + {"1011011"_b, "_ykpqth"}, + {"1011100"_b, "_lgyqpk"}, + {"1011101"_b, "_tplghv"}, + {"1011110"_b, "_lqknkn"}, + {"1011111"_b, "_zprgxt"}, + {"110xx00"_b, "ld2_asisdlsop_sx2_r2s"}, + {"110xx01"_b, "_prjzxs"}, + {"110xx10"_b, "ld4_asisdlsop_sx4_r4s"}, + {"110xx11"_b, "_txsvzz"}, + {"1110x00"_b, "ld2_asisdlsop_sx2_r2s"}, + {"1110x01"_b, "_hljttg"}, + {"1110x10"_b, "ld4_asisdlsop_sx4_r4s"}, + {"1110x11"_b, "_rknxlg"}, + {"1111000"_b, "ld2_asisdlsop_sx2_r2s"}, + {"1111001"_b, "_szmnhg"}, + {"1111010"_b, "ld4_asisdlsop_sx4_r4s"}, + {"1111011"_b, "_tjrtxx"}, + {"1111100"_b, "_ppvnly"}, + {"1111101"_b, "_lltzjg"}, + {"1111110"_b, "_ypsgqz"}, + {"1111111"_b, "_vnrlsj"}, + }, + }, + + { "_tjlthk", + {9, 8, 7, 6, 5, 1}, + { {"111110"_b, "drps_64e_branch_reg"}, + }, + }, + + { "_tjnzjl", + {18, 17}, + { {"00"_b, "st1_asisdlso_s1_1s"}, + }, + }, + + { "_tjrtxx", + {12}, + { {"0"_b, "ld4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_tjxhsy", + {10}, + { {"0"_b, "braa_64p_branch_reg"}, + {"1"_b, "brab_64p_branch_reg"}, + }, + }, + + { "_tjxyky", + {12}, + { {"0"_b, "st3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_tjzqnp", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ldnt1b_z_p_bi_contiguous"}, + {"000x0"_b, "ldnt1b_z_p_br_contiguous"}, + {"00101"_b, "ld3b_z_p_bi_contiguous"}, + {"001x0"_b, "ld3b_z_p_br_contiguous"}, + {"01001"_b, "ldnt1h_z_p_bi_contiguous"}, + {"010x0"_b, "ldnt1h_z_p_br_contiguous"}, + {"01101"_b, "ld3h_z_p_bi_contiguous"}, + {"011x0"_b, "ld3h_z_p_br_contiguous"}, + {"10011"_b, "stnt1b_z_p_bi_contiguous"}, + {"100x0"_b, "st1b_z_p_bz_d_x32_unscaled"}, + {"10111"_b, "st3b_z_p_bi_contiguous"}, + {"101x0"_b, 
"st1b_z_p_bz_s_x32_unscaled"}, + {"10x01"_b, "st1b_z_p_bi"}, + {"11011"_b, "stnt1h_z_p_bi_contiguous"}, + {"110x0"_b, "st1h_z_p_bz_d_x32_unscaled"}, + {"11111"_b, "st3h_z_p_bi_contiguous"}, + {"111x0"_b, "st1h_z_p_bz_s_x32_unscaled"}, + {"11x01"_b, "st1h_z_p_bi"}, + }, + }, + + { "_tkjtgp", + {30}, + { {"0"_b, "_sqgjmn"}, + {"1"_b, "_ztpryr"}, + }, + }, + + { "_tklxhy", + {18}, + { {"0"_b, "st3_asisdlso_b3_3b"}, + }, + }, + + { "_tknqxs", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldaxr_lr64_ldstexcl"}, + }, + }, + + { "_tktgvg", + {18}, + { {"0"_b, "ld4_asisdlso_b4_4b"}, + }, + }, + + { "_tlvmlq", + {18}, + { {"0"_b, "ld2_asisdlso_h2_2h"}, + }, + }, + + { "_tmhlvh", + {20, 9, 4}, + { {"000"_b, "zip2_p_pp"}, + }, + }, + + { "_tmsjzg", + {2, 1}, + { {"00"_b, "ret_64r_branch_reg"}, + }, + }, + + { "_tmtgqm", + {4}, + { {"0"_b, "ccmn_64_condcmp_imm"}, + }, + }, + + { "_tmtnkq", + {23, 18, 17, 16}, + { {"0000"_b, "uqxtnb_z_zz"}, + }, + }, + + { "_tnjhxp", + {9, 8, 7, 6, 5}, + { {"11111"_b, "pacdza_64z_dp_1src"}, + }, + }, + + { "_tnngsg", + {23, 22, 13, 12, 11, 10}, + { {"01x1x0"_b, "fcmla_asimdelem_c_h"}, + {"0x0001"_b, "ushr_asimdshf_r"}, + {"0x0101"_b, "usra_asimdshf_r"}, + {"0x1001"_b, "urshr_asimdshf_r"}, + {"0x1101"_b, "ursra_asimdshf_r"}, + {"10x1x0"_b, "fcmla_asimdelem_c_s"}, + {"xx00x0"_b, "mla_asimdelem_r"}, + {"xx10x0"_b, "umlal_asimdelem_l"}, + }, + }, + + { "_tnpjts", + {30}, + { {"0"_b, "and_64_log_shift"}, + {"1"_b, "eor_64_log_shift"}, + }, + }, + + { "_tpkslq", + {30, 23, 22, 20, 13, 4}, + { {"00001x"_b, "ld1rqw_z_p_bi_u32"}, + {"000x0x"_b, "ld1rqw_z_p_br_contiguous"}, + {"01001x"_b, "ld1rqd_z_p_bi_u64"}, + {"010x0x"_b, "ld1rqd_z_p_br_contiguous"}, + {"100x1x"_b, "stnt1w_z_p_ar_d_64_unscaled"}, + {"101x1x"_b, "stnt1w_z_p_ar_s_x32_unscaled"}, + {"110x00"_b, "str_p_bi"}, + {"110x1x"_b, "stnt1d_z_p_ar_d_64_unscaled"}, + }, + }, + + { "_tplghv", + {18, 17, 12}, + { {"0x0"_b, "st2_asisdlsop_dx2_r2d"}, + {"100"_b, "st2_asisdlsop_dx2_r2d"}, + {"110"_b, "st2_asisdlsop_d2_i2d"}, + }, + }, + + { "_tpmqyl", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_lszlkq"}, + }, + }, + + { "_tptqjs", + {12}, + { {"0"_b, "ld1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_tqlrzh", + {9, 8, 7, 6, 5, 2, 1}, + { {"1111111"_b, "eretab_64e_branch_reg"}, + }, + }, + + { "_tqlsyy", + {30}, + { {"0"_b, "add_32_addsub_shift"}, + {"1"_b, "sub_32_addsub_shift"}, + }, + }, + + { "_trjmmn", + {13, 12, 11, 10}, + { {"0001"_b, "sub_asisdsame_only"}, + {"0010"_b, "_plyhhz"}, + {"0011"_b, "cmeq_asisdsame_only"}, + {"0110"_b, "_qkhrkh"}, + {"1010"_b, "_kxhmlx"}, + {"1101"_b, "sqrdmulh_asisdsame_only"}, + {"1110"_b, "_ytrmvz"}, + }, + }, + + { "_tshjsk", + {18}, + { {"0"_b, "st4_asisdlsep_r4_r"}, + {"1"_b, "st4_asisdlsep_i4_i"}, + }, + }, + + { "_tsskys", + {23, 22, 18, 17, 16}, + { {"01000"_b, "fadd_z_p_zz"}, + {"01001"_b, "fsub_z_p_zz"}, + {"01010"_b, "fmul_z_p_zz"}, + {"01100"_b, "fmaxnm_z_p_zz"}, + {"01101"_b, "fminnm_z_p_zz"}, + {"01110"_b, "fmax_z_p_zz"}, + {"01111"_b, "fmin_z_p_zz"}, + {"1x000"_b, "fadd_z_p_zz"}, + {"1x001"_b, "fsub_z_p_zz"}, + {"1x010"_b, "fmul_z_p_zz"}, + {"1x100"_b, "fmaxnm_z_p_zz"}, + {"1x101"_b, "fminnm_z_p_zz"}, + {"1x110"_b, "fmax_z_p_zz"}, + {"1x111"_b, "fmin_z_p_zz"}, + {"xx011"_b, "fsubr_z_p_zz"}, + }, + }, + + { "_tsypsz", + {23, 22, 13, 12}, + { {"0000"_b, "fnmul_s_floatdp2"}, + {"0100"_b, "fnmul_d_floatdp2"}, + {"1100"_b, "fnmul_h_floatdp2"}, + }, + }, + + { "_ttmvpr", + {30, 23, 22, 20, 19}, + { {"0xxxx"_b, "bl_only_branch_imm"}, + {"10001"_b, "sys_cr_systeminstrs"}, + {"1001x"_b, 
"msr_sr_systemmove"}, + {"10101"_b, "sysp_cr_syspairinstrs"}, + {"1011x"_b, "msrr_sr_systemmovepr"}, + }, + }, + + { "_ttmyrv", + {30, 11, 10}, + { {"000"_b, "_nynrns"}, + {"001"_b, "_rrkmyl"}, + {"010"_b, "_rvvshx"}, + {"011"_b, "_zlmyjt"}, + {"101"_b, "_yrggjm"}, + {"110"_b, "_kskqmz"}, + {"111"_b, "_kzksnv"}, + }, + }, + + { "_ttplgp", + {12, 11, 10}, + { {"000"_b, "sqincp_z_p_z"}, + {"010"_b, "sqincp_r_p_r_sx"}, + {"011"_b, "sqincp_r_p_r_x"}, + {"100"_b, "_zqmrhp"}, + }, + }, + + { "_ttsgkt", + {12, 10}, + { {"00"_b, "_smsytm"}, + {"01"_b, "_mjrlkp"}, + {"10"_b, "_vjkhhm"}, + {"11"_b, "_ymxjjr"}, + }, + }, + + { "_ttzlqn", + {18, 17, 12}, + { {"000"_b, "ld1_asisdlso_d1_1d"}, + }, + }, + + { "_tvgklq", + {18}, + { {"0"_b, "st4_asisdlsop_bx4_r4b"}, + {"1"_b, "st4_asisdlsop_b4_i4b"}, + }, + }, + + { "_tvrlgz", + {18}, + { {"0"_b, "st1_asisdlsop_bx1_r1b"}, + {"1"_b, "st1_asisdlsop_b1_i1b"}, + }, + }, + + { "_tvtvkt", + {18, 17, 12}, + { {"000"_b, "ldap1_asisdlso_d1"}, + }, + }, + + { "_tvyxlr", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_jlnjsy"}, + }, + }, + + { "_txkmvh", + {18}, + { {"0"_b, "ld2_asisdlse_r2"}, + }, + }, + + { "_txsvzz", + {12}, + { {"0"_b, "ld4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_txzxzs", + {23, 22, 20, 19, 18}, + { {"00000"_b, "orr_z_zi"}, + {"01000"_b, "eor_z_zi"}, + {"10000"_b, "and_z_zi"}, + {"11000"_b, "dupm_z_i"}, + {"xx1xx"_b, "cpy_z_p_i"}, + }, + }, + + { "_tykvnx", + {30}, + { {"0"_b, "ldapr_32l_ldapstl_writeback"}, + {"1"_b, "ldapr_64l_ldapstl_writeback"}, + }, + }, + + { "_tymryz", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "bic_asimdimm_l_sl"}, + {"00x100"_b, "sli_asimdshf_r"}, + {"00x110"_b, "uqshl_asimdshf_r"}, + {"010x00"_b, "sli_asimdshf_r"}, + {"010x10"_b, "uqshl_asimdshf_r"}, + {"011100"_b, "sli_asimdshf_r"}, + {"011110"_b, "uqshl_asimdshf_r"}, + {"0x1000"_b, "sli_asimdshf_r"}, + {"0x1010"_b, "uqshl_asimdshf_r"}, + }, + }, + + { "_tytvjk", + {13, 12, 11}, + { {"000"_b, "_lylpyx"}, + {"001"_b, "_kyxrqg"}, + {"010"_b, "_zmkqxl"}, + {"011"_b, "_gngjxr"}, + {"100"_b, "_mlxtxs"}, + {"101"_b, "_mnmtql"}, + {"110"_b, "_xmxpnx"}, + {"111"_b, "_lkttgy"}, + }, + }, + + { "_tytzpq", + {30}, + { {"0"_b, "bic_32_log_shift"}, + {"1"_b, "eon_32_log_shift"}, + }, + }, + + { "_tyzpxk", + {22, 13, 12}, + { {"000"_b, "swpa_64_memop"}, + {"100"_b, "swpal_64_memop"}, + }, + }, + + { "_tzgtvm", + {13, 12}, + { {"00"_b, "crc32x_64c_dp_2src"}, + {"01"_b, "crc32cx_64c_dp_2src"}, + {"10"_b, "umin_64_dp_2src"}, + }, + }, + + { "_tzjyhy", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_d32_float2fix"}, + {"00011"_b, "ucvtf_d32_float2fix"}, + {"11000"_b, "fcvtzs_32d_float2fix"}, + {"11001"_b, "fcvtzu_32d_float2fix"}, + }, + }, + + { "_tzrgqq", + {23, 10}, + { {"00"_b, "_gyrkkz"}, + }, + }, + + { "_tzsnmy", + {9, 8, 7, 6, 5, 2, 1}, + { {"1111111"_b, "retab_64e_branch_reg"}, + }, + }, + + { "_tzsvyv", + {18}, + { {"0"_b, "ld2_asisdlsop_bx2_r2b"}, + {"1"_b, "ld2_asisdlsop_b2_i2b"}, + }, + }, + + { "_tzzssm", + {12, 11, 10}, + { {"000"_b, "histseg_z_zz"}, + }, + }, + + { "_vghjnt", + {23, 22}, + { {"00"_b, "fmadd_s_floatdp3"}, + {"01"_b, "fmadd_d_floatdp3"}, + {"11"_b, "fmadd_h_floatdp3"}, + }, + }, + + { "_vgqvys", + {30, 23, 22}, + { {"000"_b, "stp_32_ldstpair_off"}, + {"001"_b, "ldp_32_ldstpair_off"}, + {"010"_b, "stp_32_ldstpair_pre"}, + {"011"_b, "ldp_32_ldstpair_pre"}, + {"100"_b, "stgp_64_ldstpair_off"}, + {"101"_b, "ldpsw_64_ldstpair_off"}, + {"110"_b, "stgp_64_ldstpair_pre"}, + {"111"_b, "ldpsw_64_ldstpair_pre"}, + }, + }, + + { "_vgtnjh", + {23, 22, 20, 19, 
18, 17, 16}, + { {"0001010"_b, "fcvtxnt_z_p_z_d2s"}, + {"1001000"_b, "fcvtnt_z_p_z_s2h"}, + {"1001001"_b, "fcvtlt_z_p_z_h2s"}, + {"1001010"_b, "bfcvtnt_z_p_z_s2bf"}, + {"1101010"_b, "fcvtnt_z_p_z_d2s"}, + {"1101011"_b, "fcvtlt_z_p_z_s2d"}, + }, + }, + + { "_vgxtvy", + {23, 22, 20, 19, 18, 17, 16, 13, 12, 11}, + { {"0011111001"_b, "_tjxhsy"}, + }, + }, + + { "_vhkjgh", + {30, 23, 22, 20, 19, 18}, + { {"00xxxx"_b, "add_64_addsub_imm"}, + {"011000"_b, "smax_64_minmax_imm"}, + {"011001"_b, "umax_64u_minmax_imm"}, + {"011010"_b, "smin_64_minmax_imm"}, + {"011011"_b, "umin_64u_minmax_imm"}, + {"10xxxx"_b, "sub_64_addsub_imm"}, + }, + }, + + { "_vhkpvn", + {20, 18, 17, 16}, + { {"0000"_b, "_grktgm"}, + }, + }, + + { "_vhlqpr", + {30, 22, 11, 10}, + { {"0000"_b, "csel_64_condsel"}, + {"0001"_b, "csinc_64_condsel"}, + {"0100"_b, "_xgqhjv"}, + {"0101"_b, "_hspyhv"}, + {"0110"_b, "_qkxmvp"}, + {"0111"_b, "_tzgtvm"}, + {"1000"_b, "csinv_64_condsel"}, + {"1001"_b, "csneg_64_condsel"}, + {"1100"_b, "_hlqvmm"}, + {"1101"_b, "_ghrnmz"}, + }, + }, + + { "_vhrkvk", + {18, 17}, + { {"00"_b, "ld4_asisdlso_s4_4s"}, + }, + }, + + { "_vjhrzl", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "frintx_asimdmiscfp16_r"}, + {"0x00001"_b, "frintx_asimdmisc_r"}, + {"1111001"_b, "frinti_asimdmiscfp16_r"}, + {"1x00001"_b, "frinti_asimdmisc_r"}, + {"xx00000"_b, "cmle_asimdmisc_z"}, + }, + }, + + { "_vjkhhm", + {23, 22, 13}, + { {"000"_b, "fmul_asimdelem_rh_h"}, + {"1x0"_b, "fmul_asimdelem_r_sd"}, + {"xx1"_b, "sqdmull_asimdelem_l"}, + }, + }, + + { "_vjmklj", + {23, 22}, + { {"10"_b, "sqrdcmlah_z_zzzi_h"}, + {"11"_b, "sqrdcmlah_z_zzzi_s"}, + }, + }, + + { "_vjtgmx", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldlar_lr64_ldstexcl"}, + }, + }, + + { "_vkrkks", + {30, 23, 22, 13, 4}, + { {"00000"_b, "prfb_i_p_br_s"}, + {"00010"_b, "prfb_i_p_ai_s"}, + {"0010x"_b, "ld1rb_z_p_bi_u32"}, + {"0011x"_b, "ld1rb_z_p_bi_u64"}, + {"01000"_b, "prfh_i_p_br_s"}, + {"01010"_b, "prfh_i_p_ai_s"}, + {"0110x"_b, "ld1rh_z_p_bi_u32"}, + {"0111x"_b, "ld1rh_z_p_bi_u64"}, + {"1000x"_b, "ldnt1b_z_p_ar_d_64_unscaled"}, + {"10010"_b, "prfb_i_p_ai_d"}, + {"1010x"_b, "ld1b_z_p_bz_d_64_unscaled"}, + {"1011x"_b, "ldff1b_z_p_bz_d_64_unscaled"}, + {"1100x"_b, "ldnt1h_z_p_ar_d_64_unscaled"}, + {"11010"_b, "prfh_i_p_ai_d"}, + {"1110x"_b, "ld1h_z_p_bz_d_64_unscaled"}, + {"1111x"_b, "ldff1h_z_p_bz_d_64_unscaled"}, + }, + }, + + { "_vkrskv", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlur_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapur_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursw_64_ldapstl_unscaled"}, + {"100xx00"_b, "stlur_64_ldapstl_unscaled"}, + {"101xx00"_b, "ldapur_64_ldapstl_unscaled"}, + {"x000001"_b, "cpyfprn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtrn_cpy_memcms"}, + {"x001001"_b, "cpyfprtrn_cpy_memcms"}, + {"x001101"_b, "cpyfptrn_cpy_memcms"}, + {"x010001"_b, "cpyfmrn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtrn_cpy_memcms"}, + {"x011001"_b, "cpyfmrtrn_cpy_memcms"}, + {"x011101"_b, "cpyfmtrn_cpy_memcms"}, + {"x100001"_b, "cpyfern_cpy_memcms"}, + {"x100101"_b, "cpyfewtrn_cpy_memcms"}, + {"x101001"_b, "cpyfertrn_cpy_memcms"}, + {"x101101"_b, "cpyfetrn_cpy_memcms"}, + {"x110001"_b, "sete_set_memcms"}, + {"x110101"_b, "setet_set_memcms"}, + {"x111001"_b, "seten_set_memcms"}, + {"x111101"_b, "setetn_set_memcms"}, + }, + }, + + { "_vlhkgr", + {20, 19, 18, 17, 16}, + { {"00000"_b, "uaddlp_asimdmisc_p"}, + {"00001"_b, "sqxtun_asimdmisc_n"}, + }, + }, + + { "_vllmnt", + {20, 19, 18, 17}, + { {"0000"_b, "_gmtjvr"}, + }, + }, + + { "_vlrhpy", + {30, 23, 22, 
13, 4}, + { {"0000x"_b, "ld1sb_z_p_ai_s"}, + {"0001x"_b, "ldff1sb_z_p_ai_s"}, + {"0010x"_b, "ld1rb_z_p_bi_u8"}, + {"0011x"_b, "ld1rb_z_p_bi_u16"}, + {"0100x"_b, "ld1sh_z_p_ai_s"}, + {"0101x"_b, "ldff1sh_z_p_ai_s"}, + {"0110x"_b, "ld1rsw_z_p_bi_s64"}, + {"0111x"_b, "ld1rh_z_p_bi_u16"}, + {"1000x"_b, "ld1sb_z_p_ai_d"}, + {"1001x"_b, "ldff1sb_z_p_ai_d"}, + {"10100"_b, "prfb_i_p_bz_d_64_scaled"}, + {"10110"_b, "prfh_i_p_bz_d_64_scaled"}, + {"1100x"_b, "ld1sh_z_p_ai_d"}, + {"1101x"_b, "ldff1sh_z_p_ai_d"}, + {"1110x"_b, "ld1sh_z_p_bz_d_64_scaled"}, + {"1111x"_b, "ldff1sh_z_p_bz_d_64_scaled"}, + }, + }, + + { "_vlxrps", + {9, 8, 7, 6, 5}, + { {"00000"_b, "fmov_d_floatimm"}, + }, + }, + + { "_vmgnhk", + {30, 23}, + { {"00"_b, "add_64_addsub_imm"}, + {"10"_b, "sub_64_addsub_imm"}, + }, + }, + + { "_vmsxgq", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlur_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapur_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursw_64_ldapstl_unscaled"}, + {"100xx00"_b, "stlur_64_ldapstl_unscaled"}, + {"101xx00"_b, "ldapur_64_ldapstl_unscaled"}, + {"x000001"_b, "cpyfpwn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtwn_cpy_memcms"}, + {"x001001"_b, "cpyfprtwn_cpy_memcms"}, + {"x001101"_b, "cpyfptwn_cpy_memcms"}, + {"x010001"_b, "cpyfmwn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtwn_cpy_memcms"}, + {"x011001"_b, "cpyfmrtwn_cpy_memcms"}, + {"x011101"_b, "cpyfmtwn_cpy_memcms"}, + {"x100001"_b, "cpyfewn_cpy_memcms"}, + {"x100101"_b, "cpyfewtwn_cpy_memcms"}, + {"x101001"_b, "cpyfertwn_cpy_memcms"}, + {"x101101"_b, "cpyfetwn_cpy_memcms"}, + {"x110001"_b, "setm_set_memcms"}, + {"x110101"_b, "setmt_set_memcms"}, + {"x111001"_b, "setmn_set_memcms"}, + {"x111101"_b, "setmtn_set_memcms"}, + }, + }, + + { "_vmtkqp", + {30}, + { {"0"_b, "stlur_32_ldapstl_unscaled"}, + {"1"_b, "stlur_64_ldapstl_unscaled"}, + }, + }, + + { "_vmxzxt", + {23, 22, 13, 12, 11, 10}, + { {"0001x0"_b, "fmulx_asimdelem_rh_h"}, + {"0x0001"_b, "sqshrun_asimdshf_n"}, + {"0x0011"_b, "sqrshrun_asimdshf_n"}, + {"0x0101"_b, "uqshrn_asimdshf_n"}, + {"0x0111"_b, "uqrshrn_asimdshf_n"}, + {"0x1001"_b, "ushll_asimdshf_l"}, + {"1000x0"_b, "fmlal2_asimdelem_lh"}, + {"1x01x0"_b, "fmulx_asimdelem_r_sd"}, + {"xx10x0"_b, "umull_asimdelem_l"}, + }, + }, + + { "_vmyztj", + {30, 23, 22}, + { {"000"_b, "stp_64_ldstpair_off"}, + {"001"_b, "ldp_64_ldstpair_off"}, + {"010"_b, "stp_64_ldstpair_pre"}, + {"011"_b, "ldp_64_ldstpair_pre"}, + }, + }, + + { "_vnggzq", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx10"_b, "stlur_b_ldapstl_simd"}, + {"001xx10"_b, "ldapur_b_ldapstl_simd"}, + {"010xx10"_b, "stlur_q_ldapstl_simd"}, + {"011xx10"_b, "ldapur_q_ldapstl_simd"}, + {"100xx10"_b, "stlur_h_ldapstl_simd"}, + {"101xx10"_b, "ldapur_h_ldapstl_simd"}, + {"x000001"_b, "cpypn_cpy_memcms"}, + {"x000101"_b, "cpypwtn_cpy_memcms"}, + {"x001001"_b, "cpyprtn_cpy_memcms"}, + {"x001101"_b, "cpyptn_cpy_memcms"}, + {"x010001"_b, "cpymn_cpy_memcms"}, + {"x010101"_b, "cpymwtn_cpy_memcms"}, + {"x011001"_b, "cpymrtn_cpy_memcms"}, + {"x011101"_b, "cpymtn_cpy_memcms"}, + {"x100001"_b, "cpyen_cpy_memcms"}, + {"x100101"_b, "cpyewtn_cpy_memcms"}, + {"x101001"_b, "cpyertn_cpy_memcms"}, + {"x101101"_b, "cpyetn_cpy_memcms"}, + }, + }, + + { "_vnnjxg", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xxxx"_b, "fnmsub_s_floatdp3"}, + {"001xxxx"_b, "fnmsub_d_floatdp3"}, + {"011xxxx"_b, "fnmsub_h_floatdp3"}, + {"10x1001"_b, "scvtf_asisdshf_c"}, + {"10x1111"_b, "fcvtzs_asisdshf_c"}, + {"1xx00x0"_b, "sqdmulh_asisdelem_r"}, + {"1xx01x0"_b, "sqrdmulh_asisdelem_r"}, + }, + }, + + { "_vnrlrk", + {30}, + 
{ {"0"_b, "orn_64_log_shift"}, + {"1"_b, "bics_64_log_shift"}, + }, + }, + + { "_vnrlsj", + {18, 17, 12}, + { {"0x0"_b, "ld4_asisdlsop_dx4_r4d"}, + {"100"_b, "ld4_asisdlsop_dx4_r4d"}, + {"110"_b, "ld4_asisdlsop_d4_i4d"}, + }, + }, + + { "_vnsqhn", + {30, 23, 11, 10}, + { {"0010"_b, "_plytvr"}, + {"0100"_b, "_zghtll"}, + {"0110"_b, "_ptkgrz"}, + {"1000"_b, "_xksqnh"}, + {"1001"_b, "ldraa_64_ldst_pac"}, + {"1010"_b, "_hyskth"}, + {"1011"_b, "ldraa_64w_ldst_pac"}, + {"1100"_b, "_kpgghm"}, + {"1101"_b, "ldrab_64_ldst_pac"}, + {"1110"_b, "_zxjkmj"}, + {"1111"_b, "ldrab_64w_ldst_pac"}, + }, + }, + + { "_vnzkty", + {30}, + { {"0"_b, "orr_64_log_shift"}, + {"1"_b, "ands_64_log_shift"}, + }, + }, + + { "_vpgxgk", + {20, 19, 18, 17, 16, 13, 12}, + { {"1111100"_b, "_rqzpzq"}, + }, + }, + + { "_vpjktn", + {30, 23, 22}, + { {"000"_b, "madd_64a_dp_3src"}, + }, + }, + + { "_vpknjg", + {13, 12}, + { {"00"_b, "sdiv_32_dp_2src"}, + {"10"_b, "rorv_32_dp_2src"}, + }, + }, + + { "_vpmxrj", + {13}, + { {"0"_b, "histcnt_z_p_zz"}, + {"1"_b, "_jxszhy"}, + }, + }, + + { "_vpyvjr", + {9, 8, 7, 6, 5}, + { {"11111"_b, "pacizb_64z_dp_1src"}, + }, + }, + + { "_vqrqjt", + {30, 23, 22, 11, 10}, + { {"01000"_b, "csel_32_condsel"}, + {"01001"_b, "csinc_32_condsel"}, + {"11000"_b, "csinv_32_condsel"}, + {"11001"_b, "csneg_32_condsel"}, + }, + }, + + { "_vqzsgg", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_s32_float2fix"}, + {"00011"_b, "ucvtf_s32_float2fix"}, + {"11000"_b, "fcvtzs_32s_float2fix"}, + {"11001"_b, "fcvtzu_32s_float2fix"}, + }, + }, + + { "_vrjhtm", + {12}, + { {"0"_b, "sqdmulh_asimdelem_r"}, + {"1"_b, "sqrdmulh_asimdelem_r"}, + }, + }, + + { "_vrsgzg", + {30, 23, 22, 20, 19, 18}, + { {"00xxxx"_b, "add_64_addsub_imm"}, + {"010xxx"_b, "addg_64_addsub_immtags"}, + {"011000"_b, "smax_64_minmax_imm"}, + {"011001"_b, "umax_64u_minmax_imm"}, + {"011010"_b, "smin_64_minmax_imm"}, + {"011011"_b, "umin_64u_minmax_imm"}, + {"10xxxx"_b, "sub_64_addsub_imm"}, + {"110xxx"_b, "subg_64_addsub_immtags"}, + }, + }, + + { "_vrsjnp", + {13, 12, 11, 10}, + { {"1111"_b, "cas_c32_ldstexcl"}, + }, + }, + + { "_vrxhss", + {20, 19, 18, 17, 16}, + { {"00001"_b, "uqxtn_asisdmisc_n"}, + }, + }, + + { "_vryrnh", + {30, 22, 11}, + { {"001"_b, "_zsgpsn"}, + {"010"_b, "ccmn_32_condcmp_reg"}, + {"011"_b, "ccmn_32_condcmp_imm"}, + {"110"_b, "ccmp_32_condcmp_reg"}, + {"111"_b, "ccmp_32_condcmp_imm"}, + }, + }, + + { "_vrzksz", + {20, 19, 18, 17, 16, 13, 12}, + { {"1111100"_b, "ldaprh_32l_memop"}, + }, + }, + + { "_vshynq", + {30, 23, 22, 11, 10}, + { {"00000"_b, "sturb_32_ldst_unscaled"}, + {"00001"_b, "strb_32_ldst_immpost"}, + {"00010"_b, "sttrb_32_ldst_unpriv"}, + {"00011"_b, "strb_32_ldst_immpre"}, + {"00100"_b, "ldurb_32_ldst_unscaled"}, + {"00101"_b, "ldrb_32_ldst_immpost"}, + {"00110"_b, "ldtrb_32_ldst_unpriv"}, + {"00111"_b, "ldrb_32_ldst_immpre"}, + {"01000"_b, "ldursb_64_ldst_unscaled"}, + {"01001"_b, "ldrsb_64_ldst_immpost"}, + {"01010"_b, "ldtrsb_64_ldst_unpriv"}, + {"01011"_b, "ldrsb_64_ldst_immpre"}, + {"01100"_b, "ldursb_32_ldst_unscaled"}, + {"01101"_b, "ldrsb_32_ldst_immpost"}, + {"01110"_b, "ldtrsb_32_ldst_unpriv"}, + {"01111"_b, "ldrsb_32_ldst_immpre"}, + {"10000"_b, "sturh_32_ldst_unscaled"}, + {"10001"_b, "strh_32_ldst_immpost"}, + {"10010"_b, "sttrh_32_ldst_unpriv"}, + {"10011"_b, "strh_32_ldst_immpre"}, + {"10100"_b, "ldurh_32_ldst_unscaled"}, + {"10101"_b, "ldrh_32_ldst_immpost"}, + {"10110"_b, "ldtrh_32_ldst_unpriv"}, + {"10111"_b, "ldrh_32_ldst_immpre"}, + {"11000"_b, "ldursh_64_ldst_unscaled"}, + {"11001"_b, 
"ldrsh_64_ldst_immpost"}, + {"11010"_b, "ldtrsh_64_ldst_unpriv"}, + {"11011"_b, "ldrsh_64_ldst_immpre"}, + {"11100"_b, "ldursh_32_ldst_unscaled"}, + {"11101"_b, "ldrsh_32_ldst_immpost"}, + {"11110"_b, "ldtrsh_32_ldst_unpriv"}, + {"11111"_b, "ldrsh_32_ldst_immpre"}, + }, + }, + + { "_vsnnms", + {30, 13, 12, 11, 10}, + { {"00000"_b, "_xzntxr"}, + }, + }, + + { "_vsslrs", + {8}, + { {"0"_b, "tstart_br_systemresult"}, + {"1"_b, "ttest_br_systemresult"}, + }, + }, + + { "_vsyjql", + {4}, + { {"0"_b, "ccmn_32_condcmp_imm"}, + }, + }, + + { "_vtgnnl", + {30}, + { {"0"_b, "_qgsrqq"}, + {"1"_b, "_mgjhts"}, + }, + }, + + { "_vtllgt", + {10}, + { {"0"_b, "_nhnhzp"}, + }, + }, + + { "_vtyqhh", + {30}, + { {"0"_b, "and_32_log_shift"}, + {"1"_b, "eor_32_log_shift"}, + }, + }, + + { "_vvgnhm", + {23}, + { {"0"_b, "fmulx_asimdsame_only"}, + }, + }, + + { "_vvgpzq", + {20, 19, 18, 17, 16}, + { {"00000"_b, "fcvtns_32h_float2int"}, + {"00001"_b, "fcvtnu_32h_float2int"}, + {"00010"_b, "scvtf_h32_float2int"}, + {"00011"_b, "ucvtf_h32_float2int"}, + {"00100"_b, "fcvtas_32h_float2int"}, + {"00101"_b, "fcvtau_32h_float2int"}, + {"00110"_b, "fmov_32h_float2int"}, + {"00111"_b, "fmov_h32_float2int"}, + {"01000"_b, "fcvtps_32h_float2int"}, + {"01001"_b, "fcvtpu_32h_float2int"}, + {"10000"_b, "fcvtms_32h_float2int"}, + {"10001"_b, "fcvtmu_32h_float2int"}, + {"11000"_b, "fcvtzs_32h_float2int"}, + {"11001"_b, "fcvtzu_32h_float2int"}, + }, + }, + + { "_vvtnrv", + {23, 22, 20, 19, 18}, + { {"00000"_b, "orr_z_zi"}, + {"01000"_b, "eor_z_zi"}, + {"10000"_b, "and_z_zi"}, + {"11000"_b, "dupm_z_i"}, + }, + }, + + { "_vvxsxt", + {4}, + { {"0"_b, "ands_p_p_pp_z"}, + {"1"_b, "bics_p_p_pp_z"}, + }, + }, + + { "_vvyjmh", + {23, 22, 20, 19, 11}, + { {"00010"_b, "ssra_asisdshf_r"}, + {"001x0"_b, "ssra_asisdshf_r"}, + {"01xx0"_b, "ssra_asisdshf_r"}, + }, + }, + + { "_vvzsmg", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "bic_asimdimm_l_sl"}, + {"00x100"_b, "usra_asimdshf_r"}, + {"00x110"_b, "ursra_asimdshf_r"}, + {"010x00"_b, "usra_asimdshf_r"}, + {"010x10"_b, "ursra_asimdshf_r"}, + {"011100"_b, "usra_asimdshf_r"}, + {"011110"_b, "ursra_asimdshf_r"}, + {"0x1000"_b, "usra_asimdshf_r"}, + {"0x1010"_b, "ursra_asimdshf_r"}, + }, + }, + + { "_vxhgzz", + {23, 22, 12, 11, 10}, + { {"00xxx"_b, "ext_z_zi_des"}, + {"01xxx"_b, "ext_z_zi_con"}, + {"10000"_b, "zip1_z_zz_q"}, + {"10001"_b, "zip2_z_zz_q"}, + {"10010"_b, "uzp1_z_zz_q"}, + {"10011"_b, "uzp2_z_zz_q"}, + {"10110"_b, "trn1_z_zz_q"}, + {"10111"_b, "trn2_z_zz_q"}, + }, + }, + + { "_vxhjgg", + {20, 18, 17, 16}, + { {"0000"_b, "_shgxyq"}, + }, + }, + + { "_vxlmxz", + {4, 3, 2, 1, 0}, + { {"11111"_b, "_hpmvzr"}, + }, + }, + + { "_vxqtkl", + {18, 17}, + { {"00"_b, "_zqmvqs"}, + }, + }, + + { "_vxrnyh", + {18, 17}, + { {"0x"_b, "st1_asisdlsep_r1_r1"}, + {"10"_b, "st1_asisdlsep_r1_r1"}, + {"11"_b, "st1_asisdlsep_i1_i1"}, + }, + }, + + { "_vxvyyg", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xx00"_b, "stlurb_32_ldapstl_unscaled"}, + {"001xx00"_b, "ldapurb_32_ldapstl_unscaled"}, + {"010xx00"_b, "ldapursb_64_ldapstl_unscaled"}, + {"011xx00"_b, "ldapursb_32_ldapstl_unscaled"}, + {"100xx00"_b, "stlurh_32_ldapstl_unscaled"}, + {"101xx00"_b, "ldapurh_32_ldapstl_unscaled"}, + {"110xx00"_b, "ldapursh_64_ldapstl_unscaled"}, + {"111xx00"_b, "ldapursh_32_ldapstl_unscaled"}, + {"x000001"_b, "cpyfpn_cpy_memcms"}, + {"x000101"_b, "cpyfpwtn_cpy_memcms"}, + {"x001001"_b, "cpyfprtn_cpy_memcms"}, + {"x001101"_b, "cpyfptn_cpy_memcms"}, + {"x010001"_b, "cpyfmn_cpy_memcms"}, + {"x010101"_b, "cpyfmwtn_cpy_memcms"}, + 
{"x011001"_b, "cpyfmrtn_cpy_memcms"}, + {"x011101"_b, "cpyfmtn_cpy_memcms"}, + {"x100001"_b, "cpyfen_cpy_memcms"}, + {"x100101"_b, "cpyfewtn_cpy_memcms"}, + {"x101001"_b, "cpyfertn_cpy_memcms"}, + {"x101101"_b, "cpyfetn_cpy_memcms"}, + }, + }, + + { "_vyjsst", + {30, 4}, + { {"0x"_b, "b_only_branch_imm"}, + {"10"_b, "b_only_condbranch"}, + {"11"_b, "bc_only_condbranch"}, + }, + }, + + { "_vypgrt", + {20, 19, 18, 17, 16}, + { {"00000"_b, "rev16_asimdmisc_r"}, + }, + }, + + { "_vypnss", + {30}, + { {"0"_b, "orn_32_log_shift"}, + {"1"_b, "bics_32_log_shift"}, + }, + }, + + { "_vyqxyz", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtau_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtau_asimdmisc_r"}, + {"0x10000"_b, "fmaxnmv_asimdall_only_sd"}, + {"1111000"_b, "fcmge_asimdmiscfp16_fz"}, + {"1x00000"_b, "fcmge_asimdmisc_fz"}, + {"1x00001"_b, "ursqrte_asimdmisc_r"}, + {"1x10000"_b, "fminnmv_asimdall_only_sd"}, + }, + }, + + { "_vzjvtv", + {23, 22, 12, 11, 10}, + { {"01001"_b, "bfmmla_z_zzz"}, + {"10001"_b, "fmmla_z_zzz_s"}, + {"11001"_b, "fmmla_z_zzz_d"}, + }, + }, + + { "_vzvstm", + {23, 22, 20, 19, 12, 11}, + { {"000000"_b, "movi_asimdimm_n_b"}, + {"000010"_b, "fmov_asimdimm_s_s"}, + {"000011"_b, "fmov_asimdimm_h_h"}, + {"00x100"_b, "scvtf_asimdshf_c"}, + {"00x111"_b, "fcvtzs_asimdshf_c"}, + {"010x00"_b, "scvtf_asimdshf_c"}, + {"010x11"_b, "fcvtzs_asimdshf_c"}, + {"011100"_b, "scvtf_asimdshf_c"}, + {"011111"_b, "fcvtzs_asimdshf_c"}, + {"0x1000"_b, "scvtf_asimdshf_c"}, + {"0x1011"_b, "fcvtzs_asimdshf_c"}, + }, + }, + + { "_vzyklr", + {13, 12}, + { {"00"_b, "setp_set_memcms"}, + {"01"_b, "setpt_set_memcms"}, + {"10"_b, "setpn_set_memcms"}, + {"11"_b, "setptn_set_memcms"}, + }, + }, + + { "_vzzqhx", + {12, 10}, + { {"00"_b, "_phrqqx"}, + {"01"_b, "_snnlgr"}, + {"10"_b, "_phsrlk"}, + {"11"_b, "_nrmlqv"}, + }, + }, + + { "_xghrjn", + {20, 19, 18, 17, 16}, + { {"00010"_b, "scvtf_h32_float2fix"}, + {"00011"_b, "ucvtf_h32_float2fix"}, + {"11000"_b, "fcvtzs_32h_float2fix"}, + {"11001"_b, "fcvtzu_32h_float2fix"}, + }, + }, + + { "_xgqhjv", + {13, 12}, + { {"10"_b, "smax_64_dp_2src"}, + }, + }, + + { "_xgxtlr", + {23}, + { {"0"_b, "fdiv_asimdsame_only"}, + }, + }, + + { "_xhhqnx", + {30, 23, 22, 13, 12, 11, 10}, + { {"1101001"_b, "ummla_asimdsame2_g"}, + {"xxx0001"_b, "sqrdmlah_asimdsame2_only"}, + {"xxx0011"_b, "sqrdmlsh_asimdsame2_only"}, + {"xxx0101"_b, "udot_asimdsame2_d"}, + }, + }, + + { "_xhktsk", + {22}, + { {"0"_b, "smullt_z_zzi_s"}, + {"1"_b, "smullt_z_zzi_d"}, + }, + }, + + { "_xhlhmh", + {4}, + { {"0"_b, "cmplo_p_p_zi"}, + {"1"_b, "cmpls_p_p_zi"}, + }, + }, + + { "_xhmpmy", + {4}, + { {"0"_b, "and_p_p_pp_z"}, + {"1"_b, "bic_p_p_pp_z"}, + }, + }, + + { "_xjtzgm", + {30, 23, 22, 11, 10}, + { {"00000"_b, "stur_b_ldst_unscaled"}, + {"00001"_b, "str_b_ldst_immpost"}, + {"00011"_b, "str_b_ldst_immpre"}, + {"00100"_b, "ldur_b_ldst_unscaled"}, + {"00101"_b, "ldr_b_ldst_immpost"}, + {"00111"_b, "ldr_b_ldst_immpre"}, + {"01000"_b, "stur_q_ldst_unscaled"}, + {"01001"_b, "str_q_ldst_immpost"}, + {"01011"_b, "str_q_ldst_immpre"}, + {"01100"_b, "ldur_q_ldst_unscaled"}, + {"01101"_b, "ldr_q_ldst_immpost"}, + {"01111"_b, "ldr_q_ldst_immpre"}, + {"10000"_b, "stur_h_ldst_unscaled"}, + {"10001"_b, "str_h_ldst_immpost"}, + {"10011"_b, "str_h_ldst_immpre"}, + {"10100"_b, "ldur_h_ldst_unscaled"}, + {"10101"_b, "ldr_h_ldst_immpost"}, + {"10111"_b, "ldr_h_ldst_immpre"}, + }, + }, + + { "_xksqnh", + {22, 20, 19, 18, 17, 16, 13, 12}, + { {"01111101"_b, "ld64b_64l_memop"}, + }, + }, + + { "_xkylhh", + {22, 13, 12}, + { 
{"000"_b, "swpa_32_memop"}, + {"100"_b, "swpal_32_memop"}, + }, + }, + + { "_xkznrh", + {18, 17}, + { {"00"_b, "st3_asisdlse_r3"}, + }, + }, + + { "_xlgxhn", + {23, 22, 4}, + { {"000"_b, "fccmp_s_floatccmp"}, + {"001"_b, "fccmpe_s_floatccmp"}, + {"010"_b, "fccmp_d_floatccmp"}, + {"011"_b, "fccmpe_d_floatccmp"}, + {"110"_b, "fccmp_h_floatccmp"}, + {"111"_b, "fccmpe_h_floatccmp"}, + }, + }, + + { "_xlqmhl", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldar_lr64_ldstexcl"}, + }, + }, + + { "_xlyjsz", + {23, 22, 13}, + { {"100"_b, "fmlal2_asimdelem_lh"}, + {"xx1"_b, "umull_asimdelem_l"}, + }, + }, + + { "_xlyppq", + {23, 22, 20, 19, 18, 17, 16}, + { {"0010000"_b, "fmaxv_asimdall_only_h"}, + {"0x00001"_b, "frint64z_asimdmisc_r"}, + {"1010000"_b, "fminv_asimdall_only_h"}, + {"1111000"_b, "fabs_asimdmiscfp16_r"}, + {"1x00000"_b, "fabs_asimdmisc_r"}, + }, + }, + + { "_xmkysx", + {12}, + { {"0"_b, "st4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_xmxhhg", + {13, 12, 4}, + { {"000"_b, "rmif_only_rmif"}, + }, + }, + + { "_xmxpnx", + {10}, + { {"0"_b, "sri_z_zzi"}, + {"1"_b, "sli_z_zzi"}, + }, + }, + + { "_xnhkpk", + {23, 22}, + { {"00"_b, "fcsel_s_floatsel"}, + {"01"_b, "fcsel_d_floatsel"}, + {"11"_b, "fcsel_h_floatsel"}, + }, + }, + + { "_xnpyvy", + {13, 10}, + { {"00"_b, "_sylkvm"}, + {"01"_b, "_nvnjyp"}, + {"10"_b, "_ltrntg"}, + {"11"_b, "_qrtjvn"}, + }, + }, + + { "_xnrrsy", + {18}, + { {"0"_b, "st1_asisdlsep_r4_r4"}, + {"1"_b, "st1_asisdlsep_i4_i4"}, + }, + }, + + { "_xnrxym", + {18}, + { {"0"_b, "ld2_asisdlsep_r2_r"}, + {"1"_b, "ld2_asisdlsep_i2_i"}, + }, + }, + + { "_xpqglq", + {4}, + { {"0"_b, "cmpeq_p_p_zi"}, + {"1"_b, "cmpne_p_p_zi"}, + }, + }, + + { "_xprqgs", + {23, 20, 19, 18, 17, 16}, + { {"000001"_b, "fcvtxn_asisdmisc_n"}, + }, + }, + + { "_xptsns", + {23, 22}, + { {"00"_b, "tbx_asimdtbl_l1_1"}, + }, + }, + + { "_xqhxql", + {12}, + { {"0"_b, "st2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_xqrgjj", + {4}, + { {"0"_b, "ccmp_64_condcmp_imm"}, + }, + }, + + { "_xqvzvl", + {18, 17}, + { {"0x"_b, "st1_asisdlsep_r3_r3"}, + {"10"_b, "st1_asisdlsep_r3_r3"}, + {"11"_b, "st1_asisdlsep_i3_i3"}, + }, + }, + + { "_xrkzpn", + {12}, + { {"0"_b, "_zjqssg"}, + }, + }, + + { "_xrnqyn", + {30}, + { {"0"_b, "stlr_32s_ldapstl_writeback"}, + {"1"_b, "stlr_64s_ldapstl_writeback"}, + }, + }, + + { "_xrskrk", + {22, 12}, + { {"10"_b, "_kyhhqt"}, + }, + }, + + { "_xrzqtn", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_gyllxt"}, + }, + }, + + { "_xsgnlv", + {30, 23, 13, 12, 11, 10}, + { {"100001"_b, "ushr_asisdshf_r"}, + {"100101"_b, "usra_asisdshf_r"}, + {"101001"_b, "urshr_asisdshf_r"}, + {"101101"_b, "ursra_asisdshf_r"}, + }, + }, + + { "_xspjzn", + {13, 12, 11, 10}, + { {"1111"_b, "casl_c64_ldstexcl"}, + }, + }, + + { "_xsvpzx", + {18, 17, 12}, + { {"000"_b, "ld4_asisdlso_d4_4d"}, + }, + }, + + { "_xszmjn", + {30, 13, 12}, + { {"000"_b, "ldiapp_32le_ldiappstilp"}, + {"001"_b, "ldiapp_32l_ldiappstilp"}, + {"100"_b, "ldiapp_64ls_ldiappstilp"}, + {"101"_b, "ldiapp_64l_ldiappstilp"}, + }, + }, + + { "_xszqrg", + {30, 23, 22}, + { {"000"_b, "_glpxty"}, + {"001"_b, "_rkpylh"}, + {"011"_b, "_xghrjn"}, + {"100"_b, "_nklqly"}, + }, + }, + + { "_xtgmvr", + {23, 11, 10, 4, 3, 2, 0}, + { {"0000000"_b, "_mzkxzm"}, + {"0101111"_b, "_qgvrqy"}, + {"0111111"_b, "_lljxgp"}, + {"1000000"_b, "_tjlthk"}, + }, + }, + + { "_xtgtyz", + {19, 18, 17, 16}, + { {"0000"_b, "brkb_p_p_p"}, + }, + }, + + { "_xtxyxj", + {4}, + { {"0"_b, "orr_p_p_pp_z"}, + {"1"_b, "orn_p_p_pp_z"}, + }, + }, + + { "_xtzykp", + {20, 19, 18, 17, 16}, + { {"11111"_b, 
"ldlarh_lr32_ldstexcl"}, + }, + }, + + { "_xvmxrg", + {13}, + { {"0"_b, "mla_asimdelem_r"}, + {"1"_b, "umlal_asimdelem_l"}, + }, + }, + + { "_xvnyxq", + {30, 23, 13, 4}, + { {"0000"_b, "prfb_i_p_bz_s_x32_scaled"}, + {"0010"_b, "prfh_i_p_bz_s_x32_scaled"}, + {"010x"_b, "ld1sh_z_p_bz_s_x32_scaled"}, + {"011x"_b, "ldff1sh_z_p_bz_s_x32_scaled"}, + {"1000"_b, "prfb_i_p_bz_d_x32_scaled"}, + {"1010"_b, "prfh_i_p_bz_d_x32_scaled"}, + {"110x"_b, "ld1sh_z_p_bz_d_x32_scaled"}, + {"111x"_b, "ldff1sh_z_p_bz_d_x32_scaled"}, + }, + }, + + { "_xvppmm", + {30, 23, 22, 13, 12, 11, 10}, + { {"0xx0xxx"_b, "mla_z_p_zzz"}, + {"0xx1xxx"_b, "mls_z_p_zzz"}, + {"1101110"_b, "usdot_z_zzz_s"}, + {"1xx0000"_b, "smlalb_z_zzz"}, + {"1xx0001"_b, "smlalt_z_zzz"}, + {"1xx0010"_b, "umlalb_z_zzz"}, + {"1xx0011"_b, "umlalt_z_zzz"}, + {"1xx0100"_b, "smlslb_z_zzz"}, + {"1xx0101"_b, "smlslt_z_zzz"}, + {"1xx0110"_b, "umlslb_z_zzz"}, + {"1xx0111"_b, "umlslt_z_zzz"}, + {"1xx1000"_b, "sqdmlalb_z_zzz"}, + {"1xx1001"_b, "sqdmlalt_z_zzz"}, + {"1xx1010"_b, "sqdmlslb_z_zzz"}, + {"1xx1011"_b, "sqdmlslt_z_zzz"}, + {"1xx1100"_b, "sqrdmlah_z_zzz"}, + {"1xx1101"_b, "sqrdmlsh_z_zzz"}, + }, + }, + + { "_xvrvhv", + {4}, + { {"0"_b, "ccmp_32_condcmp_reg"}, + }, + }, + + { "_xxjrsy", + {23, 22, 9}, + { {"000"_b, "rdffr_p_p_f"}, + {"010"_b, "rdffrs_p_p_f"}, + }, + }, + + { "_xxphlt", + {23}, + { {"0"_b, "_qgshrr"}, + }, + }, + + { "_xxqzvy", + {20, 19, 18, 17, 16}, + { {"00000"_b, "fcvtns_32d_float2int"}, + {"00001"_b, "fcvtnu_32d_float2int"}, + {"00010"_b, "scvtf_d32_float2int"}, + {"00011"_b, "ucvtf_d32_float2int"}, + {"00100"_b, "fcvtas_32d_float2int"}, + {"00101"_b, "fcvtau_32d_float2int"}, + {"01000"_b, "fcvtps_32d_float2int"}, + {"01001"_b, "fcvtpu_32d_float2int"}, + {"10000"_b, "fcvtms_32d_float2int"}, + {"10001"_b, "fcvtmu_32d_float2int"}, + {"11000"_b, "fcvtzs_32d_float2int"}, + {"11001"_b, "fcvtzu_32d_float2int"}, + {"11110"_b, "fjcvtzs_32d_float2int"}, + }, + }, + + { "_xygvjp", + {23, 22}, + { {"00"_b, "and_asimdsame_only"}, + {"01"_b, "bic_asimdsame_only"}, + {"10"_b, "orr_asimdsame_only"}, + {"11"_b, "orn_asimdsame_only"}, + }, + }, + + { "_xyhmgh", + {23, 22, 20, 9}, + { {"0000"_b, "_xhmpmy"}, + {"0001"_b, "_qnprqt"}, + {"0010"_b, "_nnzhgm"}, + {"0100"_b, "_vvxsxt"}, + {"0101"_b, "_yzmjhn"}, + {"0110"_b, "_mkgsly"}, + {"1000"_b, "_xtxyxj"}, + {"1001"_b, "_hmtmlq"}, + {"1010"_b, "_xtgtyz"}, + {"1100"_b, "_yynmjl"}, + {"1101"_b, "_sjnspg"}, + {"1110"_b, "_jzjvtv"}, + }, + }, + + { "_xymnxy", + {30}, + { {"0"_b, "tbz_only_testbranch"}, + }, + }, + + { "_xynxhx", + {30, 23, 22, 11, 10}, + { {"00010"_b, "str_b_ldst_regoff"}, + {"00110"_b, "ldr_b_ldst_regoff"}, + {"01010"_b, "str_q_ldst_regoff"}, + {"01110"_b, "ldr_q_ldst_regoff"}, + {"10010"_b, "str_h_ldst_regoff"}, + {"10110"_b, "ldr_h_ldst_regoff"}, + }, + }, + + { "_xzjvkv", + {23, 22}, + { {"00"_b, "tbl_asimdtbl_l1_1"}, + }, + }, + + { "_xzlxjh", + {30, 23, 22}, + { {"001"_b, "sbfm_64m_bitfield"}, + {"011"_b, "extr_64_extract"}, + {"101"_b, "ubfm_64m_bitfield"}, + }, + }, + + { "_xzmrlg", + {30, 23, 22}, + { {"000"_b, "stlxr_sr32_ldstexcl"}, + {"001"_b, "_zzkgsk"}, + {"010"_b, "_mnzzhk"}, + {"011"_b, "_qlxlxk"}, + {"100"_b, "stlxr_sr64_ldstexcl"}, + {"101"_b, "_tknqxs"}, + {"110"_b, "_mhpgjx"}, + {"111"_b, "_xlqmhl"}, + }, + }, + + { "_xznsqh", + {22, 20, 11}, + { {"000"_b, "cntw_r_s"}, + {"010"_b, "incw_r_rs"}, + {"100"_b, "cntd_r_s"}, + {"110"_b, "incd_r_rs"}, + }, + }, + + { "_xzntxr", + {23, 22, 20, 19, 18, 17, 16}, + { {"0000000"_b, "fcvtns_64s_float2int"}, + {"0000001"_b, 
"fcvtnu_64s_float2int"}, + {"0000010"_b, "scvtf_s64_float2int"}, + {"0000011"_b, "ucvtf_s64_float2int"}, + {"0000100"_b, "fcvtas_64s_float2int"}, + {"0000101"_b, "fcvtau_64s_float2int"}, + {"0001000"_b, "fcvtps_64s_float2int"}, + {"0001001"_b, "fcvtpu_64s_float2int"}, + {"0010000"_b, "fcvtms_64s_float2int"}, + {"0010001"_b, "fcvtmu_64s_float2int"}, + {"0011000"_b, "fcvtzs_64s_float2int"}, + {"0011001"_b, "fcvtzu_64s_float2int"}, + {"0100000"_b, "fcvtns_64d_float2int"}, + {"0100001"_b, "fcvtnu_64d_float2int"}, + {"0100010"_b, "scvtf_d64_float2int"}, + {"0100011"_b, "ucvtf_d64_float2int"}, + {"0100100"_b, "fcvtas_64d_float2int"}, + {"0100101"_b, "fcvtau_64d_float2int"}, + {"0100110"_b, "fmov_64d_float2int"}, + {"0100111"_b, "fmov_d64_float2int"}, + {"0101000"_b, "fcvtps_64d_float2int"}, + {"0101001"_b, "fcvtpu_64d_float2int"}, + {"0110000"_b, "fcvtms_64d_float2int"}, + {"0110001"_b, "fcvtmu_64d_float2int"}, + {"0111000"_b, "fcvtzs_64d_float2int"}, + {"0111001"_b, "fcvtzu_64d_float2int"}, + {"1001110"_b, "fmov_64vx_float2int"}, + {"1001111"_b, "fmov_v64i_float2int"}, + {"1100000"_b, "fcvtns_64h_float2int"}, + {"1100001"_b, "fcvtnu_64h_float2int"}, + {"1100010"_b, "scvtf_h64_float2int"}, + {"1100011"_b, "ucvtf_h64_float2int"}, + {"1100100"_b, "fcvtas_64h_float2int"}, + {"1100101"_b, "fcvtau_64h_float2int"}, + {"1100110"_b, "fmov_64h_float2int"}, + {"1100111"_b, "fmov_h64_float2int"}, + {"1101000"_b, "fcvtps_64h_float2int"}, + {"1101001"_b, "fcvtpu_64h_float2int"}, + {"1110000"_b, "fcvtms_64h_float2int"}, + {"1110001"_b, "fcvtmu_64h_float2int"}, + {"1111000"_b, "fcvtzs_64h_float2int"}, + {"1111001"_b, "fcvtzu_64h_float2int"}, + }, + }, + + { "_xzqmkv", + {13, 12}, + { {"00"_b, "add_asisdsame_only"}, + {"11"_b, "sqdmulh_asisdsame_only"}, + }, + }, + + { "_ygghnn", + {20, 19, 18, 17, 16}, + { {"00000"_b, "suqadd_asimdmisc_r"}, + {"10000"_b, "saddlv_asimdall_only"}, + }, + }, + + { "_ygtpyl", + {22, 13, 12}, + { {"000"_b, "swp_32_memop"}, + {"100"_b, "swpl_32_memop"}, + }, + }, + + { "_yhhsns", + {20, 19, 18, 17}, + { {"0000"_b, "_myrkmk"}, + }, + }, + + { "_yhlntp", + {20, 19, 18, 17, 16}, + { {"00000"_b, "fexpa_z_z"}, + }, + }, + + { "_yhmlxk", + {13, 12, 11, 10}, + { {"0000"_b, "decp_z_p_z"}, + {"0010"_b, "decp_r_p_r"}, + }, + }, + + { "_yhnqyy", + {13, 12}, + { {"01"_b, "sqdmlal_asisddiff_only"}, + {"11"_b, "sqdmlsl_asisddiff_only"}, + }, + }, + + { "_yjktml", + {30}, + { {"0"_b, "ldr_32_loadlit"}, + {"1"_b, "ldr_64_loadlit"}, + }, + }, + + { "_yjmngt", + {30}, + { {"0"_b, "sel_z_p_zz"}, + {"1"_b, "_vpmxrj"}, + }, + }, + + { "_yjnkrn", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_grqsgp"}, + }, + }, + + { "_yjnmkg", + {30, 23, 11, 10}, + { {"0000"_b, "_szysqh"}, + {"0010"_b, "_ksrkkn"}, + {"0100"_b, "_gljqng"}, + {"0110"_b, "_qtghgs"}, + {"1000"_b, "_gjprgr"}, + {"1001"_b, "ldraa_64_ldst_pac"}, + {"1010"_b, "_gnpgsg"}, + {"1011"_b, "ldraa_64w_ldst_pac"}, + {"1100"_b, "_lnmhqq"}, + {"1101"_b, "ldrab_64_ldst_pac"}, + {"1110"_b, "_gsvlph"}, + {"1111"_b, "ldrab_64w_ldst_pac"}, + }, + }, + + { "_yjzknm", + {13, 12, 11, 10}, + { {"0000"_b, "uqdecp_z_p_z"}, + {"0010"_b, "uqdecp_r_p_r_uw"}, + {"0011"_b, "uqdecp_r_p_r_x"}, + }, + }, + + { "_ykhhqq", + {18}, + { {"0"_b, "ld2_asisdlsop_hx2_r2h"}, + {"1"_b, "ld2_asisdlsop_h2_i2h"}, + }, + }, + + { "_ykjhgg", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000000"_b, "ldaddb_32_memop"}, + {"0000100"_b, "ldclrb_32_memop"}, + {"0001000"_b, "ldeorb_32_memop"}, + {"0001100"_b, "ldsetb_32_memop"}, + {"000xx10"_b, "strb_32b_ldst_regoff"}, + {"0010000"_b, 
"ldaddlb_32_memop"}, + {"0010100"_b, "ldclrlb_32_memop"}, + {"0011000"_b, "ldeorlb_32_memop"}, + {"0011100"_b, "ldsetlb_32_memop"}, + {"001xx10"_b, "ldrb_32b_ldst_regoff"}, + {"0100000"_b, "ldaddab_32_memop"}, + {"0100100"_b, "ldclrab_32_memop"}, + {"0101000"_b, "ldeorab_32_memop"}, + {"0101100"_b, "ldsetab_32_memop"}, + {"010xx10"_b, "ldrsb_64b_ldst_regoff"}, + {"0110000"_b, "ldaddalb_32_memop"}, + {"0110100"_b, "ldclralb_32_memop"}, + {"0111000"_b, "ldeoralb_32_memop"}, + {"0111100"_b, "ldsetalb_32_memop"}, + {"011xx10"_b, "ldrsb_32b_ldst_regoff"}, + {"1000000"_b, "ldaddh_32_memop"}, + {"1000100"_b, "ldclrh_32_memop"}, + {"1001000"_b, "ldeorh_32_memop"}, + {"1001100"_b, "ldseth_32_memop"}, + {"100xx10"_b, "strh_32_ldst_regoff"}, + {"1010000"_b, "ldaddlh_32_memop"}, + {"1010100"_b, "ldclrlh_32_memop"}, + {"1011000"_b, "ldeorlh_32_memop"}, + {"1011100"_b, "ldsetlh_32_memop"}, + {"101xx10"_b, "ldrh_32_ldst_regoff"}, + {"1100000"_b, "ldaddah_32_memop"}, + {"1100100"_b, "ldclrah_32_memop"}, + {"1101000"_b, "ldeorah_32_memop"}, + {"1101100"_b, "ldsetah_32_memop"}, + {"110xx10"_b, "ldrsh_64_ldst_regoff"}, + {"1110000"_b, "ldaddalh_32_memop"}, + {"1110100"_b, "ldclralh_32_memop"}, + {"1111000"_b, "ldeoralh_32_memop"}, + {"1111100"_b, "ldsetalh_32_memop"}, + {"111xx10"_b, "ldrsh_32_ldst_regoff"}, + }, + }, + + { "_ykpgyh", + {13, 12, 5}, + { {"010"_b, "_gknljg"}, + {"011"_b, "_hjqryy"}, + {"100"_b, "_lmmkzh"}, + {"101"_b, "_vxlmxz"}, + {"110"_b, "_phktvp"}, + {"111"_b, "_qqvgql"}, + }, + }, + + { "_ykpqth", + {12}, + { {"0"_b, "st4_asisdlsop_dx4_r4d"}, + }, + }, + + { "_ykptgl", + {30, 23}, + { {"00"_b, "adds_32s_addsub_imm"}, + {"10"_b, "subs_32s_addsub_imm"}, + }, + }, + + { "_ylhgrh", + {13, 12, 11, 10}, + { {"0011"_b, "uqadd_asisdsame_only"}, + {"1010"_b, "_msvjxq"}, + {"1011"_b, "uqsub_asisdsame_only"}, + {"1101"_b, "cmhi_asisdsame_only"}, + {"1110"_b, "_yzlnrs"}, + {"1111"_b, "cmhs_asisdsame_only"}, + }, + }, + + { "_ylnsvy", + {20, 19, 18, 17, 16}, + { {"00000"_b, "dup_z_r"}, + {"00100"_b, "insr_z_r"}, + {"10000"_b, "sunpklo_z_z"}, + {"10001"_b, "sunpkhi_z_z"}, + {"10010"_b, "uunpklo_z_z"}, + {"10011"_b, "uunpkhi_z_z"}, + {"10100"_b, "insr_z_v"}, + {"11000"_b, "rev_z_z"}, + }, + }, + + { "_ymghnh", + {20, 19, 18, 17, 16}, + { {"11111"_b, "st64b_64l_memop"}, + }, + }, + + { "_ymhgxg", + {30, 13}, + { {"00"_b, "_yrmmmg"}, + {"01"_b, "_sghgtk"}, + {"10"_b, "_nxjkqs"}, + {"11"_b, "_yvyhlh"}, + }, + }, + + { "_ymhkrx", + {30, 23, 22, 13, 4}, + { {"0000x"_b, "ld1b_z_p_ai_s"}, + {"0001x"_b, "ldff1b_z_p_ai_s"}, + {"0010x"_b, "ld1rb_z_p_bi_u32"}, + {"0011x"_b, "ld1rb_z_p_bi_u64"}, + {"0100x"_b, "ld1h_z_p_ai_s"}, + {"0101x"_b, "ldff1h_z_p_ai_s"}, + {"0110x"_b, "ld1rh_z_p_bi_u32"}, + {"0111x"_b, "ld1rh_z_p_bi_u64"}, + {"1000x"_b, "ld1b_z_p_ai_d"}, + {"1001x"_b, "ldff1b_z_p_ai_d"}, + {"10100"_b, "prfw_i_p_bz_d_64_scaled"}, + {"10110"_b, "prfd_i_p_bz_d_64_scaled"}, + {"1100x"_b, "ld1h_z_p_ai_d"}, + {"1101x"_b, "ldff1h_z_p_ai_d"}, + {"1110x"_b, "ld1h_z_p_bz_d_64_scaled"}, + {"1111x"_b, "ldff1h_z_p_bz_d_64_scaled"}, + }, + }, + + { "_ymkthj", + {20, 9, 4}, + { {"000"_b, "uzp2_p_pp"}, + }, + }, + + { "_ymmhtq", + {23, 22, 20, 19, 11}, + { {"00010"_b, "srsra_asisdshf_r"}, + {"001x0"_b, "srsra_asisdshf_r"}, + {"01xx0"_b, "srsra_asisdshf_r"}, + }, + }, + + { "_ymszkr", + {30}, + { {"0"_b, "ldr_q_loadlit"}, + }, + }, + + { "_ymtzjg", + {12, 10}, + { {"00"_b, "_gmsmls"}, + {"01"_b, "_rnqmyp"}, + {"10"_b, "_srttng"}, + {"11"_b, "_tymryz"}, + }, + }, + + { "_ymvlzl", + {18}, + { {"0"_b, "st4_asisdlse_r4"}, + }, 
+ }, + + { "_ymvzyh", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldaxp_lp32_ldstexcl"}, + }, + }, + + { "_ymxjjr", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "orr_asimdimm_l_hl"}, + {"00x100"_b, "sqshrn_asimdshf_n"}, + {"00x101"_b, "sqrshrn_asimdshf_n"}, + {"010x00"_b, "sqshrn_asimdshf_n"}, + {"010x01"_b, "sqrshrn_asimdshf_n"}, + {"011100"_b, "sqshrn_asimdshf_n"}, + {"011101"_b, "sqrshrn_asimdshf_n"}, + {"0x1000"_b, "sqshrn_asimdshf_n"}, + {"0x1001"_b, "sqrshrn_asimdshf_n"}, + }, + }, + + { "_ynsytg", + {23, 22, 20, 19, 13, 11, 10}, + { {"0001001"_b, "shl_asisdshf_r"}, + {"0001101"_b, "sqshl_asisdshf_r"}, + {"001x001"_b, "shl_asisdshf_r"}, + {"001x101"_b, "sqshl_asisdshf_r"}, + {"00xx0x0"_b, "fmls_asisdelem_rh_h"}, + {"01xx001"_b, "shl_asisdshf_r"}, + {"01xx101"_b, "sqshl_asisdshf_r"}, + {"1xxx0x0"_b, "fmls_asisdelem_r_sd"}, + {"xxxx1x0"_b, "sqdmlsl_asisdelem_l"}, + }, + }, + + { "_ynyqky", + {12}, + { {"0"_b, "st2_asisdlsop_dx2_r2d"}, + }, + }, + + { "_ynznxv", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldaxrb_lr32_ldstexcl"}, + }, + }, + + { "_yppmkl", + {23, 22, 20, 19, 13, 11}, + { {"0000x0"_b, "mvni_asimdimm_l_hl"}, + {"00x100"_b, "sqshrun_asimdshf_n"}, + {"00x101"_b, "sqrshrun_asimdshf_n"}, + {"00x110"_b, "ushll_asimdshf_l"}, + {"010x00"_b, "sqshrun_asimdshf_n"}, + {"010x01"_b, "sqrshrun_asimdshf_n"}, + {"010x10"_b, "ushll_asimdshf_l"}, + {"011100"_b, "sqshrun_asimdshf_n"}, + {"011101"_b, "sqrshrun_asimdshf_n"}, + {"011110"_b, "ushll_asimdshf_l"}, + {"0x1000"_b, "sqshrun_asimdshf_n"}, + {"0x1001"_b, "sqrshrun_asimdshf_n"}, + {"0x1010"_b, "ushll_asimdshf_l"}, + }, + }, + + { "_yppszx", + {23, 22, 10}, + { {"100"_b, "umlslb_z_zzzi_s"}, + {"101"_b, "umlslt_z_zzzi_s"}, + {"110"_b, "umlslb_z_zzzi_d"}, + {"111"_b, "umlslt_z_zzzi_d"}, + }, + }, + + { "_yppyky", + {30, 13}, + { {"00"_b, "_gyrjrm"}, + {"01"_b, "_hhkqtn"}, + {"10"_b, "_jgmlpk"}, + {"11"_b, "_tzzssm"}, + }, + }, + + { "_ypsgqz", + {18, 17}, + { {"0x"_b, "ld4_asisdlsop_sx4_r4s"}, + {"10"_b, "ld4_asisdlsop_sx4_r4s"}, + {"11"_b, "ld4_asisdlsop_s4_i4s"}, + }, + }, + + { "_yptgjg", + {4}, + { {"0"_b, "ccmn_32_condcmp_reg"}, + }, + }, + + { "_yptvyx", + {30, 23, 22}, + { {"000"_b, "strb_32_ldst_pos"}, + {"001"_b, "ldrb_32_ldst_pos"}, + {"010"_b, "ldrsb_64_ldst_pos"}, + {"011"_b, "ldrsb_32_ldst_pos"}, + {"100"_b, "strh_32_ldst_pos"}, + {"101"_b, "ldrh_32_ldst_pos"}, + {"110"_b, "ldrsh_64_ldst_pos"}, + {"111"_b, "ldrsh_32_ldst_pos"}, + }, + }, + + { "_ypzllm", + {23, 22, 4}, + { {"000"_b, "fccmp_s_floatccmp"}, + {"001"_b, "fccmpe_s_floatccmp"}, + {"010"_b, "fccmp_d_floatccmp"}, + {"011"_b, "fccmpe_d_floatccmp"}, + {"110"_b, "fccmp_h_floatccmp"}, + {"111"_b, "fccmpe_h_floatccmp"}, + }, + }, + + { "_yqvqtx", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1rob_z_p_bi_u8"}, + {"000x0"_b, "ld1rob_z_p_br_contiguous"}, + {"01001"_b, "ld1roh_z_p_bi_u16"}, + {"010x0"_b, "ld1roh_z_p_br_contiguous"}, + }, + }, + + { "_yqxnzl", + {11, 10}, + { {"00"_b, "sqdmulh_z_zz"}, + {"01"_b, "sqrdmulh_z_zz"}, + }, + }, + + { "_yqzxvr", + {18, 17, 12}, + { {"000"_b, "ld3_asisdlso_d3_3d"}, + }, + }, + + { "_yrggjm", + {13, 12}, + { {"00"_b, "sshl_asisdsame_only"}, + {"01"_b, "srshl_asisdsame_only"}, + }, + }, + + { "_yrgzqr", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_ymvlzl"}, + {"0000001"_b, "_nzvlzt"}, + {"0100000"_b, "_zyhgnz"}, + {"0100001"_b, "_mntnlr"}, + {"100xxx0"_b, "st4_asisdlsep_r4_r"}, + {"100xxx1"_b, "st1_asisdlsep_r4_r4"}, + {"1010xx0"_b, "st4_asisdlsep_r4_r"}, + {"1010xx1"_b, "st1_asisdlsep_r4_r4"}, + {"10110x0"_b, "st4_asisdlsep_r4_r"}, + 
{"10110x1"_b, "st1_asisdlsep_r4_r4"}, + {"1011100"_b, "st4_asisdlsep_r4_r"}, + {"1011101"_b, "st1_asisdlsep_r4_r4"}, + {"1011110"_b, "_tshjsk"}, + {"1011111"_b, "_xnrrsy"}, + {"110xxx0"_b, "ld4_asisdlsep_r4_r"}, + {"110xxx1"_b, "ld1_asisdlsep_r4_r4"}, + {"1110xx0"_b, "ld4_asisdlsep_r4_r"}, + {"1110xx1"_b, "ld1_asisdlsep_r4_r4"}, + {"11110x0"_b, "ld4_asisdlsep_r4_r"}, + {"11110x1"_b, "ld1_asisdlsep_r4_r4"}, + {"1111100"_b, "ld4_asisdlsep_r4_r"}, + {"1111101"_b, "ld1_asisdlsep_r4_r4"}, + {"1111110"_b, "_hjvkkq"}, + {"1111111"_b, "_mthlnv"}, + }, + }, + + { "_yrjqql", + {30}, + { {"0"_b, "cbz_32_compbranch"}, + }, + }, + + { "_yrmmmg", + {4}, + { {"0"_b, "cmphs_p_p_zi"}, + {"1"_b, "cmphi_p_p_zi"}, + }, + }, + + { "_yrypnt", + {30, 23, 11, 10}, + { {"1001"_b, "_khrsgv"}, + }, + }, + + { "_yryygq", + {12}, + { {"0"_b, "ld3_asisdlsop_dx3_r3d"}, + }, + }, + + { "_yskyrg", + {20, 19, 18, 17, 16}, + { {"00000"_b, "sqneg_asisdmisc_r"}, + }, + }, + + { "_ysspjx", + {13, 12}, + { {"00"_b, "sdiv_64_dp_2src"}, + {"10"_b, "rorv_64_dp_2src"}, + }, + }, + + { "_yszjsm", + {12, 11, 10}, + { {"000"_b, "sdot_z_zzz"}, + {"001"_b, "udot_z_zzz"}, + {"010"_b, "sqdmlalbt_z_zzz"}, + {"011"_b, "sqdmlslbt_z_zzz"}, + {"1xx"_b, "cdot_z_zzz"}, + }, + }, + + { "_yszlqj", + {23, 22}, + { {"00"_b, "tbl_asimdtbl_l2_2"}, + }, + }, + + { "_ytkjxx", + {30, 23, 22, 13, 4}, + { {"00x0x"_b, "ld1w_z_p_bz_s_x32_scaled"}, + {"00x1x"_b, "ldff1w_z_p_bz_s_x32_scaled"}, + {"0100x"_b, "ldr_z_bi"}, + {"01100"_b, "prfw_i_p_bi_s"}, + {"01110"_b, "prfd_i_p_bi_s"}, + {"10x0x"_b, "ld1w_z_p_bz_d_x32_scaled"}, + {"10x1x"_b, "ldff1w_z_p_bz_d_x32_scaled"}, + {"11x0x"_b, "ld1d_z_p_bz_d_x32_scaled"}, + {"11x1x"_b, "ldff1d_z_p_bz_d_x32_scaled"}, + }, + }, + + { "_ytrmvz", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtmu_asisdmiscfp16_r"}, + {"0x00001"_b, "fcvtmu_asisdmisc_r"}, + {"1111001"_b, "fcvtzu_asisdmiscfp16_r"}, + {"1x00001"_b, "fcvtzu_asisdmisc_r"}, + {"xx00000"_b, "neg_asisdmisc_r"}, + }, + }, + + { "_ytvtqn", + {30, 23, 22, 20, 13}, + { {"00001"_b, "ld1sh_z_p_bi_s64"}, + {"00011"_b, "ldnf1sh_z_p_bi_s64"}, + {"00101"_b, "ld1w_z_p_bi_u32"}, + {"00111"_b, "ldnf1w_z_p_bi_u32"}, + {"01001"_b, "ld1sb_z_p_bi_s64"}, + {"01011"_b, "ldnf1sb_z_p_bi_s64"}, + {"01101"_b, "ld1sb_z_p_bi_s16"}, + {"01111"_b, "ldnf1sb_z_p_bi_s16"}, + {"100x0"_b, "st1w_z_p_bz_d_x32_unscaled"}, + {"100x1"_b, "st1w_z_p_bz_d_64_unscaled"}, + {"101x0"_b, "st1w_z_p_bz_s_x32_unscaled"}, + {"101x1"_b, "st1w_z_p_ai_d"}, + {"110x0"_b, "st1d_z_p_bz_d_x32_unscaled"}, + {"110x1"_b, "st1d_z_p_bz_d_64_unscaled"}, + {"111x1"_b, "st1d_z_p_ai_d"}, + }, + }, + + { "_yvptvx", + {23, 12, 11, 10}, + { {"0000"_b, "sqshrnb_z_zi"}, + {"0001"_b, "sqshrnt_z_zi"}, + {"0010"_b, "sqrshrnb_z_zi"}, + {"0011"_b, "sqrshrnt_z_zi"}, + {"0100"_b, "uqshrnb_z_zi"}, + {"0101"_b, "uqshrnt_z_zi"}, + {"0110"_b, "uqrshrnb_z_zi"}, + {"0111"_b, "uqrshrnt_z_zi"}, + }, + }, + + { "_yvqnyq", + {23}, + { {"1"_b, "_vhlqpr"}, + }, + }, + + { "_yvxkhv", + {30}, + { {"1"_b, "_ngvqhs"}, + }, + }, + + { "_yvyhlh", + {23, 22, 12, 11, 10}, + { {"0x000"_b, "fmul_z_zzi_h"}, + {"10000"_b, "fmul_z_zzi_s"}, + {"11000"_b, "fmul_z_zzi_d"}, + }, + }, + + { "_yxgmrs", + {23}, + { {"0"_b, "fmaxnmp_asimdsame_only"}, + {"1"_b, "fminnmp_asimdsame_only"}, + }, + }, + + { "_yxnslx", + {23, 22}, + { {"00"_b, "adr_z_az_d_s32_scaled"}, + {"01"_b, "adr_z_az_d_u32_scaled"}, + {"1x"_b, "adr_z_az_sd_same_scaled"}, + }, + }, + + { "_yxvttm", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + }, + }, + + { "_yykhjv", + {23, 22, 13, 12, 11, 10}, + { 
{"000110"_b, "smmla_z_zzz"}, + {"0x1000"_b, "sshllb_z_zi"}, + {"0x1001"_b, "sshllt_z_zi"}, + {"0x1010"_b, "ushllb_z_zi"}, + {"0x1011"_b, "ushllt_z_zi"}, + {"100110"_b, "usmmla_z_zzz"}, + {"110110"_b, "ummla_z_zzz"}, + {"xx0000"_b, "saddlbt_z_zz"}, + {"xx0010"_b, "ssublbt_z_zz"}, + {"xx0011"_b, "ssubltb_z_zz"}, + {"xx0100"_b, "eorbt_z_zz"}, + {"xx0101"_b, "eortb_z_zz"}, + {"xx1100"_b, "bext_z_zz"}, + {"xx1101"_b, "bdep_z_zz"}, + {"xx1110"_b, "bgrp_z_zz"}, + }, + }, + + { "_yynmjl", + {4}, + { {"0"_b, "orrs_p_p_pp_z"}, + {"1"_b, "orns_p_p_pp_z"}, + }, + }, + + { "_yyrkmn", + {17, 16, 9, 8, 7, 6, 5}, + { {"0000000"_b, "aesmc_z_z"}, + {"10xxxxx"_b, "aese_z_zz"}, + {"11xxxxx"_b, "sm4e_z_zz"}, + }, + }, + + { "_yysxts", + {23, 22, 13, 12, 11, 10}, + { {"0001x0"_b, "fmla_asimdelem_rh_h"}, + {"0x0001"_b, "sshr_asimdshf_r"}, + {"0x0101"_b, "ssra_asimdshf_r"}, + {"0x1001"_b, "srshr_asimdshf_r"}, + {"0x1101"_b, "srsra_asimdshf_r"}, + {"1000x0"_b, "fmlal_asimdelem_lh"}, + {"1x01x0"_b, "fmla_asimdelem_r_sd"}, + {"xx10x0"_b, "smlal_asimdelem_l"}, + {"xx11x0"_b, "sqdmlal_asimdelem_l"}, + }, + }, + + { "_yytvxh", + {30, 23, 22, 13, 4}, + { {"00000"_b, "prfw_i_p_br_s"}, + {"00010"_b, "prfw_i_p_ai_s"}, + {"0010x"_b, "ld1rw_z_p_bi_u32"}, + {"0011x"_b, "ld1rw_z_p_bi_u64"}, + {"01000"_b, "prfd_i_p_br_s"}, + {"01010"_b, "prfd_i_p_ai_s"}, + {"0110x"_b, "ld1rsb_z_p_bi_s16"}, + {"0111x"_b, "ld1rd_z_p_bi_u64"}, + {"1000x"_b, "ldnt1w_z_p_ar_d_64_unscaled"}, + {"10010"_b, "prfw_i_p_ai_d"}, + {"1010x"_b, "ld1w_z_p_bz_d_64_unscaled"}, + {"1011x"_b, "ldff1w_z_p_bz_d_64_unscaled"}, + {"1100x"_b, "ldnt1d_z_p_ar_d_64_unscaled"}, + {"11010"_b, "prfd_i_p_ai_d"}, + {"1110x"_b, "ld1d_z_p_bz_d_64_unscaled"}, + {"1111x"_b, "ldff1d_z_p_bz_d_64_unscaled"}, + }, + }, + + { "_yyvjqv", + {23}, + { {"0"_b, "fmax_asimdsame_only"}, + {"1"_b, "fmin_asimdsame_only"}, + }, + }, + + { "_yyvnrp", + {23, 22}, + { {"00"_b, "eor_asimdsame_only"}, + {"01"_b, "bsl_asimdsame_only"}, + {"10"_b, "bit_asimdsame_only"}, + {"11"_b, "bif_asimdsame_only"}, + }, + }, + + { "_yyyshx", + {30, 13, 4}, + { {"000"_b, "cmphs_p_p_zz"}, + {"001"_b, "cmphi_p_p_zz"}, + {"010"_b, "cmpeq_p_p_zw"}, + {"011"_b, "cmpne_p_p_zw"}, + {"1xx"_b, "fcmla_z_p_zzz"}, + }, + }, + + { "_yyyxhk", + {18}, + { {"0"_b, "ld1_asisdlsep_r2_r2"}, + {"1"_b, "ld1_asisdlsep_i2_i2"}, + }, + }, + + { "_yzgthp", + {18, 17}, + { {"0x"_b, "ld1_asisdlsop_sx1_r1s"}, + {"10"_b, "ld1_asisdlsop_sx1_r1s"}, + {"11"_b, "ld1_asisdlsop_s1_i1s"}, + }, + }, + + { "_yzlnrs", + {20, 19, 18, 17, 16}, + { {"00000"_b, "usqadd_asisdmisc_r"}, + }, + }, + + { "_yzmjhn", + {4}, + { {"0"_b, "eors_p_p_pp_z"}, + }, + }, + + { "_yzpszn", + {30}, + { {"0"_b, "ldr_s_loadlit"}, + {"1"_b, "ldr_d_loadlit"}, + }, + }, + + { "_yzqhtj", + {30, 23, 22, 11, 10}, + { {"00000"_b, "_rxsqhv"}, + {"01000"_b, "csel_64_condsel"}, + {"01001"_b, "csinc_64_condsel"}, + {"01100"_b, "_zqxkxg"}, + {"01101"_b, "_rvjkyp"}, + {"01110"_b, "_jxgpgg"}, + {"01111"_b, "_ysspjx"}, + {"10000"_b, "_pjvkjz"}, + {"11000"_b, "csinv_64_condsel"}, + {"11001"_b, "csneg_64_condsel"}, + {"11100"_b, "_rmyzpp"}, + {"11101"_b, "_npjnlv"}, + {"11110"_b, "_yhhsns"}, + {"11111"_b, "_vllmnt"}, + }, + }, + + { "_yzxjnk", + {9, 8, 7, 6, 5}, + { {"11111"_b, "paciza_64z_dp_1src"}, + }, + }, + + { "_zghtll", + {22, 20, 19, 18, 17, 16, 13, 12}, + { {"01111100"_b, "ldapr_32l_memop"}, + }, + }, + + { "_zgljvg", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000000"_b, "ldadd_32_memop"}, + {"0000100"_b, "ldclr_32_memop"}, + {"0001000"_b, "ldeor_32_memop"}, + {"0001100"_b, 
"ldset_32_memop"}, + {"000xx10"_b, "str_32_ldst_regoff"}, + {"0010000"_b, "ldaddl_32_memop"}, + {"0010100"_b, "ldclrl_32_memop"}, + {"0011000"_b, "ldeorl_32_memop"}, + {"0011100"_b, "ldsetl_32_memop"}, + {"001xx10"_b, "ldr_32_ldst_regoff"}, + {"0100000"_b, "ldadda_32_memop"}, + {"0100100"_b, "ldclra_32_memop"}, + {"0101000"_b, "ldeora_32_memop"}, + {"0101100"_b, "ldseta_32_memop"}, + {"010xx10"_b, "ldrsw_64_ldst_regoff"}, + {"0110000"_b, "ldaddal_32_memop"}, + {"0110100"_b, "ldclral_32_memop"}, + {"0111000"_b, "ldeoral_32_memop"}, + {"0111100"_b, "ldsetal_32_memop"}, + {"1000000"_b, "ldadd_64_memop"}, + {"1000100"_b, "ldclr_64_memop"}, + {"1001000"_b, "ldeor_64_memop"}, + {"1001100"_b, "ldset_64_memop"}, + {"100xx10"_b, "str_64_ldst_regoff"}, + {"1010000"_b, "ldaddl_64_memop"}, + {"1010100"_b, "ldclrl_64_memop"}, + {"1011000"_b, "ldeorl_64_memop"}, + {"1011100"_b, "ldsetl_64_memop"}, + {"101xx10"_b, "ldr_64_ldst_regoff"}, + {"10xxx01"_b, "ldraa_64_ldst_pac"}, + {"10xxx11"_b, "ldraa_64w_ldst_pac"}, + {"1100000"_b, "ldadda_64_memop"}, + {"1100100"_b, "ldclra_64_memop"}, + {"1101000"_b, "ldeora_64_memop"}, + {"1101100"_b, "ldseta_64_memop"}, + {"1110000"_b, "ldaddal_64_memop"}, + {"1110100"_b, "ldclral_64_memop"}, + {"1111000"_b, "ldeoral_64_memop"}, + {"1111100"_b, "ldsetal_64_memop"}, + {"11xxx01"_b, "ldrab_64_ldst_pac"}, + {"11xxx11"_b, "ldrab_64w_ldst_pac"}, + }, + }, + + { "_zjjxjl", + {9}, + { {"0"_b, "pnext_p_p_p"}, + }, + }, + + { "_zjqssg", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_jqsjtj"}, + {"0000001"_b, "_rspmth"}, + {"0100000"_b, "_txkmvh"}, + {"0100001"_b, "_ngnxrx"}, + {"100xxx0"_b, "st2_asisdlsep_r2_r"}, + {"100xxx1"_b, "st1_asisdlsep_r2_r2"}, + {"1010xx0"_b, "st2_asisdlsep_r2_r"}, + {"1010xx1"_b, "st1_asisdlsep_r2_r2"}, + {"10110x0"_b, "st2_asisdlsep_r2_r"}, + {"10110x1"_b, "st1_asisdlsep_r2_r2"}, + {"1011100"_b, "st2_asisdlsep_r2_r"}, + {"1011101"_b, "st1_asisdlsep_r2_r2"}, + {"1011110"_b, "_zyzsql"}, + {"1011111"_b, "_kqsqly"}, + {"110xxx0"_b, "ld2_asisdlsep_r2_r"}, + {"110xxx1"_b, "ld1_asisdlsep_r2_r2"}, + {"1110xx0"_b, "ld2_asisdlsep_r2_r"}, + {"1110xx1"_b, "ld1_asisdlsep_r2_r2"}, + {"11110x0"_b, "ld2_asisdlsep_r2_r"}, + {"11110x1"_b, "ld1_asisdlsep_r2_r2"}, + {"1111100"_b, "ld2_asisdlsep_r2_r"}, + {"1111101"_b, "ld1_asisdlsep_r2_r2"}, + {"1111110"_b, "_xnrxym"}, + {"1111111"_b, "_yyyxhk"}, + }, + }, + + { "_zjrsrx", + {30, 23}, + { {"00"_b, "add_64_addsub_imm"}, + {"10"_b, "sub_64_addsub_imm"}, + }, + }, + + { "_zjzmvh", + {23, 22, 20, 19, 18, 17, 16}, + { {"0001010"_b, "fcvtx_z_p_z_d2s"}, + {"0011xx0"_b, "flogb_z_p_z"}, + {"0110010"_b, "scvtf_z_p_z_h2fp16"}, + {"0110011"_b, "ucvtf_z_p_z_h2fp16"}, + {"0110100"_b, "scvtf_z_p_z_w2fp16"}, + {"0110101"_b, "ucvtf_z_p_z_w2fp16"}, + {"0110110"_b, "scvtf_z_p_z_x2fp16"}, + {"0110111"_b, "ucvtf_z_p_z_x2fp16"}, + {"0111010"_b, "fcvtzs_z_p_z_fp162h"}, + {"0111011"_b, "fcvtzu_z_p_z_fp162h"}, + {"0111100"_b, "fcvtzs_z_p_z_fp162w"}, + {"0111101"_b, "fcvtzu_z_p_z_fp162w"}, + {"0111110"_b, "fcvtzs_z_p_z_fp162x"}, + {"0111111"_b, "fcvtzu_z_p_z_fp162x"}, + {"1001000"_b, "fcvt_z_p_z_s2h"}, + {"1001001"_b, "fcvt_z_p_z_h2s"}, + {"1001010"_b, "bfcvt_z_p_z_s2bf"}, + {"1010100"_b, "scvtf_z_p_z_w2s"}, + {"1010101"_b, "ucvtf_z_p_z_w2s"}, + {"1011100"_b, "fcvtzs_z_p_z_s2w"}, + {"1011101"_b, "fcvtzu_z_p_z_s2w"}, + {"1101000"_b, "fcvt_z_p_z_d2h"}, + {"1101001"_b, "fcvt_z_p_z_h2d"}, + {"1101010"_b, "fcvt_z_p_z_d2s"}, + {"1101011"_b, "fcvt_z_p_z_s2d"}, + {"1110000"_b, "scvtf_z_p_z_w2d"}, + {"1110001"_b, "ucvtf_z_p_z_w2d"}, + {"1110100"_b, 
"scvtf_z_p_z_x2s"}, + {"1110101"_b, "ucvtf_z_p_z_x2s"}, + {"1110110"_b, "scvtf_z_p_z_x2d"}, + {"1110111"_b, "ucvtf_z_p_z_x2d"}, + {"1111000"_b, "fcvtzs_z_p_z_d2w"}, + {"1111001"_b, "fcvtzu_z_p_z_d2w"}, + {"1111100"_b, "fcvtzs_z_p_z_s2x"}, + {"1111101"_b, "fcvtzu_z_p_z_s2x"}, + {"1111110"_b, "fcvtzs_z_p_z_d2x"}, + {"1111111"_b, "fcvtzu_z_p_z_d2x"}, + {"xx00000"_b, "frintn_z_p_z"}, + {"xx00001"_b, "frintp_z_p_z"}, + {"xx00010"_b, "frintm_z_p_z"}, + {"xx00011"_b, "frintz_z_p_z"}, + {"xx00100"_b, "frinta_z_p_z"}, + {"xx00110"_b, "frintx_z_p_z"}, + {"xx00111"_b, "frinti_z_p_z"}, + {"xx01100"_b, "frecpx_z_p_z"}, + {"xx01101"_b, "fsqrt_z_p_z"}, + }, + }, + + { "_zkhjsp", + {11}, + { {"0"_b, "sqdmulh_z_zzi_h"}, + {"1"_b, "mul_z_zzi_h"}, + }, + }, + + { "_zlhlqy", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "frintm_asimdmiscfp16_r"}, + {"0x00001"_b, "frintm_asimdmisc_r"}, + {"1111001"_b, "frintz_asimdmiscfp16_r"}, + {"1x00001"_b, "frintz_asimdmisc_r"}, + {"xx00000"_b, "cmeq_asimdmisc_z"}, + }, + }, + + { "_zlkygr", + {13, 12}, + { {"00"_b, "cpyfe_cpy_memcms"}, + {"01"_b, "cpyfewt_cpy_memcms"}, + {"10"_b, "cpyfert_cpy_memcms"}, + {"11"_b, "cpyfet_cpy_memcms"}, + }, + }, + + { "_zlmyjt", + {23, 22}, + { {"00"_b, "fcsel_s_floatsel"}, + {"01"_b, "fcsel_d_floatsel"}, + {"11"_b, "fcsel_h_floatsel"}, + }, + }, + + { "_zlqnks", + {23, 22, 20, 19, 17, 16, 13}, + { {"0000000"_b, "_kmqlmz"}, + {"0000001"_b, "_tklxhy"}, + {"0100000"_b, "_mtshvn"}, + {"0100001"_b, "_gzzsgh"}, + {"100xxx0"_b, "st1_asisdlsop_bx1_r1b"}, + {"100xxx1"_b, "st3_asisdlsop_bx3_r3b"}, + {"1010xx0"_b, "st1_asisdlsop_bx1_r1b"}, + {"1010xx1"_b, "st3_asisdlsop_bx3_r3b"}, + {"10110x0"_b, "st1_asisdlsop_bx1_r1b"}, + {"10110x1"_b, "st3_asisdlsop_bx3_r3b"}, + {"1011100"_b, "st1_asisdlsop_bx1_r1b"}, + {"1011101"_b, "st3_asisdlsop_bx3_r3b"}, + {"1011110"_b, "_tvrlgz"}, + {"1011111"_b, "_nkmkvz"}, + {"110xxx0"_b, "ld1_asisdlsop_bx1_r1b"}, + {"110xxx1"_b, "ld3_asisdlsop_bx3_r3b"}, + {"1110xx0"_b, "ld1_asisdlsop_bx1_r1b"}, + {"1110xx1"_b, "ld3_asisdlsop_bx3_r3b"}, + {"11110x0"_b, "ld1_asisdlsop_bx1_r1b"}, + {"11110x1"_b, "ld3_asisdlsop_bx3_r3b"}, + {"1111100"_b, "ld1_asisdlsop_bx1_r1b"}, + {"1111101"_b, "ld3_asisdlsop_bx3_r3b"}, + {"1111110"_b, "_kkpxth"}, + {"1111111"_b, "_rlylxh"}, + }, + }, + + { "_zlvjrh", + {20, 19, 18, 17, 16}, + { {"11111"_b, "ldxr_lr32_ldstexcl"}, + }, + }, + + { "_zmhqmr", + {13, 12}, + { {"10"_b, "lsrv_32_dp_2src"}, + }, + }, + + { "_zmkntq", + {18}, + { {"0"_b, "ld1_asisdlsop_hx1_r1h"}, + {"1"_b, "ld1_asisdlsop_h1_i1h"}, + }, + }, + + { "_zmkqxl", + {23, 10}, + { {"00"_b, "adclb_z_zzz"}, + {"01"_b, "adclt_z_zzz"}, + {"10"_b, "sbclb_z_zzz"}, + {"11"_b, "sbclt_z_zzz"}, + }, + }, + + { "_zmrhxx", + {30, 23, 22}, + { {"000"_b, "smov_asimdins_w_w"}, + {"100"_b, "smov_asimdins_x_x"}, + }, + }, + + { "_zmtkvx", + {13, 10}, + { {"00"_b, "_rhpmjz"}, + }, + }, + + { "_zpjzst", + {23, 22, 20, 19, 18, 17, 16}, + { {"0111001"_b, "fcvtnu_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtnu_asimdmisc_r"}, + {"1111001"_b, "fcvtpu_asimdmiscfp16_r"}, + {"1x00001"_b, "fcvtpu_asimdmisc_r"}, + {"xx10000"_b, "umaxv_asimdall_only"}, + {"xx10001"_b, "uminv_asimdall_only"}, + }, + }, + + { "_zprgxt", + {18, 17, 12}, + { {"0x0"_b, "st4_asisdlsop_dx4_r4d"}, + {"100"_b, "st4_asisdlsop_dx4_r4d"}, + {"110"_b, "st4_asisdlsop_d4_i4d"}, + }, + }, + + { "_zpxrnm", + {30, 23, 22}, + { {"110"_b, "xar_vvv2_crypto3_imm6"}, + }, + }, + + { "_zqhhlq", + {20, 19, 18, 17, 16}, + { {"11111"_b, "stllrh_sl32_ldstexcl"}, + }, + }, + + { "_zqjgzz", + {30, 23, 22}, + { 
{"000"_b, "add_64_addsub_ext"}, + {"100"_b, "sub_64_addsub_ext"}, + }, + }, + + { "_zqlzzp", + {2, 1}, + { {"11"_b, "braaz_64_branch_reg"}, + }, + }, + + { "_zqmrhp", + {23, 22, 4, 3, 2, 1, 0}, + { {"0000000"_b, "wrffr_f_p"}, + }, + }, + + { "_zqmvqs", + {23, 22, 20, 19, 16, 13, 12}, + { {"0111110"_b, "fcvtns_asisdmiscfp16_r"}, + {"0111111"_b, "fcvtms_asisdmiscfp16_r"}, + {"0x00110"_b, "fcvtns_asisdmisc_r"}, + {"0x00111"_b, "fcvtms_asisdmisc_r"}, + {"1111110"_b, "fcvtps_asisdmiscfp16_r"}, + {"1111111"_b, "fcvtzs_asisdmiscfp16_r"}, + {"1x00110"_b, "fcvtps_asisdmisc_r"}, + {"1x00111"_b, "fcvtzs_asisdmisc_r"}, + {"xx00000"_b, "cmgt_asisdmisc_z"}, + {"xx00001"_b, "cmeq_asisdmisc_z"}, + {"xx00010"_b, "cmlt_asisdmisc_z"}, + {"xx00011"_b, "abs_asisdmisc_r"}, + {"xx10111"_b, "addp_asisdpair_only"}, + }, + }, + + { "_zqxkxg", + {13, 12}, + { {"00"_b, "subp_64s_dp_2src"}, + {"01"_b, "irg_64i_dp_2src"}, + {"10"_b, "lslv_64_dp_2src"}, + {"11"_b, "pacga_64p_dp_2src"}, + }, + }, + + { "_zrmgjx", + {30, 23, 22, 13, 4}, + { {"01000"_b, "ldr_p_bi"}, + {"01100"_b, "prfb_i_p_bi_s"}, + {"01110"_b, "prfh_i_p_bi_s"}, + {"10x0x"_b, "ld1sw_z_p_bz_d_x32_unscaled"}, + {"10x1x"_b, "ldff1sw_z_p_bz_d_x32_unscaled"}, + }, + }, + + { "_zrpzss", + {30, 23, 22, 13, 12, 11, 10}, + { {"0000000"_b, "swpp_128_memop_128"}, + {"0000100"_b, "rcwclrp_128_memop_128"}, + {"0001000"_b, "rcwswpp_128_memop_128"}, + {"0001100"_b, "rcwsetp_128_memop_128"}, + {"0010000"_b, "swppl_128_memop_128"}, + {"0010100"_b, "rcwclrpl_128_memop_128"}, + {"0011000"_b, "rcwswppl_128_memop_128"}, + {"0011100"_b, "rcwsetpl_128_memop_128"}, + {"0100000"_b, "swppa_128_memop_128"}, + {"0100100"_b, "rcwclrpa_128_memop_128"}, + {"0101000"_b, "rcwswppa_128_memop_128"}, + {"0101100"_b, "rcwsetpa_128_memop_128"}, + {"0110000"_b, "swppal_128_memop_128"}, + {"0110100"_b, "rcwclrpal_128_memop_128"}, + {"0111000"_b, "rcwswppal_128_memop_128"}, + {"0111100"_b, "rcwsetpal_128_memop_128"}, + {"1000100"_b, "rcwsclrp_128_memop_128"}, + {"1001000"_b, "rcwsswpp_128_memop_128"}, + {"1001100"_b, "rcwssetp_128_memop_128"}, + {"1010100"_b, "rcwsclrpl_128_memop_128"}, + {"1011000"_b, "rcwsswppl_128_memop_128"}, + {"1011100"_b, "rcwssetpl_128_memop_128"}, + {"1100100"_b, "rcwsclrpa_128_memop_128"}, + {"1101000"_b, "rcwsswppa_128_memop_128"}, + {"1101100"_b, "rcwssetpa_128_memop_128"}, + {"1110100"_b, "rcwsclrpal_128_memop_128"}, + {"1111000"_b, "rcwsswppal_128_memop_128"}, + {"1111100"_b, "rcwssetpal_128_memop_128"}, + }, + }, + + { "_zrqtgx", + {30}, + { {"0"_b, "bl_only_branch_imm"}, + {"1"_b, "_rxnnvv"}, + }, + }, + + { "_zrxhzq", + {19}, + { {"0"_b, "_kjsrkm"}, + {"1"_b, "sys_cr_systeminstrs"}, + }, + }, + + { "_zryvjk", + {20, 9, 4}, + { {"000"_b, "trn2_p_pp"}, + }, + }, + + { "_zsgpsn", + {20, 19, 18, 17, 16, 13, 12, 3, 2, 1, 0}, + { {"00000001101"_b, "setf16_only_setf"}, + }, + }, + + { "_zsltyl", + {22, 20, 11}, + { {"000"_b, "uqincw_r_rs_uw"}, + {"001"_b, "uqdecw_r_rs_uw"}, + {"010"_b, "uqincw_r_rs_x"}, + {"011"_b, "uqdecw_r_rs_x"}, + {"100"_b, "uqincd_r_rs_uw"}, + {"101"_b, "uqdecd_r_rs_uw"}, + {"110"_b, "uqincd_r_rs_x"}, + {"111"_b, "uqdecd_r_rs_x"}, + }, + }, + + { "_zspprz", + {20, 19, 17, 16, 12, 11, 10}, + { {"0000xxx"_b, "_srnkng"}, + {"0001xxx"_b, "_thkkgx"}, + {"0010xxx"_b, "_grgrpt"}, + {"0011xxx"_b, "_rkskkv"}, + {"0110100"_b, "_rvsylx"}, + {"0111100"_b, "_plymgg"}, + {"1000xxx"_b, "_prytjs"}, + {"1001xxx"_b, "_rrvltp"}, + {"1010xxx"_b, "_syrmmr"}, + {"1011xxx"_b, "_lnkrzt"}, + {"1100xxx"_b, "_smmrpj"}, + }, + }, + + { "_ztjjnh", + {30, 23, 22}, + { {"100"_b, 
"eor3_vvv16_crypto4"}, + {"101"_b, "sm3ss1_vvv4_crypto4"}, + {"110"_b, "xar_vvv2_crypto3_imm6"}, + }, + }, + + { "_ztlysk", + {23, 22, 20, 19, 18, 17, 16}, + { {"0010000"_b, "fmaxnmv_asimdall_only_h"}, + {"0111001"_b, "fcvtas_asimdmiscfp16_r"}, + {"0x00001"_b, "fcvtas_asimdmisc_r"}, + {"1010000"_b, "fminnmv_asimdall_only_h"}, + {"1111000"_b, "fcmgt_asimdmiscfp16_fz"}, + {"1x00000"_b, "fcmgt_asimdmisc_fz"}, + {"1x00001"_b, "urecpe_asimdmisc_r"}, + }, + }, + + { "_ztpryr", + {13}, + { {"0"_b, "fmad_z_p_zzz"}, + {"1"_b, "fmsb_z_p_zzz"}, + }, + }, + + { "_ztyqrj", + {30, 23, 13, 12, 10}, + { {"00000"_b, "_jmvgsp"}, + {"00001"_b, "_jkkqvy"}, + {"00100"_b, "_nkxhsy"}, + {"00101"_b, "_gshrzq"}, + {"00110"_b, "_zvjrlz"}, + {"00111"_b, "_ntjpsx"}, + {"01000"_b, "_mqrzzk"}, + {"01001"_b, "_jqxqql"}, + {"01100"_b, "_xznsqh"}, + {"01101"_b, "_qvlnll"}, + {"01110"_b, "_kvnqhn"}, + {"01111"_b, "_zsltyl"}, + {"10110"_b, "_zkhjsp"}, + {"10111"_b, "_hvyjnk"}, + {"11000"_b, "_sjvhlq"}, + {"11001"_b, "_xhktsk"}, + {"11010"_b, "_rtpztp"}, + {"11011"_b, "_rznrqt"}, + {"11100"_b, "_kyspnn"}, + {"11101"_b, "_qljhnp"}, + {"11110"_b, "_pxyrpm"}, + {"11111"_b, "_khjvqq"}, + }, + }, + + { "_zvjrlz", + {22, 20, 11}, + { {"000"_b, "sqincb_r_rs_sx"}, + {"001"_b, "sqdecb_r_rs_sx"}, + {"010"_b, "sqincb_r_rs_x"}, + {"011"_b, "sqdecb_r_rs_x"}, + {"100"_b, "sqinch_r_rs_sx"}, + {"101"_b, "sqdech_r_rs_sx"}, + {"110"_b, "sqinch_r_rs_x"}, + {"111"_b, "sqdech_r_rs_x"}, + }, + }, + + { "_zvvvhr", + {13, 12, 11, 10}, + { {"0000"_b, "smlal_asimddiff_l"}, + {"0001"_b, "add_asimdsame_only"}, + {"0010"_b, "_njnsqm"}, + {"0011"_b, "cmtst_asimdsame_only"}, + {"0100"_b, "sqdmlal_asimddiff_l"}, + {"0101"_b, "mla_asimdsame_only"}, + {"0110"_b, "_zlhlqy"}, + {"0111"_b, "mul_asimdsame_only"}, + {"1000"_b, "smlsl_asimddiff_l"}, + {"1001"_b, "smaxp_asimdsame_only"}, + {"1010"_b, "_nknntn"}, + {"1011"_b, "sminp_asimdsame_only"}, + {"1100"_b, "sqdmlsl_asimddiff_l"}, + {"1101"_b, "sqdmulh_asimdsame_only"}, + {"1110"_b, "_lyzhrq"}, + {"1111"_b, "addp_asimdsame_only"}, + }, + }, + + { "_zvxxjk", + {30, 23, 22, 13, 12, 11, 10}, + { {"000xxxx"_b, "madd_64a_dp_3src"}, + {"0011111"_b, "smulh_64_dp_3src"}, + {"0111111"_b, "umulh_64_dp_3src"}, + }, + }, + + { "_zvynrg", + {19}, + { {"0"_b, "_hnkyxy"}, + {"1"_b, "sys_cr_systeminstrs"}, + }, + }, + + { "_zxjkmj", + {22, 4, 3}, + { {"00x"_b, "prfm_p_ldst_regoff"}, + {"010"_b, "prfm_p_ldst_regoff"}, + {"011"_b, "rprfm_r_ldst_regoff"}, + }, + }, + + { "_zxklzp", + {12}, + { {"0"_b, "ld1_asisdlsop_dx1_r1d"}, + }, + }, + + { "_zxtzmv", + {30, 23, 22, 13}, + { {"0010"_b, "ld1rsh_z_p_bi_s64"}, + {"0011"_b, "ld1rsh_z_p_bi_s32"}, + {"0110"_b, "ld1rsb_z_p_bi_s64"}, + {"0111"_b, "ld1rsb_z_p_bi_s32"}, + {"1000"_b, "ld1sw_z_p_ai_d"}, + {"1001"_b, "ldff1sw_z_p_ai_d"}, + {"1010"_b, "ld1sw_z_p_bz_d_64_scaled"}, + {"1011"_b, "ldff1sw_z_p_bz_d_64_scaled"}, + }, + }, + + { "_zyhgnz", + {18}, + { {"0"_b, "ld4_asisdlse_r4"}, + }, + }, + + { "_zyjjgs", + {23, 22, 20, 19, 18}, + { {"00000"_b, "orr_z_zi"}, + {"01000"_b, "eor_z_zi"}, + {"10000"_b, "and_z_zi"}, + {"11000"_b, "dupm_z_i"}, + {"xx1xx"_b, "cpy_z_o_i"}, + }, + }, + + { "_zyxnpz", + {13, 12, 11, 10}, + { {"1111"_b, "casa_c32_ldstexcl"}, + }, + }, + + { "_zyzsql", + {18}, + { {"0"_b, "st2_asisdlsep_r2_r"}, + {"1"_b, "st2_asisdlsep_i2_i"}, + }, + }, + + { "_zzhnxv", + {30, 23, 22, 20, 19}, + { {"0xxxx"_b, "bl_only_branch_imm"}, + {"10001"_b, "sysl_rc_systeminstrs"}, + {"1001x"_b, "mrs_rs_systemmove"}, + {"1011x"_b, "mrrs_rs_systemmovepr"}, + }, + }, + + { "_zzkgsk", + {20, 
19, 18, 17, 16}, + { {"11111"_b, "ldaxr_lr32_ldstexcl"}, + }, + }, + + { "_zztypv", + {6, 5}, + { {"00"_b, "cfinv_m_pstate"}, + {"01"_b, "xaflag_m_pstate"}, + {"10"_b, "axflag_m_pstate"}, + }, + }, + + { "_zzvxvh", + {23, 22, 11, 10}, + { {"0001"_b, "pmul_z_zz"}, + {"xx00"_b, "mul_z_zz"}, + {"xx10"_b, "smulh_z_zz"}, + {"xx11"_b, "umulh_z_zz"}, + }, + }, + + { "Root", + {31, 29, 28, 27, 26, 25, 24, 21, 15, 14}, + { {"00000000xx"_b, "_nqmnzp"}, + {"0000100000"_b, "_rzzxsn"}, + {"0000100001"_b, "_xvppmm"}, + {"0000100010"_b, "_ptsjnr"}, + {"0000100011"_b, "_nlpmvl"}, + {"0000100100"_b, "_ljljkv"}, + {"0000100101"_b, "_kktglv"}, + {"0000100110"_b, "_ppnssm"}, + {"0000100111"_b, "_ztyqrj"}, + {"0000101000"_b, "_rnqtmt"}, + {"0000101001"_b, "_rlpmrx"}, + {"0000101010"_b, "_mpvsng"}, + {"0000101011"_b, "_qlxksl"}, + {"0000101100"_b, "_mhrjvp"}, + {"0000101101"_b, "_pgjjsz"}, + {"0000101110"_b, "_yppyky"}, + {"0000101111"_b, "_yjmngt"}, + {"0001000001"_b, "_thqgrq"}, + {"0001000011"_b, "_hkgzsh"}, + {"0001000101"_b, "_ktpxrr"}, + {"0001000111"_b, "_stlgrr"}, + {"00010100xx"_b, "_vtyqhh"}, + {"00010101xx"_b, "_tytzpq"}, + {"00010110xx"_b, "_tqlsyy"}, + {"00010111xx"_b, "_htkpks"}, + {"0001100000"_b, "_myvqtn"}, + {"0001100001"_b, "_nmqskh"}, + {"0001100010"_b, "_xrkzpn"}, + {"0001101000"_b, "_zlqnks"}, + {"0001101001"_b, "_vtllgt"}, + {"0001101010"_b, "_ghqqzy"}, + {"0001101011"_b, "_xrskrk"}, + {"0001101100"_b, "_rzpqmm"}, + {"0001101101"_b, "_pyvvqx"}, + {"0001101110"_b, "_shgktt"}, + {"0001101111"_b, "_szylpy"}, + {"0001110000"_b, "_jgxqzr"}, + {"0001110001"_b, "_jrqxvn"}, + {"0001110010"_b, "_lplpkk"}, + {"0001110100"_b, "_kgpsjz"}, + {"0001110101"_b, "_hsrkqt"}, + {"0001110110"_b, "_zvvvhr"}, + {"0001110111"_b, "_kssltr"}, + {"0001111000"_b, "_vzzqhx"}, + {"0001111001"_b, "_ktngnm"}, + {"0001111010"_b, "_ttsgkt"}, + {"0001111011"_b, "_phtxqg"}, + {"0001111100"_b, "_yysxts"}, + {"0001111101"_b, "_msnshr"}, + {"0001111110"_b, "_nmqrtr"}, + {"0001111111"_b, "_gnxrlr"}, + {"00100010xx"_b, "_hmjrmm"}, + {"00100011xx"_b, "_nxlmhz"}, + {"0010010xxx"_b, "_hqkljv"}, + {"001001100x"_b, "_hvrjyt"}, + {"001001110x"_b, "_kgygky"}, + {"0010011x1x"_b, "_lkpprr"}, + {"0010100xxx"_b, "_vyjsst"}, + {"0010110xxx"_b, "_qvjmmq"}, + {"00101x1xxx"_b, "_lxggmz"}, + {"0011000xxx"_b, "_yjktml"}, + {"0011001000"_b, "_mqtgvk"}, + {"0011001001"_b, "_hvnhmh"}, + {"0011001010"_b, "_gsnnnt"}, + {"0011001011"_b, "_vxvyyg"}, + {"0011001100"_b, "_jkvsxy"}, + {"0011001110"_b, "_zrpzss"}, + {"0011010000"_b, "_rsqxrs"}, + {"0011010001"_b, "_rktqym"}, + {"001101001x"_b, "_vqrqjt"}, + {"001101100x"_b, "_rtlvxq"}, + {"001101101x"_b, "_gtqnvr"}, + {"0011100xxx"_b, "_yzpszn"}, + {"0011101000"_b, "_hhxpyt"}, + {"0011101001"_b, "_htrtzz"}, + {"0011101010"_b, "_rkxlyj"}, + {"0011101011"_b, "_vnggzq"}, + {"0011110000"_b, "_mrlpxr"}, + {"0011110001"_b, "_xszqrg"}, + {"001111001x"_b, "_plyxlq"}, + {"0011110100"_b, "_rqpjjs"}, + {"0011110101"_b, "_ttmyrv"}, + {"0011110110"_b, "_lvjtlg"}, + {"0011110111"_b, "_lnntps"}, + {"0011111000"_b, "_vtgnnl"}, + {"0011111001"_b, "_mxnzst"}, + {"0011111010"_b, "_lvryvp"}, + {"0011111011"_b, "_mqssgy"}, + {"0011111100"_b, "_pxzvjl"}, + {"0011111101"_b, "_mnxgqm"}, + {"0011111110"_b, "_qntrvk"}, + {"0011111111"_b, "_vnnjxg"}, + {"0100100000"_b, "_yyyshx"}, + {"0100100001"_b, "_mylphg"}, + {"0100100010"_b, "_nsjhhg"}, + {"0100100011"_b, "_rhhrhg"}, + {"0100100100"_b, "_ymhgxg"}, + {"0100100101"_b, "_nvkthr"}, + {"0100100110"_b, "_phthqj"}, + {"0100100111"_b, "_kyjxrr"}, + {"0100101000"_b, "_gzvylr"}, + 
{"0100101001"_b, "_pppsmg"}, + {"0100101010"_b, "_pyjnpz"}, + {"0100101011"_b, "_shqygv"}, + {"0100101100"_b, "_hzsxkp"}, + {"0100101101"_b, "_nqkhrv"}, + {"0100101110"_b, "_tkjtgp"}, + {"0100101111"_b, "_htqpks"}, + {"0101000xxx"_b, "_ssvpxz"}, + {"0101001xxx"_b, "_vgqvys"}, + {"01010100xx"_b, "_qkrnms"}, + {"01010101xx"_b, "_vypnss"}, + {"01010110xx"_b, "_glkvkr"}, + {"01010111xx"_b, "_qgqgkx"}, + {"0101100xxx"_b, "_mxplnn"}, + {"0101101xxx"_b, "_pqmqrg"}, + {"0101110000"_b, "_gshlgj"}, + {"0101110001"_b, "_klsmsv"}, + {"0101110010"_b, "_xhhqnx"}, + {"0101110011"_b, "_rssrty"}, + {"0101110100"_b, "_nzskzl"}, + {"0101110101"_b, "_qlzvpg"}, + {"0101110110"_b, "_hlxmpy"}, + {"0101110111"_b, "_lplzxv"}, + {"0101111000"_b, "_krtvhr"}, + {"0101111001"_b, "_ymtzjg"}, + {"0101111010"_b, "_szgqrr"}, + {"0101111011"_b, "_xnpyvy"}, + {"0101111100"_b, "_tnngsg"}, + {"0101111101"_b, "_kshtnj"}, + {"0101111110"_b, "_vmxzxt"}, + {"0101111111"_b, "_gxqnph"}, + {"0110001xxx"_b, "_ykptgl"}, + {"0110010xxx"_b, "_slzvjh"}, + {"0110011xxx"_b, "_nqlrmv"}, + {"0110100xxx"_b, "_yrjqql"}, + {"0110101xxx"_b, "_prgrzz"}, + {"01110000xx"_b, "_vshynq"}, + {"0111000100"_b, "_ykjhgg"}, + {"0111000101"_b, "_jqtksx"}, + {"0111000110"_b, "_gzpkvm"}, + {"0111000111"_b, "_jhkkgv"}, + {"0111001xxx"_b, "_yptvyx"}, + {"0111010000"_b, "_tzrgqq"}, + {"0111010001"_b, "_qlpnnn"}, + {"011101001x"_b, "_grsslr"}, + {"01111000xx"_b, "_xjtzgm"}, + {"0111100101"_b, "_srsrtk"}, + {"0111100111"_b, "_xynxhx"}, + {"01111001x0"_b, "_gylmmr"}, + {"0111101xxx"_b, "_mkzysy"}, + {"0111110000"_b, "_nklvmv"}, + {"0111110010"_b, "_pyttkp"}, + {"0111110100"_b, "_lrqlrg"}, + {"0111110101"_b, "_yvxkhv"}, + {"0111110110"_b, "_ksgpqz"}, + {"0111110111"_b, "_hkpjqm"}, + {"0111111000"_b, "_lgzlyq"}, + {"0111111001"_b, "_yrypnt"}, + {"0111111010"_b, "_snvnjz"}, + {"0111111011"_b, "_kkkltp"}, + {"0111111100"_b, "_xsgnlv"}, + {"0111111101"_b, "_lrptrn"}, + {"0111111110"_b, "_pyhrrt"}, + {"0111111111"_b, "_nkyrpv"}, + {"0x10000xxx"_b, "adr_only_pcreladdr"}, + {"1000100000"_b, "_lspzrv"}, + {"1000100001"_b, "_kxvvkq"}, + {"1000100010"_b, "_sxpvym"}, + {"1000100011"_b, "_vkrkks"}, + {"1000100100"_b, "_xvnyxq"}, + {"1000100101"_b, "_gtxpgx"}, + {"1000100110"_b, "_vlrhpy"}, + {"1000100111"_b, "_ymhkrx"}, + {"1000101000"_b, "_zrmgjx"}, + {"1000101001"_b, "_qqyryl"}, + {"1000101010"_b, "_hgxtqy"}, + {"1000101011"_b, "_yytvxh"}, + {"1000101100"_b, "_ptslzg"}, + {"1000101101"_b, "_ytkjxx"}, + {"1000101110"_b, "_zxtzmv"}, + {"1000101111"_b, "_kgmqkh"}, + {"1001000001"_b, "_ptyynt"}, + {"1001000011"_b, "_skszgm"}, + {"1001000100"_b, "_rlgtnn"}, + {"1001000101"_b, "_rgxthl"}, + {"1001000110"_b, "_xxphlt"}, + {"1001000111"_b, "_njjlxy"}, + {"10010100xx"_b, "_tnpjts"}, + {"10010101xx"_b, "_hgjgpm"}, + {"10010110xx"_b, "_hqnsvg"}, + {"10010111xx"_b, "_zqjgzz"}, + {"100111000x"_b, "_ztjjnh"}, + {"1001110010"_b, "_lssjyz"}, + {"1001110011"_b, "_zpxrnm"}, + {"100111010x"_b, "_jkvvtp"}, + {"1001110110"_b, "_sqhxzj"}, + {"1001110111"_b, "_hrxtnj"}, + {"1010001000"_b, "_vrsgzg"}, + {"1010001010"_b, "_vhkjgh"}, + {"10100010x1"_b, "_rxytqg"}, + {"1010001100"_b, "_lrmgmq"}, + {"1010001110"_b, "_zjrsrx"}, + {"10100011x1"_b, "_vmgnhk"}, + {"1010010xxx"_b, "_pjlnhh"}, + {"10100110xx"_b, "_xzlxjh"}, + {"10100111xx"_b, "_rjthsm"}, + {"10101000xx"_b, "_yjnkrn"}, + {"10101001xx"_b, "_zrqtgx"}, + {"1010101000"_b, "_pxvjkp"}, + {"1010101001"_b, "_xrzqtn"}, + {"101010101x"_b, "_ttmvpr"}, + {"1010101100"_b, "_grjzyl"}, + {"1010101110"_b, "_kynxnz"}, + {"10101011x1"_b, "_zzhnxv"}, + 
{"1010110000"_b, "_lymhlk"}, + {"1010110100"_b, "_tpmqyl"}, + {"1010111000"_b, "_lkzyzv"}, + {"1010111100"_b, "_tvyxlr"}, + {"101011xx10"_b, "_yxvttm"}, + {"101011xxx1"_b, "_qhzvvh"}, + {"1011000xxx"_b, "_jgklkt"}, + {"1011001000"_b, "_sxptnh"}, + {"1011001001"_b, "_vmsxgq"}, + {"1011001010"_b, "_vkrskv"}, + {"1011001011"_b, "_nxrqmg"}, + {"1011001100"_b, "_lsqgkk"}, + {"1011001110"_b, "_kxpqhv"}, + {"10110011x1"_b, "_jrxtzg"}, + {"1011010000"_b, "_yzqhtj"}, + {"1011010001"_b, "_yvqnyq"}, + {"101101001x"_b, "_qpsryx"}, + {"1011011000"_b, "_vpjktn"}, + {"1011011001"_b, "_zvxxjk"}, + {"101101101x"_b, "_sztkhs"}, + {"101101110x"_b, "_hlypvy"}, + {"101101111x"_b, "_rszgzl"}, + {"1011100xxx"_b, "_ymszkr"}, + {"1011101000"_b, "_pzzgts"}, + {"1011101001"_b, "_pgvjgs"}, + {"1011101010"_b, "_kppzvh"}, + {"1011101011"_b, "_nlrjsj"}, + {"10111100xx"_b, "_rxtklv"}, + {"1011110100"_b, "_vsnnms"}, + {"1100100000"_b, "_sjtrhm"}, + {"1100100001"_b, "_hzkglv"}, + {"1100100010"_b, "_qrygny"}, + {"1100100011"_b, "_tjzqnp"}, + {"1100100100"_b, "_yqvqtx"}, + {"1100100101"_b, "_ngttyj"}, + {"1100100110"_b, "_kqzmtr"}, + {"1100100111"_b, "_qpvgnh"}, + {"1100101000"_b, "_tpkslq"}, + {"1100101001"_b, "_kzpyzy"}, + {"1100101010"_b, "_ytvtqn"}, + {"1100101011"_b, "_qkzjxm"}, + {"1100101100"_b, "_lqmksm"}, + {"1100101101"_b, "_hxlznn"}, + {"1100101110"_b, "_knkjnz"}, + {"1100101111"_b, "_rsjgyk"}, + {"1101000xxx"_b, "_mtlxqp"}, + {"1101001xxx"_b, "_vmyztj"}, + {"11010100xx"_b, "_vnzkty"}, + {"11010101xx"_b, "_vnrlrk"}, + {"11010110xx"_b, "_rjmhxr"}, + {"11010111xx"_b, "_rxgkjn"}, + {"1101100xxx"_b, "_jvkxtj"}, + {"1101101xxx"_b, "_srpqmk"}, + {"1110001xxx"_b, "_pkskpp"}, + {"1110010xxx"_b, "_mgspnm"}, + {"1110011xxx"_b, "_snhzxr"}, + {"1110100xxx"_b, "_jxgqqz"}, + {"1110101xxx"_b, "_qnysqv"}, + {"11110000xx"_b, "_rqghyv"}, + {"1111000100"_b, "_zgljvg"}, + {"1111000101"_b, "_yjnmkg"}, + {"1111000110"_b, "_jqhvhn"}, + {"1111000111"_b, "_vnsqhn"}, + {"1111001xxx"_b, "_hvhrsq"}, + {"1111010000"_b, "_gyjphh"}, + {"1111010010"_b, "_mkrgxr"}, + {"11110100x1"_b, "_mtzhrn"}, + {"11111000xx"_b, "_hlljqz"}, + {"11111001xx"_b, "_qzsyvx"}, + {"1111101xxx"_b, "_jqlgts"}, + {"1x10000xxx"_b, "adrp_only_pcreladdr"}, + {"x110110xxx"_b, "_xymnxy"}, + {"x110111xxx"_b, "_htjmmx"}, + }, + }, + +}; +// clang-format on + +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/include/vixl/aarch64/decoder-visitor-map-aarch64.h b/dep/vixl/include/vixl/aarch64/decoder-visitor-map-aarch64.h new file mode 100644 index 000000000..b40e0ae09 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/decoder-visitor-map-aarch64.h @@ -0,0 +1,2949 @@ +// Copyright 2020, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Initialisation data for a std::map, from instruction form to the visitor +// function that handles it. This allows reuse of existing visitor functions +// that support groups of instructions, though they may do extra decoding +// no longer needed. +// In the long term, it's expected that each component that uses the decoder +// will want to group instruction handling in the way most appropriate to +// the component's function, so this map initialisation will no longer be +// shared. +
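A minimal sketch of how such a form-to-visitor map is consumed, assuming a toy MyVisitor class, a Dispatch helper, and an FNV-1a stand-in for the "_h" form-hash literal (none of these are vixl's actual definitions; the generated macro below simply supplies hundreds of {hash, &Class::Member} pairs of the same shape):

#include <cstddef>
#include <cstdint>
#include <map>

// Illustrative stand-in for the constexpr "_h" literal that hashes a form
// string to a uint32_t key (FNV-1a here; vixl's real hash differs).
constexpr uint32_t operator""_h(const char* s, size_t n) {
  uint32_t h = 2166136261u;
  for (size_t i = 0; i < n; i++) {
    h = (h ^ static_cast<uint8_t>(s[i])) * 16777619u;
  }
  return h;
}

struct Instruction {};  // Stand-in for vixl::aarch64::Instruction.

class MyVisitor {
 public:
  void VisitSVEPredicateLogical(const Instruction*) { /* ands, bics, ... */ }
  void VisitSVEIntReduction(const Instruction*) { /* andv, orv, ... */ }
};

using FormToVisitorFn = void (MyVisitor::*)(const Instruction*);

// DEFAULT_FORM_TO_VISITOR_MAP(MyVisitor) would expand to hundreds of pairs
// shaped exactly like the two written out here.
const std::map<uint32_t, FormToVisitorFn> kFormToVisitor = {
    {"ands_p_p_pp_z"_h, &MyVisitor::VisitSVEPredicateLogical},
    {"andv_r_p_z"_h, &MyVisitor::VisitSVEIntReduction},
};

// Once the decoder has identified an instruction's form, dispatch is a map
// lookup followed by a pointer-to-member call.
void Dispatch(MyVisitor* visitor, uint32_t form_hash,
              const Instruction* instr) {
  auto it = kFormToVisitor.find(form_hash);
  if (it != kFormToVisitor.end()) (visitor->*(it->second))(instr);
}

Keying the table on a hash of the form name rather than an enum keeps the generated data a flat macro, and parameterising it on VISITORCLASS lets each component point the same forms at its own member functions, as the comment above anticipates.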
+#define DEFAULT_FORM_TO_VISITOR_MAP(VISITORCLASS) \ + {"abs_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"addpl_r_ri"_h, &VISITORCLASS::VisitSVEStackFrameAdjustment}, \ + {"addvl_r_ri"_h, &VISITORCLASS::VisitSVEStackFrameAdjustment}, \ + {"add_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEIntAddSubtractVectors_Predicated}, \ + {"add_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \ + {"add_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \ + {"adr_z_az_d_s32_scaled"_h, &VISITORCLASS::VisitSVEAddressGeneration}, \ + {"adr_z_az_d_u32_scaled"_h, &VISITORCLASS::VisitSVEAddressGeneration}, \ + {"adr_z_az_sd_same_scaled"_h, &VISITORCLASS::VisitSVEAddressGeneration}, \ + {"ands_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"andv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"and_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"and_z_p_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogical_Predicated}, \ + {"and_z_zi"_h, \ + &VISITORCLASS::VisitSVEBitwiseLogicalWithImm_Unpredicated}, \ + {"and_z_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogicalUnpredicated}, \ + {"asrd_z_p_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftByImm_Predicated}, \ + {"asrr_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"asr_z_p_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftByImm_Predicated}, \ + {"asr_z_p_zw"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByWideElements_Predicated}, \ + {"asr_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"asr_z_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"asr_z_zw"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"bics_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"bic_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"bic_z_p_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogical_Predicated}, \ + {"bic_z_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogicalUnpredicated}, \ + {"brkas_p_p_p_z"_h, &VISITORCLASS::VisitSVEPartitionBreakCondition}, \ + {"brka_p_p_p"_h, &VISITORCLASS::VisitSVEPartitionBreakCondition}, \ + {"brkbs_p_p_p_z"_h, &VISITORCLASS::VisitSVEPartitionBreakCondition}, \ + {"brkb_p_p_p"_h, &VISITORCLASS::VisitSVEPartitionBreakCondition}, \ + {"brkns_p_p_pp"_h, \ +
&VISITORCLASS::VisitSVEPropagateBreakToNextPartition}, \ + {"brkn_p_p_pp"_h, &VISITORCLASS::VisitSVEPropagateBreakToNextPartition}, \ + {"brkpas_p_p_pp"_h, &VISITORCLASS::VisitSVEPropagateBreak}, \ + {"brkpa_p_p_pp"_h, &VISITORCLASS::VisitSVEPropagateBreak}, \ + {"brkpbs_p_p_pp"_h, &VISITORCLASS::VisitSVEPropagateBreak}, \ + {"brkpb_p_p_pp"_h, &VISITORCLASS::VisitSVEPropagateBreak}, \ + {"clasta_r_p_z"_h, \ + &VISITORCLASS::VisitSVEConditionallyExtractElementToGeneralRegister}, \ + {"clasta_v_p_z"_h, \ + &VISITORCLASS::VisitSVEConditionallyExtractElementToSIMDFPScalar}, \ + {"clasta_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEConditionallyBroadcastElementToVector}, \ + {"clastb_r_p_z"_h, \ + &VISITORCLASS::VisitSVEConditionallyExtractElementToGeneralRegister}, \ + {"clastb_v_p_z"_h, \ + &VISITORCLASS::VisitSVEConditionallyExtractElementToSIMDFPScalar}, \ + {"clastb_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEConditionallyBroadcastElementToVector}, \ + {"cls_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"clz_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"cmpeq_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmpeq_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpeq_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpge_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmpge_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpge_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpgt_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmpgt_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpgt_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmphi_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareUnsignedImm}, \ + {"cmphi_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmphi_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmphs_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareUnsignedImm}, \ + {"cmphs_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmphs_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmple_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmple_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmplo_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareUnsignedImm}, \ + {"cmplo_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpls_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareUnsignedImm}, \ + {"cmpls_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmplt_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmplt_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpne_p_p_zi"_h, &VISITORCLASS::VisitSVEIntCompareSignedImm}, \ + {"cmpne_p_p_zw"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cmpne_p_p_zz"_h, &VISITORCLASS::VisitSVEIntCompareVectors}, \ + {"cnot_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"cntb_r_s"_h, &VISITORCLASS::VisitSVEElementCount}, \ + {"cntd_r_s"_h, &VISITORCLASS::VisitSVEElementCount}, \ + {"cnth_r_s"_h, &VISITORCLASS::VisitSVEElementCount}, \ + {"cntp_r_p_p"_h, &VISITORCLASS::VisitSVEPredicateCount}, \ + {"cntw_r_s"_h, &VISITORCLASS::VisitSVEElementCount}, \ + {"cnt_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"compact_z_p_z"_h, &VISITORCLASS::VisitSVECompressActiveElements}, \ + {"cpy_z_o_i"_h, &VISITORCLASS::VisitSVECopyIntImm_Predicated}, \ + {"cpy_z_p_i"_h, &VISITORCLASS::VisitSVECopyIntImm_Predicated}, \ + {"cpy_z_p_r"_h, \ +
&VISITORCLASS::VisitSVECopyGeneralRegisterToVector_Predicated}, \ + {"cpy_z_p_v"_h, \ + &VISITORCLASS::VisitSVECopySIMDFPScalarRegisterToVector_Predicated}, \ + {"ctermeq_rr"_h, &VISITORCLASS::VisitSVEConditionallyTerminateScalars}, \ + {"ctermne_rr"_h, &VISITORCLASS::VisitSVEConditionallyTerminateScalars}, \ + {"decb_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"decd_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"decd_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"dech_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"dech_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"decp_r_p_r"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"decp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"decw_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"decw_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"dupm_z_i"_h, &VISITORCLASS::VisitSVEBroadcastBitmaskImm}, \ + {"dup_z_i"_h, &VISITORCLASS::VisitSVEBroadcastIntImm_Unpredicated}, \ + {"dup_z_r"_h, &VISITORCLASS::VisitSVEBroadcastGeneralRegister}, \ + {"dup_z_zi"_h, &VISITORCLASS::VisitSVEBroadcastIndexElement}, \ + {"eors_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"eorv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"eor_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"eor_z_p_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogical_Predicated}, \ + {"eor_z_zi"_h, \ + &VISITORCLASS::VisitSVEBitwiseLogicalWithImm_Unpredicated}, \ + {"eor_z_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogicalUnpredicated}, \ + {"ext_z_zi_des"_h, &VISITORCLASS::VisitSVEPermuteVectorExtract}, \ + {"fabd_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fabs_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"facge_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"facgt_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fadda_v_p_z"_h, &VISITORCLASS::VisitSVEFPAccumulatingReduction}, \ + {"faddv_v_p_z"_h, &VISITORCLASS::VisitSVEFPFastReduction}, \ + {"fadd_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fadd_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fadd_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"fcadd_z_p_zz"_h, &VISITORCLASS::VisitSVEFPComplexAddition}, \ + {"fcmeq_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmeq_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fcmge_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmge_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fcmgt_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmgt_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fcmla_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPComplexMulAdd}, \ + {"fcmla_z_zzzi_h"_h, &VISITORCLASS::VisitSVEFPComplexMulAddIndex}, \ + {"fcmla_z_zzzi_s"_h, &VISITORCLASS::VisitSVEFPComplexMulAddIndex}, \ + {"fcmle_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmlt_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmne_p_p_z0"_h, &VISITORCLASS::VisitSVEFPCompareWithZero}, \ + {"fcmne_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fcmuo_p_p_zz"_h, &VISITORCLASS::VisitSVEFPCompareVectors}, \ + {"fcpy_z_p_i"_h, &VISITORCLASS::VisitSVECopyFPImm_Predicated}, \ + {"fcvtzs_z_p_z_d2w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzs_z_p_z_d2x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + 
{"fcvtzs_z_p_z_fp162h"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzs_z_p_z_fp162w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzs_z_p_z_fp162x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzs_z_p_z_s2w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzs_z_p_z_s2x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_d2w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_d2x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_fp162h"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_fp162w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_fp162x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_s2w"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvtzu_z_p_z_s2x"_h, &VISITORCLASS::VisitSVEFPConvertToInt}, \ + {"fcvt_z_p_z_d2h"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fcvt_z_p_z_d2s"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fcvt_z_p_z_h2d"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fcvt_z_p_z_h2s"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fcvt_z_p_z_s2d"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fcvt_z_p_z_s2h"_h, &VISITORCLASS::VisitSVEFPConvertPrecision}, \ + {"fdivr_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fdiv_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fdup_z_i"_h, &VISITORCLASS::VisitSVEBroadcastFPImm_Unpredicated}, \ + {"fexpa_z_z"_h, &VISITORCLASS::VisitSVEFPExponentialAccelerator}, \ + {"fmad_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fmaxnmv_v_p_z"_h, &VISITORCLASS::VisitSVEFPFastReduction}, \ + {"fmaxnm_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fmaxnm_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fmaxv_v_p_z"_h, &VISITORCLASS::VisitSVEFPFastReduction}, \ + {"fmax_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fmax_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fminnmv_v_p_z"_h, &VISITORCLASS::VisitSVEFPFastReduction}, \ + {"fminnm_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fminnm_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fminv_v_p_z"_h, &VISITORCLASS::VisitSVEFPFastReduction}, \ + {"fmin_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fmin_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fmla_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fmla_z_zzzi_d"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmla_z_zzzi_h"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmla_z_zzzi_s"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmls_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fmls_z_zzzi_d"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmls_z_zzzi_h"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmls_z_zzzi_s"_h, &VISITORCLASS::VisitSVEFPMulAddIndex}, \ + {"fmsb_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fmulx_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fmul_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fmul_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fmul_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"fmul_z_zzi_d"_h, &VISITORCLASS::VisitSVEFPMulIndex}, \ + {"fmul_z_zzi_h"_h, &VISITORCLASS::VisitSVEFPMulIndex}, \ + {"fmul_z_zzi_s"_h, &VISITORCLASS::VisitSVEFPMulIndex}, \ + {"fneg_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + 
{"fnmad_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fnmla_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fnmls_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"fnmsb_z_p_zzz"_h, &VISITORCLASS::VisitSVEFPMulAdd}, \ + {"frecpe_z_z"_h, &VISITORCLASS::VisitSVEFPUnaryOpUnpredicated}, \ + {"frecps_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"frecpx_z_p_z"_h, &VISITORCLASS::VisitSVEFPUnaryOp}, \ + {"frinta_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frinti_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frintm_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frintn_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frintp_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frintx_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frintz_z_p_z"_h, &VISITORCLASS::VisitSVEFPRoundToIntegralValue}, \ + {"frsqrte_z_z"_h, &VISITORCLASS::VisitSVEFPUnaryOpUnpredicated}, \ + {"frsqrts_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"fscale_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fsqrt_z_p_z"_h, &VISITORCLASS::VisitSVEFPUnaryOp}, \ + {"fsubr_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fsubr_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fsub_z_p_zs"_h, \ + &VISITORCLASS::VisitSVEFPArithmeticWithImm_Predicated}, \ + {"fsub_z_p_zz"_h, &VISITORCLASS::VisitSVEFPArithmetic_Predicated}, \ + {"fsub_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"ftmad_z_zzi"_h, &VISITORCLASS::VisitSVEFPTrigMulAddCoefficient}, \ + {"ftsmul_z_zz"_h, &VISITORCLASS::VisitSVEFPArithmeticUnpredicated}, \ + {"ftssel_z_zz"_h, &VISITORCLASS::VisitSVEFPTrigSelectCoefficient}, \ + {"incb_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"incd_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"incd_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"inch_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"inch_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"incp_r_p_r"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"incp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"incw_r_rs"_h, &VISITORCLASS::VisitSVEIncDecRegisterByElementCount}, \ + {"incw_z_zs"_h, &VISITORCLASS::VisitSVEIncDecVectorByElementCount}, \ + {"index_z_ii"_h, &VISITORCLASS::VisitSVEIndexGeneration}, \ + {"index_z_ir"_h, &VISITORCLASS::VisitSVEIndexGeneration}, \ + {"index_z_ri"_h, &VISITORCLASS::VisitSVEIndexGeneration}, \ + {"index_z_rr"_h, &VISITORCLASS::VisitSVEIndexGeneration}, \ + {"insr_z_r"_h, &VISITORCLASS::VisitSVEInsertGeneralRegister}, \ + {"insr_z_v"_h, &VISITORCLASS::VisitSVEInsertSIMDFPScalarRegister}, \ + {"lasta_r_p_z"_h, \ + &VISITORCLASS::VisitSVEExtractElementToGeneralRegister}, \ + {"lasta_v_p_z"_h, \ + &VISITORCLASS::VisitSVEExtractElementToSIMDFPScalarRegister}, \ + {"lastb_r_p_z"_h, \ + &VISITORCLASS::VisitSVEExtractElementToGeneralRegister}, \ + {"lastb_v_p_z"_h, \ + &VISITORCLASS::VisitSVEExtractElementToSIMDFPScalarRegister}, \ + {"ld1b_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1b_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ld1b_z_p_bi_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1b_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1b_z_p_bi_u64"_h, \ + 
&VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1b_z_p_bi_u8"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1b_z_p_br_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1b_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1b_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1b_z_p_br_u8"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1b_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1b_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1b_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ld1d_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1d_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1d_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1d_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ld1d_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1d_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ld1d_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1h_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1h_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ld1h_z_p_bi_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1h_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1h_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1h_z_p_br_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1h_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1h_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1h_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ld1h_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1h_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ld1h_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1h_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets}, \ + {"ld1h_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ld1rb_z_p_bi_u16"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rb_z_p_bi_u32"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rb_z_p_bi_u64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rb_z_p_bi_u8"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rd_z_p_bi_u64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rh_z_p_bi_u16"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rh_z_p_bi_u32"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rh_z_p_bi_u64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + 
{"ld1rqb_z_p_bi_u8"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm}, \ + {"ld1rqb_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar}, \ + {"ld1rqd_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm}, \ + {"ld1rqd_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar}, \ + {"ld1rqh_z_p_bi_u16"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm}, \ + {"ld1rqh_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar}, \ + {"ld1rqw_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm}, \ + {"ld1rqw_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar}, \ + {"ld1rsb_z_p_bi_s16"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rsb_z_p_bi_s32"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rsb_z_p_bi_s64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rsh_z_p_bi_s32"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rsh_z_p_bi_s64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rsw_z_p_bi_s64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rw_z_p_bi_u32"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1rw_z_p_bi_u64"_h, &VISITORCLASS::VisitSVELoadAndBroadcastElement}, \ + {"ld1sb_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1sb_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ld1sb_z_p_bi_s16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sb_z_p_bi_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sb_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sb_z_p_br_s16"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sb_z_p_br_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sb_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sb_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1sb_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1sb_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ld1sh_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1sh_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ld1sh_z_p_bi_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sh_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sh_z_p_br_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sh_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sh_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ld1sh_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1sh_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ld1sh_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1sh_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets}, \ 
+ {"ld1sh_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ld1sw_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1sw_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1sw_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1sw_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ld1sw_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1sw_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ld1sw_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1w_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ld1w_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ld1w_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1w_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusImm}, \ + {"ld1w_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1w_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousLoad_ScalarPlusScalar}, \ + {"ld1w_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ld1w_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ld1w_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ld1w_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ld1w_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets}, \ + {"ld1w_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ld2b_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld2b_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld2d_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld2d_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld2h_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld2h_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld2w_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld2w_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld3b_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld3b_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld3d_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld3d_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld3h_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld3h_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld3w_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, 
\ + {"ld3w_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld4b_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld4b_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld4d_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld4d_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld4h_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld4h_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ld4w_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusImm}, \ + {"ld4w_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVELoadMultipleStructures_ScalarPlusScalar}, \ + {"ldff1b_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1b_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ldff1b_z_p_br_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1b_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1b_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1b_z_p_br_u8"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1b_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1b_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1b_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ldff1d_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1d_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1d_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ldff1d_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1d_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ldff1d_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1h_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1h_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ldff1h_z_p_br_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1h_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1h_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1h_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ldff1h_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1h_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ldff1h_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1h_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + 
VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets}, \ + {"ldff1h_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ldff1sb_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1sb_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ldff1sb_z_p_br_s16"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sb_z_p_br_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sb_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sb_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1sb_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1sb_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ldff1sh_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1sh_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ldff1sh_z_p_br_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sh_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sh_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ldff1sh_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1sh_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ldff1sh_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1sh_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets}, \ + {"ldff1sh_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ldff1sw_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1sw_z_p_br_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1sw_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ldff1sw_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1sw_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ldff1sw_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1w_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_VectorPlusImm}, \ + {"ldff1w_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_VectorPlusImm}, \ + {"ldff1w_z_p_br_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1w_z_p_br_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar}, \ + {"ldff1w_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets}, \ + {"ldff1w_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets}, \ + {"ldff1w_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets}, \ + {"ldff1w_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + 
VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"ldff1w_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets}, \ + {"ldff1w_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets}, \ + {"ldnf1b_z_p_bi_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1b_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1b_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1b_z_p_bi_u8"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1d_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1h_z_p_bi_u16"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1h_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1h_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sb_z_p_bi_s16"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sb_z_p_bi_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sb_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sh_z_p_bi_s32"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sh_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1sw_z_p_bi_s64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1w_z_p_bi_u32"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnf1w_z_p_bi_u64"_h, \ + &VISITORCLASS::VisitSVEContiguousNonFaultLoad_ScalarPlusImm}, \ + {"ldnt1b_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm}, \ + {"ldnt1b_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar}, \ + {"ldnt1d_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm}, \ + {"ldnt1d_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar}, \ + {"ldnt1h_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm}, \ + {"ldnt1h_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar}, \ + {"ldnt1w_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm}, \ + {"ldnt1w_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar}, \ + {"ldr_p_bi"_h, &VISITORCLASS::VisitSVELoadPredicateRegister}, \ + {"ldr_z_bi"_h, &VISITORCLASS::VisitSVELoadVectorRegister}, \ + {"lslr_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"lsl_z_p_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftByImm_Predicated}, \ + {"lsl_z_p_zw"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByWideElements_Predicated}, \ + {"lsl_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"lsl_z_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"lsl_z_zw"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"lsrr_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"lsr_z_p_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftByImm_Predicated}, \ + {"lsr_z_p_zw"_h, \ + &VISITORCLASS::VisitSVEBitwiseShiftByWideElements_Predicated}, \ + {"lsr_z_p_zz"_h, \ + 
&VISITORCLASS::VisitSVEBitwiseShiftByVector_Predicated}, \ + {"lsr_z_zi"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"lsr_z_zw"_h, &VISITORCLASS::VisitSVEBitwiseShiftUnpredicated}, \ + {"mad_z_p_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddPredicated}, \ + {"mla_z_p_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddPredicated}, \ + {"mls_z_p_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddPredicated}, \ + {"movprfx_z_p_z"_h, &VISITORCLASS::VisitSVEMovprfx}, \ + {"movprfx_z_z"_h, \ + &VISITORCLASS::VisitSVEConstructivePrefix_Unpredicated}, \ + {"msb_z_p_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddPredicated}, \ + {"mul_z_p_zz"_h, &VISITORCLASS::VisitSVEIntMulVectors_Predicated}, \ + {"mul_z_zi"_h, &VISITORCLASS::VisitSVEIntMulImm_Unpredicated}, \ + {"nands_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"nand_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"neg_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"nors_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"nor_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"not_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \ + {"orns_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"orn_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"orrs_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"orr_p_p_pp_z"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"orr_z_p_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogical_Predicated}, \ + {"orr_z_zi"_h, \ + &VISITORCLASS::VisitSVEBitwiseLogicalWithImm_Unpredicated}, \ + {"orr_z_zz"_h, &VISITORCLASS::VisitSVEBitwiseLogicalUnpredicated}, \ + {"orv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"pfalse_p"_h, &VISITORCLASS::VisitSVEPredicateZero}, \ + {"pfirst_p_p_p"_h, &VISITORCLASS::VisitSVEPredicateFirstActive}, \ + {"pnext_p_p_p"_h, &VISITORCLASS::VisitSVEPredicateNextActive}, \ + {"prfb_i_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherPrefetch_VectorPlusImm}, \ + {"prfb_i_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherPrefetch_VectorPlusImm}, \ + {"prfb_i_p_bi_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusImm}, \ + {"prfb_i_p_br_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusScalar}, \ + {"prfb_i_p_bz_d_64_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets}, \ + {"prfb_i_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"prfb_i_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets}, \ + {"prfd_i_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherPrefetch_VectorPlusImm}, \ + {"prfd_i_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherPrefetch_VectorPlusImm}, \ + {"prfd_i_p_bi_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusImm}, \ + {"prfd_i_p_br_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusScalar}, \ + {"prfd_i_p_bz_d_64_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets}, \ + {"prfd_i_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"prfd_i_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets}, \ + {"prfh_i_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherPrefetch_VectorPlusImm}, \ + {"prfh_i_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherPrefetch_VectorPlusImm}, \ + {"prfh_i_p_bi_s"_h, \ + 
&VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusImm}, \ + {"prfh_i_p_br_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusScalar}, \ + {"prfh_i_p_bz_d_64_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets}, \ + {"prfh_i_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"prfh_i_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets}, \ + {"prfw_i_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitGatherPrefetch_VectorPlusImm}, \ + {"prfw_i_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitGatherPrefetch_VectorPlusImm}, \ + {"prfw_i_p_bi_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusImm}, \ + {"prfw_i_p_br_s"_h, \ + &VISITORCLASS::VisitSVEContiguousPrefetch_ScalarPlusScalar}, \ + {"prfw_i_p_bz_d_64_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets}, \ + {"prfw_i_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"prfw_i_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets}, \ + {"ptest_p_p"_h, &VISITORCLASS::VisitSVEPredicateTest}, \ + {"ptrues_p_s"_h, &VISITORCLASS::VisitSVEPredicateInitialize}, \ + {"ptrue_p_s"_h, &VISITORCLASS::VisitSVEPredicateInitialize}, \ + {"punpkhi_p_p"_h, &VISITORCLASS::VisitSVEUnpackPredicateElements}, \ + {"punpklo_p_p"_h, &VISITORCLASS::VisitSVEUnpackPredicateElements}, \ + {"rbit_z_p_z"_h, &VISITORCLASS::VisitSVEReverseWithinElements}, \ + {"rdffrs_p_p_f"_h, \ + &VISITORCLASS::VisitSVEPredicateReadFromFFR_Predicated}, \ + {"rdffr_p_f"_h, \ + &VISITORCLASS::VisitSVEPredicateReadFromFFR_Unpredicated}, \ + {"rdffr_p_p_f"_h, \ + &VISITORCLASS::VisitSVEPredicateReadFromFFR_Predicated}, \ + {"rdvl_r_i"_h, &VISITORCLASS::VisitSVEStackFrameSize}, \ + {"revb_z_z"_h, &VISITORCLASS::VisitSVEReverseWithinElements}, \ + {"revh_z_z"_h, &VISITORCLASS::VisitSVEReverseWithinElements}, \ + {"revw_z_z"_h, &VISITORCLASS::VisitSVEReverseWithinElements}, \ + {"rev_p_p"_h, &VISITORCLASS::VisitSVEReversePredicateElements}, \ + {"rev_z_z"_h, &VISITORCLASS::VisitSVEReverseVectorElements}, \ + {"sabd_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \ + {"saddv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"scvtf_z_p_z_h2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_w2d"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_w2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_w2s"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_x2d"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_x2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"scvtf_z_p_z_x2s"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \ + {"sdivr_z_p_zz"_h, &VISITORCLASS::VisitSVEIntDivideVectors_Predicated}, \ + {"sdiv_z_p_zz"_h, &VISITORCLASS::VisitSVEIntDivideVectors_Predicated}, \ + {"sdot_z_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddUnpredicated}, \ + {"sdot_z_zzzi_d"_h, &VISITORCLASS::VisitSVEMulIndex}, \ + {"sdot_z_zzzi_s"_h, &VISITORCLASS::VisitSVEMulIndex}, \ + {"sel_p_p_pp"_h, &VISITORCLASS::VisitSVEPredicateLogical}, \ + {"sel_z_p_zz"_h, &VISITORCLASS::VisitSVEVectorSelect}, \ + {"setffr_f"_h, &VISITORCLASS::VisitSVEFFRInitialise}, \ + {"smaxv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"smax_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \ + {"smax_z_zi"_h, 
&VISITORCLASS::VisitSVEIntMinMaxImm_Unpredicated}, \ + {"sminv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \ + {"smin_z_p_zz"_h, \ + &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \ + {"smin_z_zi"_h, &VISITORCLASS::VisitSVEIntMinMaxImm_Unpredicated}, \ + {"smulh_z_p_zz"_h, &VISITORCLASS::VisitSVEIntMulVectors_Predicated}, \ + {"splice_z_p_zz_des"_h, &VISITORCLASS::VisitSVEVectorSplice}, \ + {"sqadd_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \ + {"sqadd_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \ + {"sqdecb_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecb_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecd_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecd_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecd_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqdech_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdech_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdech_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqdecp_r_p_r_sx"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqdecp_r_p_r_x"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqdecp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqdecw_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecw_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqdecw_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqincb_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincb_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincd_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincd_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincd_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqinch_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqinch_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqinch_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqincp_r_p_r_sx"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqincp_r_p_r_x"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqincp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \ + {"sqincw_r_rs_sx"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincw_r_rs_x"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \ + {"sqincw_z_zs"_h, \ + &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \ + {"sqsub_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \ + {"sqsub_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \ + {"st1b_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_VectorPlusImm}, \ + {"st1b_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitScatterStore_VectorPlusImm}, \ + {"st1b_z_p_bi"_h, &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusImm}, \ + {"st1b_z_p_br"_h, \ + &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusScalar}, \ + {"st1b_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS:: \ + 
VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets}, \ + {"st1b_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"st1b_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets}, \ + {"st1d_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_VectorPlusImm}, \ + {"st1d_z_p_bi"_h, &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusImm}, \ + {"st1d_z_p_br"_h, \ + &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusScalar}, \ + {"st1d_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_ScalarPlus64BitScaledOffsets}, \ + {"st1d_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets}, \ + {"st1d_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"st1d_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"st1h_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_VectorPlusImm}, \ + {"st1h_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitScatterStore_VectorPlusImm}, \ + {"st1h_z_p_bi"_h, &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusImm}, \ + {"st1h_z_p_br"_h, \ + &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusScalar}, \ + {"st1h_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_ScalarPlus64BitScaledOffsets}, \ + {"st1h_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets}, \ + {"st1h_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"st1h_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"st1h_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS::VisitSVE32BitScatterStore_ScalarPlus32BitScaledOffsets}, \ + {"st1h_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets}, \ + {"st1w_z_p_ai_d"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_VectorPlusImm}, \ + {"st1w_z_p_ai_s"_h, \ + &VISITORCLASS::VisitSVE32BitScatterStore_VectorPlusImm}, \ + {"st1w_z_p_bi"_h, &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusImm}, \ + {"st1w_z_p_br"_h, \ + &VISITORCLASS::VisitSVEContiguousStore_ScalarPlusScalar}, \ + {"st1w_z_p_bz_d_64_scaled"_h, \ + &VISITORCLASS::VisitSVE64BitScatterStore_ScalarPlus64BitScaledOffsets}, \ + {"st1w_z_p_bz_d_64_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets}, \ + {"st1w_z_p_bz_d_x32_scaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets}, \ + {"st1w_z_p_bz_d_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets}, \ + {"st1w_z_p_bz_s_x32_scaled"_h, \ + &VISITORCLASS::VisitSVE32BitScatterStore_ScalarPlus32BitScaledOffsets}, \ + {"st1w_z_p_bz_s_x32_unscaled"_h, \ + &VISITORCLASS:: \ + VisitSVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets}, \ + {"st2b_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \ + {"st2b_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \ + {"st2d_z_p_bi_contiguous"_h, \ + &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \ + {"st2d_z_p_br_contiguous"_h, \ + &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \ + 
+    {"st2h_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st2h_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st2w_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st2w_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st3b_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st3b_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st3d_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st3d_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st3h_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st3h_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st3w_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st3w_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st4b_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st4b_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st4d_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st4d_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st4h_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st4h_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"st4w_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusImm}, \
+    {"st4w_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEStoreMultipleStructures_ScalarPlusScalar}, \
+    {"stnt1b_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusImm}, \
+    {"stnt1b_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar}, \
+    {"stnt1d_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusImm}, \
+    {"stnt1d_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar}, \
+    {"stnt1h_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusImm}, \
+    {"stnt1h_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar}, \
+    {"stnt1w_z_p_bi_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusImm}, \
+    {"stnt1w_z_p_br_contiguous"_h, \
+     &VISITORCLASS::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar}, \
+    {"str_p_bi"_h, &VISITORCLASS::VisitSVEStorePredicateRegister}, \
+    {"str_z_bi"_h, &VISITORCLASS::VisitSVEStoreVectorRegister}, \
+    {"subr_z_p_zz"_h, \
+     &VISITORCLASS::VisitSVEIntAddSubtractVectors_Predicated}, \
+    {"subr_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \
+    {"sub_z_p_zz"_h, \
+     &VISITORCLASS::VisitSVEIntAddSubtractVectors_Predicated}, \
+    {"sub_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \
+    {"sub_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \
+    {"sunpkhi_z_z"_h, &VISITORCLASS::VisitSVEUnpackVectorElements}, \
+    {"sunpklo_z_z"_h, &VISITORCLASS::VisitSVEUnpackVectorElements}, \
+    {"sxtb_z_p_z"_h, \
+     &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"sxth_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"sxtw_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"tbl_z_zz_1"_h, &VISITORCLASS::VisitSVETableLookup}, \
+    {"trn1_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"trn1_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"trn2_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"trn2_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"uabd_z_p_zz"_h, \
+     &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \
+    {"uaddv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \
+    {"ucvtf_z_p_z_h2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_w2d"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_w2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_w2s"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_x2d"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_x2fp16"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"ucvtf_z_p_z_x2s"_h, &VISITORCLASS::VisitSVEIntConvertToFP}, \
+    {"udf_only_perm_undef"_h, &VISITORCLASS::VisitReserved}, \
+    {"udivr_z_p_zz"_h, &VISITORCLASS::VisitSVEIntDivideVectors_Predicated}, \
+    {"udiv_z_p_zz"_h, &VISITORCLASS::VisitSVEIntDivideVectors_Predicated}, \
+    {"udot_z_zzz"_h, &VISITORCLASS::VisitSVEIntMulAddUnpredicated}, \
+    {"udot_z_zzzi_d"_h, &VISITORCLASS::VisitSVEMulIndex}, \
+    {"udot_z_zzzi_s"_h, &VISITORCLASS::VisitSVEMulIndex}, \
+    {"umaxv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \
+    {"umax_z_p_zz"_h, \
+     &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \
+    {"umax_z_zi"_h, &VISITORCLASS::VisitSVEIntMinMaxImm_Unpredicated}, \
+    {"uminv_r_p_z"_h, &VISITORCLASS::VisitSVEIntReduction}, \
+    {"umin_z_p_zz"_h, \
+     &VISITORCLASS::VisitSVEIntMinMaxDifference_Predicated}, \
+    {"umin_z_zi"_h, &VISITORCLASS::VisitSVEIntMinMaxImm_Unpredicated}, \
+    {"umulh_z_p_zz"_h, &VISITORCLASS::VisitSVEIntMulVectors_Predicated}, \
+    {"uqadd_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \
+    {"uqadd_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \
+    {"uqdecb_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecb_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecd_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecd_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecd_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqdech_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdech_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdech_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqdecp_r_p_r_uw"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqdecp_r_p_r_x"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqdecp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqdecw_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecw_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqdecw_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqincb_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincb_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincd_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincd_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincd_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqinch_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqinch_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqinch_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqincp_r_p_r_uw"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqincp_r_p_r_x"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqincp_z_p_z"_h, &VISITORCLASS::VisitSVEIncDecByPredicateCount}, \
+    {"uqincw_r_rs_uw"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincw_r_rs_x"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecRegisterByElementCount}, \
+    {"uqincw_z_zs"_h, \
+     &VISITORCLASS::VisitSVESaturatingIncDecVectorByElementCount}, \
+    {"uqsub_z_zi"_h, &VISITORCLASS::VisitSVEIntAddSubtractImm_Unpredicated}, \
+    {"uqsub_z_zz"_h, &VISITORCLASS::VisitSVEIntArithmeticUnpredicated}, \
+    {"uunpkhi_z_z"_h, &VISITORCLASS::VisitSVEUnpackVectorElements}, \
+    {"uunpklo_z_z"_h, &VISITORCLASS::VisitSVEUnpackVectorElements}, \
+    {"uxtb_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"uxth_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"uxtw_z_p_z"_h, &VISITORCLASS::VisitSVEIntUnaryArithmeticPredicated}, \
+    {"uzp1_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"uzp1_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"uzp2_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"uzp2_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"whilele_p_p_rr"_h, \
+     &VISITORCLASS::VisitSVEIntCompareScalarCountAndLimit}, \
+    {"whilelo_p_p_rr"_h, \
+     &VISITORCLASS::VisitSVEIntCompareScalarCountAndLimit}, \
+    {"whilels_p_p_rr"_h, \
+     &VISITORCLASS::VisitSVEIntCompareScalarCountAndLimit}, \
+    {"whilelt_p_p_rr"_h, \
+     &VISITORCLASS::VisitSVEIntCompareScalarCountAndLimit}, \
+    {"wrffr_f_p"_h, &VISITORCLASS::VisitSVEFFRWriteFromPredicate}, \
+    {"zip1_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"zip1_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"zip2_p_pp"_h, &VISITORCLASS::VisitSVEPermutePredicateElements}, \
+    {"zip2_z_zz"_h, &VISITORCLASS::VisitSVEPermuteVectorInterleaving}, \
+    {"adds_32s_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"adds_64s_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"add_32_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"add_64_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"subs_32s_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"subs_64s_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"sub_32_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"sub_64_addsub_ext"_h, &VISITORCLASS::VisitAddSubExtended}, \
+    {"adds_32s_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"adds_64s_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"add_32_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"add_64_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"subs_32s_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"subs_64s_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"sub_32_addsub_imm"_h, \
+     &VISITORCLASS::VisitAddSubImmediate}, \
+    {"sub_64_addsub_imm"_h, &VISITORCLASS::VisitAddSubImmediate}, \
+    {"adds_32_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"adds_64_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"add_32_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"add_64_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"subs_32_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"subs_64_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"sub_32_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"sub_64_addsub_shift"_h, &VISITORCLASS::VisitAddSubShifted}, \
+    {"adcs_32_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"adcs_64_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"adc_32_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"adc_64_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"sbcs_32_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"sbcs_64_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"sbc_32_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"sbc_64_addsub_carry"_h, &VISITORCLASS::VisitAddSubWithCarry}, \
+    {"ldaddab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldadda_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldadda_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaddl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldadd_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldadd_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaprb_32l_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldaprh_32l_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldapr_32l_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldapr_64l_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclralb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclralh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclral_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclral_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclra_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclra_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclrl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclr_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldclr_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeoralb_32_memop"_h, \
+     &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeoralh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeoral_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeoral_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeora_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeora_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeorl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeor_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldeor_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldseta_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldseta_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldseth_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsetl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldset_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldset_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxa_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxa_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmaxl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmax_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmax_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmina_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmina_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminlh_32_memop"_h, \
+     &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsminl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmin_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldsmin_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxa_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxa_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumaxl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumax_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumax_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumina_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumina_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminlb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminlh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"lduminl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumin_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"ldumin_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpab_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpah_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpalb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpalh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpal_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpal_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpa_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpa_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swph_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swplb_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swplh_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpl_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swpl_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swp_32_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"swp_64_memop"_h, &VISITORCLASS::VisitAtomicMemory}, \
+    {"bfm_32m_bitfield"_h, &VISITORCLASS::VisitBitfield}, \
+    {"bfm_64m_bitfield"_h, &VISITORCLASS::VisitBitfield}, \
+    {"sbfm_32m_bitfield"_h, &VISITORCLASS::VisitBitfield}, \
+    {"sbfm_64m_bitfield"_h, &VISITORCLASS::VisitBitfield}, \
+    {"ubfm_32m_bitfield"_h, &VISITORCLASS::VisitBitfield}, \
+    {"ubfm_64m_bitfield"_h, \
+     &VISITORCLASS::VisitBitfield}, \
+    {"cbnz_32_compbranch"_h, &VISITORCLASS::VisitCompareBranch}, \
+    {"cbnz_64_compbranch"_h, &VISITORCLASS::VisitCompareBranch}, \
+    {"cbz_32_compbranch"_h, &VISITORCLASS::VisitCompareBranch}, \
+    {"cbz_64_compbranch"_h, &VISITORCLASS::VisitCompareBranch}, \
+    {"b_only_condbranch"_h, &VISITORCLASS::VisitConditionalBranch}, \
+    {"ccmn_32_condcmp_imm"_h, \
+     &VISITORCLASS::VisitConditionalCompareImmediate}, \
+    {"ccmn_64_condcmp_imm"_h, \
+     &VISITORCLASS::VisitConditionalCompareImmediate}, \
+    {"ccmp_32_condcmp_imm"_h, \
+     &VISITORCLASS::VisitConditionalCompareImmediate}, \
+    {"ccmp_64_condcmp_imm"_h, \
+     &VISITORCLASS::VisitConditionalCompareImmediate}, \
+    {"ccmn_32_condcmp_reg"_h, \
+     &VISITORCLASS::VisitConditionalCompareRegister}, \
+    {"ccmn_64_condcmp_reg"_h, \
+     &VISITORCLASS::VisitConditionalCompareRegister}, \
+    {"ccmp_32_condcmp_reg"_h, \
+     &VISITORCLASS::VisitConditionalCompareRegister}, \
+    {"ccmp_64_condcmp_reg"_h, \
+     &VISITORCLASS::VisitConditionalCompareRegister}, \
+    {"csel_32_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csel_64_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csinc_32_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csinc_64_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csinv_32_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csinv_64_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csneg_32_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"csneg_64_condsel"_h, &VISITORCLASS::VisitConditionalSelect}, \
+    {"sha1h_ss_cryptosha2"_h, &VISITORCLASS::VisitCrypto2RegSHA}, \
+    {"sha1su1_vv_cryptosha2"_h, &VISITORCLASS::VisitCrypto2RegSHA}, \
+    {"sha256su0_vv_cryptosha2"_h, &VISITORCLASS::VisitCrypto2RegSHA}, \
+    {"sha1c_qsv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha1m_qsv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha1p_qsv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha1su0_vvv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha256h2_qqv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha256h_qqv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"sha256su1_vvv_cryptosha3"_h, &VISITORCLASS::VisitCrypto3RegSHA}, \
+    {"aesd_b_cryptoaes"_h, &VISITORCLASS::VisitCryptoAES}, \
+    {"aese_b_cryptoaes"_h, &VISITORCLASS::VisitCryptoAES}, \
+    {"aesimc_b_cryptoaes"_h, &VISITORCLASS::VisitCryptoAES}, \
+    {"aesmc_b_cryptoaes"_h, &VISITORCLASS::VisitCryptoAES}, \
+    {"autda_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autdb_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autdza_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autdzb_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autia_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autib_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autiza_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"autizb_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"cls_32_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"cls_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"clz_32_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"clz_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacda_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacdb_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacdza_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacdzb_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacia_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacib_64p_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"paciza_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"pacizb_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rbit_32_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rbit_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rev16_32_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rev16_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rev32_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rev_32_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"rev_64_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"xpacd_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"xpaci_64z_dp_1src"_h, &VISITORCLASS::VisitDataProcessing1Source}, \
+    {"asrv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"asrv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32b_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32cb_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32ch_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32cw_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32cx_64c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32h_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32w_32c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"crc32x_64c_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"lslv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"lslv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"lsrv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"lsrv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"pacga_64p_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"rorv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"rorv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"sdiv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"sdiv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"udiv_32_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"udiv_64_dp_2src"_h, &VISITORCLASS::VisitDataProcessing2Source}, \
+    {"madd_32a_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"madd_64a_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"msub_32a_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"msub_64a_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"smaddl_64wa_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"smsubl_64wa_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"smulh_64_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"umaddl_64wa_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"umsubl_64wa_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"umulh_64_dp_3src"_h, &VISITORCLASS::VisitDataProcessing3Source}, \
+    {"setf16_only_setf"_h, &VISITORCLASS::VisitEvaluateIntoFlags}, \
+    {"setf8_only_setf"_h, &VISITORCLASS::VisitEvaluateIntoFlags}, \
+    {"brk_ex_exception"_h, &VISITORCLASS::VisitException}, \
+    {"dcps1_dc_exception"_h, &VISITORCLASS::VisitException}, \
+    {"dcps2_dc_exception"_h, &VISITORCLASS::VisitException}, \
+    {"dcps3_dc_exception"_h, &VISITORCLASS::VisitException}, \
+    {"hlt_ex_exception"_h, &VISITORCLASS::VisitException}, \
+    {"hvc_ex_exception"_h, &VISITORCLASS::VisitException}, \
+    {"smc_ex_exception"_h, &VISITORCLASS::VisitException}, \
+    {"svc_ex_exception"_h, &VISITORCLASS::VisitException}, \
+    {"extr_32_extract"_h, &VISITORCLASS::VisitExtract}, \
+    {"extr_64_extract"_h, &VISITORCLASS::VisitExtract}, \
+    {"fcmpe_dz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmpe_d_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmpe_hz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmpe_h_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmpe_sz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmpe_s_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_dz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_d_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_hz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_h_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_sz_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fcmp_s_floatcmp"_h, &VISITORCLASS::VisitFPCompare}, \
+    {"fccmpe_d_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fccmpe_h_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fccmpe_s_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fccmp_d_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fccmp_h_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fccmp_s_floatccmp"_h, &VISITORCLASS::VisitFPConditionalCompare}, \
+    {"fcsel_d_floatsel"_h, &VISITORCLASS::VisitFPConditionalSelect}, \
+    {"fcsel_h_floatsel"_h, &VISITORCLASS::VisitFPConditionalSelect}, \
+    {"fcsel_s_floatsel"_h, &VISITORCLASS::VisitFPConditionalSelect}, \
+    {"bfcvt_bs_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fabs_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fabs_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fabs_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_dh_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_ds_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_hd_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_hs_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_sd_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fcvt_sh_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fmov_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fmov_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fmov_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fneg_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fneg_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fneg_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint32x_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint32x_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint32z_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint32z_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint64x_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint64x_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint64z_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frint64z_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinta_d_floatdp1"_h, \
+     &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinta_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinta_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinti_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinti_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frinti_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintm_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintm_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintm_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintn_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintn_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintn_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintp_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintp_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintp_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintx_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintx_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintx_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintz_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintz_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"frintz_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fsqrt_d_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fsqrt_h_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fsqrt_s_floatdp1"_h, &VISITORCLASS::VisitFPDataProcessing1Source}, \
+    {"fadd_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fadd_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fadd_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fdiv_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fdiv_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fdiv_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmaxnm_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmaxnm_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmaxnm_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmax_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmax_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmax_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fminnm_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fminnm_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fminnm_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmin_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmin_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmin_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmul_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmul_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmul_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fnmul_d_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fnmul_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fnmul_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fsub_d_floatdp2"_h, \
+     &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fsub_h_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fsub_s_floatdp2"_h, &VISITORCLASS::VisitFPDataProcessing2Source}, \
+    {"fmadd_d_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fmadd_h_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fmadd_s_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fmsub_d_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fmsub_h_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fmsub_s_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmadd_d_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmadd_h_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmadd_s_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmsub_d_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmsub_h_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fnmsub_s_floatdp3"_h, &VISITORCLASS::VisitFPDataProcessing3Source}, \
+    {"fcvtzs_32d_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzs_32h_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzs_32s_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzs_64d_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzs_64h_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzs_64s_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_32d_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_32h_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_32s_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_64d_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_64h_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fcvtzu_64s_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_d32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_d64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_h32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_h64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_s32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"scvtf_s64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_d32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_d64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_h32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_h64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_s32_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"ucvtf_s64_float2fix"_h, &VISITORCLASS::VisitFPFixedPointConvert}, \
+    {"fmov_d_floatimm"_h, &VISITORCLASS::VisitFPImmediate}, \
+    {"fmov_h_floatimm"_h, &VISITORCLASS::VisitFPImmediate}, \
+    {"fmov_s_floatimm"_h, &VISITORCLASS::VisitFPImmediate}, \
+    {"fcvtas_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtas_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtas_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtas_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtas_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtas_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtau_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtms_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtmu_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtns_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtnu_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtps_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtpu_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzs_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_64h_float2int"_h, \
+     &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fcvtzu_64s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fjcvtzs_32d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_32h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_32s_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_64d_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_64h_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_64vx_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_d64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_h32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_h64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_s32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"fmov_v64i_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_d32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_d64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_h32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_h64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_s32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"scvtf_s64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_d32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_d64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_h32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_h64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_s32_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ucvtf_s64_float2int"_h, &VISITORCLASS::VisitFPIntegerConvert}, \
+    {"ldrsw_64_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"ldr_32_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"ldr_64_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"ldr_d_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"ldr_q_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"ldr_s_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"prfm_p_loadlit"_h, &VISITORCLASS::VisitLoadLiteral}, \
+    {"casab_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casah_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casalb_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casalh_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casal_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casal_c64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casa_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casa_c64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casb_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"cash_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caslb_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caslh_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casl_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casl_c64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspal_cp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspal_cp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspa_cp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspa_cp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspl_cp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"caspl_cp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casp_cp32_ldstexcl"_h, \
+     &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"casp_cp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"cas_c32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"cas_c64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldarb_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldarh_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldar_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldar_lr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxp_lp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxp_lp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxrb_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxrh_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxr_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldaxr_lr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldlarb_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldlarh_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldlar_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldlar_lr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxp_lp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxp_lp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxrb_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxrh_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxr_lr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldxr_lr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stllrb_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stllrh_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stllr_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stllr_sl64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlrb_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlrh_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlr_sl32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlr_sl64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxp_sp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxp_sp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxrb_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxrh_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxr_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stlxr_sr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxp_sp32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxp_sp64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxrb_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxrh_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxr_sr32_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"stxr_sr64_ldstexcl"_h, &VISITORCLASS::VisitLoadStoreExclusive}, \
+    {"ldraa_64w_ldst_pac"_h, &VISITORCLASS::VisitLoadStorePAC}, \
+    {"ldraa_64_ldst_pac"_h, &VISITORCLASS::VisitLoadStorePAC}, \
+    {"ldrab_64w_ldst_pac"_h, &VISITORCLASS::VisitLoadStorePAC}, \
+    {"ldrab_64_ldst_pac"_h, &VISITORCLASS::VisitLoadStorePAC}, \
+    {"ldnp_32_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"ldnp_64_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"ldnp_d_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"ldnp_q_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"ldnp_s_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"stnp_32_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"stnp_64_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"stnp_d_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"stnp_q_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"stnp_s_ldstnapair_offs"_h, \
+     &VISITORCLASS::VisitLoadStorePairNonTemporal}, \
+    {"ldpsw_64_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldp_32_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldp_64_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldp_d_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldp_q_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldp_s_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"stp_32_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"stp_64_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"stp_d_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"stp_q_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"stp_s_ldstpair_off"_h, &VISITORCLASS::VisitLoadStorePairOffset}, \
+    {"ldpsw_64_ldstpair_post"_h, \
+     &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldp_32_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldp_64_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldp_d_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldp_q_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldp_s_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"stp_32_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"stp_64_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"stp_d_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"stp_q_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"stp_s_ldstpair_post"_h, &VISITORCLASS::VisitLoadStorePairPostIndex}, \
+    {"ldpsw_64_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldp_32_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldp_64_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldp_d_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldp_q_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldp_s_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"stp_32_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"stp_64_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"stp_d_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"stp_q_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"stp_s_ldstpair_pre"_h, &VISITORCLASS::VisitLoadStorePairPreIndex}, \
+    {"ldrb_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrh_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrsb_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrsb_64_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrsh_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrsh_64_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrsw_64_ldst_immpost"_h, \
+     &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_64_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_b_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_d_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_h_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_q_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldr_s_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"strb_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"strh_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_32_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_64_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_b_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_d_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_h_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_q_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"str_s_ldst_immpost"_h, &VISITORCLASS::VisitLoadStorePostIndex}, \
+    {"ldrb_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrh_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrsb_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrsb_64_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrsh_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrsh_64_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldrsw_64_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_64_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_b_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_d_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_h_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_q_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldr_s_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"strb_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"strh_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_32_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_64_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_b_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_d_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_h_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_q_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"str_s_ldst_immpre"_h, &VISITORCLASS::VisitLoadStorePreIndex}, \
+    {"ldapurb_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapurh_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapursb_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapursb_64_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapursh_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapursh_64_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapursw_64_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapur_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldapur_64_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"stlurb_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"stlurh_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"stlur_32_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"stlur_64_ldapstl_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreRCpcUnscaledOffset}, \
+    {"ldrb_32bl_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrb_32b_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrh_32_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsb_32bl_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsb_32b_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsb_64bl_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsb_64b_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsh_32_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsh_64_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldrsw_64_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_32_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_64_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_bl_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_b_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_d_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_h_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_q_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldr_s_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"prfm_p_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"strb_32bl_ldst_regoff"_h, \
+     &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"strb_32b_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"strh_32_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_32_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_64_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_bl_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_b_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_d_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_h_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_q_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"str_s_ldst_regoff"_h, &VISITORCLASS::VisitLoadStoreRegisterOffset}, \
+    {"ldurb_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldurh_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldursb_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldursb_64_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldursh_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldursh_64_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldursw_64_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_64_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_b_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_d_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_h_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_q_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldur_s_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"prfum_p_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"sturb_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"sturh_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_32_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_64_ldst_unscaled"_h, \
+     &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_b_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_d_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_h_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_q_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"stur_s_ldst_unscaled"_h, &VISITORCLASS::VisitLoadStoreUnscaledOffset}, \
+    {"ldrb_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrh_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrsb_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrsb_64_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrsh_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrsh_64_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldrsw_64_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_64_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_b_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_d_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_h_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_q_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ldr_s_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"prfm_p_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"strb_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"strh_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_32_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_64_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_b_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_d_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_h_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_q_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"str_s_ldst_pos"_h, &VISITORCLASS::VisitLoadStoreUnsignedOffset}, \
+    {"ands_32s_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"ands_64s_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"and_32_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"and_64_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"eor_32_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"eor_64_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"orr_32_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"orr_64_log_imm"_h, &VISITORCLASS::VisitLogicalImmediate}, \
+    {"ands_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"ands_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"and_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"and_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"bics_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"bics_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"bic_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"bic_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"eon_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"eon_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"eor_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"eor_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"orn_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"orn_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"orr_32_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"orr_64_log_shift"_h, &VISITORCLASS::VisitLogicalShifted}, \
+    {"movk_32_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"movk_64_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"movn_32_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"movn_64_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"movz_32_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"movz_64_movewide"_h, &VISITORCLASS::VisitMoveWideImmediate}, \
+    {"fabs_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcmeq_asimdmiscfp16_fz"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcmge_asimdmiscfp16_fz"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcmgt_asimdmiscfp16_fz"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcmle_asimdmiscfp16_fz"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcmlt_asimdmiscfp16_fz"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtas_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtau_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtms_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtmu_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtns_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtnu_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtps_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtpu_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtzs_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fcvtzu_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fneg_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frecpe_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frinta_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frinti_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frintm_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frintn_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frintp_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frintx_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frintz_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"frsqrte_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"fsqrt_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"scvtf_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"ucvtf_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
+    {"addhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"raddhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"rsubhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"sabal_asimddiff_l"_h, \
+     &VISITORCLASS::VisitNEON3Different}, \
+    {"sabdl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"saddl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"saddw_asimddiff_w"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"smlal_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"smlsl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"smull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"sqdmlal_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"sqdmlsl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"sqdmull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"ssubl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"ssubw_asimddiff_w"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"subhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"uabal_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"uabdl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"uaddl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"uaddw_asimddiff_w"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"umlal_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"umlsl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"umull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"usubl_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"usubw_asimddiff_w"_h, &VISITORCLASS::VisitNEON3Different}, \
+    {"addp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"add_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmeq_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmge_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmgt_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmhi_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmhs_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"cmtst_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fabd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"facge_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"facgt_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"faddp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fcmeq_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fcmge_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fcmgt_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fdiv_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmaxnmp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmaxnm_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmaxp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmax_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fminnmp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fminnm_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fminp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmin_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmla_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmls_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmulx_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fmul_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"frecps_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"frsqrts_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"fsub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"sqadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
+    {"sqdmulh_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \
{"sqrdmulh_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sqrshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sqshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sqsub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"srshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uqadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uqrshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uqshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uqsub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"urshl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"ushl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"fcadd_asimdsame2_c"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"fcmla_asimdsame2_c"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"sdot_asimdsame2_d"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"sqrdmlah_asimdsame2_only"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"sqrdmlsh_asimdsame2_only"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"udot_asimdsame2_d"_h, &VISITORCLASS::VisitNEON3SameExtra}, \ + {"fabd_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"facge_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"facgt_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"faddp_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fadd_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fcmeq_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fcmge_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fcmgt_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fdiv_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmaxnmp_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmaxnm_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmaxp_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmax_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fminnmp_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fminnm_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fminp_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmin_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmla_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmls_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmulx_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fmul_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"frecps_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"frsqrts_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"fsub_asimdsamefp16_only"_h, &VISITORCLASS::VisitNEON3SameFP16}, \ + {"addv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"saddlv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"smaxv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"sminv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"uaddlv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"umaxv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"uminv_asimdall_only"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"mla_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"mls_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + 
{"mul_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"sqdmulh_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"sqrdmlah_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"sqrdmlsh_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"sqrdmulh_asimdelem_r"_h, &VISITORCLASS::VisitNEONByIndexedElement}, \ + {"dup_asimdins_dr_r"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"dup_asimdins_dv_v"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"ins_asimdins_ir_r"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"ins_asimdins_iv_v"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"smov_asimdins_w_w"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"smov_asimdins_x_x"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"umov_asimdins_w_w"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"umov_asimdins_x_x"_h, &VISITORCLASS::VisitNEONCopy}, \ + {"ext_asimdext_only"_h, &VISITORCLASS::VisitNEONExtract}, \ + {"ld1_asisdlse_r1_1v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld1_asisdlse_r2_2v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld1_asisdlse_r3_3v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld1_asisdlse_r4_4v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld2_asisdlse_r2"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld3_asisdlse_r3"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld4_asisdlse_r4"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st1_asisdlse_r1_1v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st1_asisdlse_r2_2v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st1_asisdlse_r3_3v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st1_asisdlse_r4_4v"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st2_asisdlse_r2"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st3_asisdlse_r3"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"st4_asisdlse_r4"_h, &VISITORCLASS::VisitNEONLoadStoreMultiStruct}, \ + {"ld1_asisdlsep_i1_i1"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_i2_i2"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_i3_i3"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_i4_i4"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_r1_r1"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_r2_r2"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_r3_r3"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1_asisdlsep_r4_r4"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld2_asisdlsep_i2_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld2_asisdlsep_r2_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld3_asisdlsep_i3_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld3_asisdlsep_r3_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld4_asisdlsep_i4_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld4_asisdlsep_r4_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_i1_i1"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_i2_i2"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_i3_i3"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_i4_i4"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, 
\ + {"st1_asisdlsep_r1_r1"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_r2_r2"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_r3_r3"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st1_asisdlsep_r4_r4"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st2_asisdlsep_i2_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st2_asisdlsep_r2_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st3_asisdlsep_i3_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st3_asisdlsep_r3_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st4_asisdlsep_i4_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"st4_asisdlsep_r4_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreMultiStructPostIndex}, \ + {"ld1r_asisdlso_r1"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld1_asisdlso_b1_1b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld1_asisdlso_d1_1d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld1_asisdlso_h1_1h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld1_asisdlso_s1_1s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld2r_asisdlso_r2"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld2_asisdlso_b2_2b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld2_asisdlso_d2_2d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld2_asisdlso_h2_2h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld2_asisdlso_s2_2s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld3r_asisdlso_r3"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld3_asisdlso_b3_3b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld3_asisdlso_d3_3d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld3_asisdlso_h3_3h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld3_asisdlso_s3_3s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld4r_asisdlso_r4"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld4_asisdlso_b4_4b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld4_asisdlso_d4_4d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld4_asisdlso_h4_4h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld4_asisdlso_s4_4s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st1_asisdlso_b1_1b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st1_asisdlso_d1_1d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st1_asisdlso_h1_1h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st1_asisdlso_s1_1s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st2_asisdlso_b2_2b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st2_asisdlso_d2_2d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st2_asisdlso_h2_2h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st2_asisdlso_s2_2s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st3_asisdlso_b3_3b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st3_asisdlso_d3_3d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st3_asisdlso_h3_3h"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st3_asisdlso_s3_3s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st4_asisdlso_b4_4b"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st4_asisdlso_d4_4d"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st4_asisdlso_h4_4h"_h, 
&VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"st4_asisdlso_s4_4s"_h, &VISITORCLASS::VisitNEONLoadStoreSingleStruct}, \ + {"ld1r_asisdlsop_r1_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1r_asisdlsop_rx1_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_b1_i1b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_bx1_r1b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_d1_i1d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_dx1_r1d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_h1_i1h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_hx1_r1h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_s1_i1s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld1_asisdlsop_sx1_r1s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2r_asisdlsop_r2_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2r_asisdlsop_rx2_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_b2_i2b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_bx2_r2b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_d2_i2d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_dx2_r2d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_h2_i2h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_hx2_r2h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_s2_i2s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld2_asisdlsop_sx2_r2s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3r_asisdlsop_r3_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3r_asisdlsop_rx3_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_b3_i3b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_bx3_r3b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_d3_i3d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_dx3_r3d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_h3_i3h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_hx3_r3h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_s3_i3s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld3_asisdlsop_sx3_r3s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4r_asisdlsop_r4_i"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4r_asisdlsop_rx4_r"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_b4_i4b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_bx4_r4b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_d4_i4d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_dx4_r4d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_h4_i4h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + 
{"ld4_asisdlsop_hx4_r4h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_s4_i4s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"ld4_asisdlsop_sx4_r4s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_b1_i1b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_bx1_r1b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_d1_i1d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_dx1_r1d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_h1_i1h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_hx1_r1h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_s1_i1s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st1_asisdlsop_sx1_r1s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_b2_i2b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_bx2_r2b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_d2_i2d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_dx2_r2d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_h2_i2h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_hx2_r2h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_s2_i2s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st2_asisdlsop_sx2_r2s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_b3_i3b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_bx3_r3b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_d3_i3d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_dx3_r3d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_h3_i3h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_hx3_r3h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_s3_i3s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st3_asisdlsop_sx3_r3s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_b4_i4b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_bx4_r4b"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_d4_i4d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_dx4_r4d"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_h4_i4h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_hx4_r4h"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_s4_i4s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"st4_asisdlsop_sx4_r4s"_h, \ + &VISITORCLASS::VisitNEONLoadStoreSingleStructPostIndex}, \ + {"bic_asimdimm_l_hl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"bic_asimdimm_l_sl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"fmov_asimdimm_d2_d"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"fmov_asimdimm_h_h"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"fmov_asimdimm_s_s"_h, 
&VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_d2_d"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_d_ds"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_l_hl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_l_sl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_m_sm"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"movi_asimdimm_n_b"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"mvni_asimdimm_l_hl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"mvni_asimdimm_l_sl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"mvni_asimdimm_m_sm"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"orr_asimdimm_l_hl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"orr_asimdimm_l_sl"_h, &VISITORCLASS::VisitNEONModifiedImmediate}, \ + {"trn1_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"trn2_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"uzp1_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"uzp2_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"zip1_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"zip2_asimdperm_only"_h, &VISITORCLASS::VisitNEONPerm}, \ + {"sqabs_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"sqneg_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"sqxtn_asisdmisc_n"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"sqxtun_asisdmisc_n"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"suqadd_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"uqxtn_asisdmisc_n"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"usqadd_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmeq_asisdmiscfp16_fz"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcmge_asisdmiscfp16_fz"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcmgt_asisdmiscfp16_fz"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcmle_asisdmiscfp16_fz"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcmlt_asisdmiscfp16_fz"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtas_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtau_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtms_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtmu_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtns_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtnu_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtps_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtpu_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtzs_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"fcvtzu_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"frecpe_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"frecpx_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"frsqrte_asisdmiscfp16_r"_h, \ + &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"scvtf_asisdmiscfp16_r"_h, &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"ucvtf_asisdmiscfp16_r"_h, &VISITORCLASS::VisitNEONScalar2RegMiscFP16}, \ + {"sqdmlal_asisddiff_only"_h, &VISITORCLASS::VisitNEONScalar3Diff}, \ + {"sqdmlsl_asisddiff_only"_h, &VISITORCLASS::VisitNEONScalar3Diff}, \ + {"sqdmull_asisddiff_only"_h, &VISITORCLASS::VisitNEONScalar3Diff}, \ + 
{"sqadd_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqdmulh_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqrdmulh_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqrshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqsub_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"srshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"uqadd_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"uqrshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"uqshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"uqsub_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"urshl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"ushl_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"fabd_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"facge_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"facgt_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"fcmeq_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"fcmge_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"fcmgt_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"fmulx_asisdsamefp16_only"_h, &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"frecps_asisdsamefp16_only"_h, \ + &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"frsqrts_asisdsamefp16_only"_h, \ + &VISITORCLASS::VisitNEONScalar3SameFP16}, \ + {"sqdmulh_asisdelem_r"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"sqrdmlah_asisdelem_r"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"sqrdmlsh_asisdelem_r"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"sqrdmulh_asisdelem_r"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"dup_asisdone_only"_h, &VISITORCLASS::VisitNEONScalarCopy}, \ + {"addp_asisdpair_only"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"faddp_asisdpair_only_h"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"faddp_asisdpair_only_sd"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fmaxnmp_asisdpair_only_h"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fmaxnmp_asisdpair_only_sd"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fmaxp_asisdpair_only_h"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fmaxp_asisdpair_only_sd"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fminnmp_asisdpair_only_h"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fminnmp_asisdpair_only_sd"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fminp_asisdpair_only_h"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fminp_asisdpair_only_sd"_h, &VISITORCLASS::VisitNEONScalarPairwise}, \ + {"fcvtzs_asisdshf_c"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"fcvtzu_asisdshf_c"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"scvtf_asisdshf_c"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqshlu_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqshl_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"ucvtf_asisdshf_c"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"uqshl_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqshlu_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqshl_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ 
+ {"uqshl_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"shl_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sli_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"tbl_asimdtbl_l1_1"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbl_asimdtbl_l2_2"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbl_asimdtbl_l3_3"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbl_asimdtbl_l4_4"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbx_asimdtbl_l1_1"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbx_asimdtbl_l2_2"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbx_asimdtbl_l3_3"_h, &VISITORCLASS::VisitNEONTable}, \ + {"tbx_asimdtbl_l4_4"_h, &VISITORCLASS::VisitNEONTable}, \ + {"adrp_only_pcreladdr"_h, &VISITORCLASS::VisitPCRelAddressing}, \ + {"adr_only_pcreladdr"_h, &VISITORCLASS::VisitPCRelAddressing}, \ + {"rmif_only_rmif"_h, &VISITORCLASS::VisitRotateRightIntoFlags}, \ + {"bti_hb_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"clrex_bn_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"dmb_bo_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"dsb_bo_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"hint_hm_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"mrs_rs_systemmove"_h, &VISITORCLASS::VisitSystem}, \ + {"msr_sr_systemmove"_h, &VISITORCLASS::VisitSystem}, \ + {"psb_hc_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"sb_only_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"sysl_rc_systeminstrs"_h, &VISITORCLASS::VisitSystem}, \ + {"sys_cr_systeminstrs"_h, &VISITORCLASS::VisitSystem}, \ + {"tcommit_only_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"tsb_hc_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"tbnz_only_testbranch"_h, &VISITORCLASS::VisitTestBranch}, \ + {"tbz_only_testbranch"_h, &VISITORCLASS::VisitTestBranch}, \ + {"bl_only_branch_imm"_h, &VISITORCLASS::VisitUnconditionalBranch}, \ + {"b_only_branch_imm"_h, &VISITORCLASS::VisitUnconditionalBranch}, \ + {"blraaz_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"blraa_64p_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"blrabz_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"blrab_64p_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"blr_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"braaz_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"braa_64p_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"brabz_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"brab_64p_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"br_64_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"drps_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"eretaa_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"eretab_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"eret_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"retaa_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"retab_64e_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"ret_64r_branch_reg"_h, \ + &VISITORCLASS::VisitUnconditionalBranchToRegister}, \ + {"bcax_vvv16_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfcvtn_asimdmisc_4s"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfdot_asimdelem_e"_h, &VISITORCLASS::VisitUnimplemented}, \ + 
{"bfdot_asimdsame2_d"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmlal_asimdelem_f"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmlal_asimdsame2_f"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmmla_asimdsame2_e"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"dsb_bon_barriers"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"eor3_vvv16_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ld64b_64l_memop"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldgm_64bulk_ldsttags"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrb_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrh_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrsb_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrsb_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrsh_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrsh_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtrsw_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtr_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ldtr_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"rax1_vvv2_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sha512h2_qqv_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sha512h_qqv_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sha512su0_vv2_cryptosha512_2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sha512su1_vvv2_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3partw1_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3partw2_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3ss1_vvv4_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3tt1a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3tt1b_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3tt2a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm3tt2b_vvv_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm4ekey_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sm4e_vv4_cryptosha512_2"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"st64b_64l_memop"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"st64bv_64_memop"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"st64bv0_64_memop"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"stgm_64bulk_ldsttags"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sttrb_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sttrh_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sttr_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"sttr_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"stzgm_64bulk_ldsttags"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"tcancel_ex_exception"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"tstart_br_systemresult"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"ttest_br_systemresult"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"wfet_only_systeminstrswithreg"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"wfit_only_systeminstrswithreg"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"xar_vvv2_crypto3_imm6"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfcvt_z_p_z_s2bf"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfcvtnt_z_p_z_s2bf"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfdot_z_zzz"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfdot_z_zzzi"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmlalb_z_zzz"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmlalb_z_zzzi"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmlalt_z_zzz"_h, 
&VISITORCLASS::VisitUnimplemented}, \ + {"bfmlalt_z_zzzi"_h, &VISITORCLASS::VisitUnimplemented}, \ + {"bfmmla_z_zzz"_h, &VISITORCLASS::VisitUnimplemented}, { \ + "unallocated"_h, &VISITORCLASS::VisitUnallocated \ + } + +#define SIM_AUD_VISITOR_MAP(VISITORCLASS) \ + {"autia1716_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"autiasp_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"autiaz_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"autib1716_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"autibsp_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"autibz_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"axflag_m_pstate"_h, &VISITORCLASS::VisitSystem}, \ + {"cfinv_m_pstate"_h, &VISITORCLASS::VisitSystem}, \ + {"csdb_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"dgh_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"esb_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"isb_bi_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"nop_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"pacia1716_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"paciasp_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"paciaz_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"pacib1716_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"pacibsp_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"pacibz_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"sev_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"sevl_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"ssbb_only_barriers"_h, &VISITORCLASS::VisitSystem}, \ + {"wfe_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"wfi_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"xaflag_m_pstate"_h, &VISITORCLASS::VisitSystem}, \ + {"xpaclri_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"yield_hi_hints"_h, &VISITORCLASS::VisitSystem}, \ + {"abs_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cls_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"clz_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cmeq_asimdmisc_z"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cmge_asimdmisc_z"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cmgt_asimdmisc_z"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cmle_asimdmisc_z"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cmlt_asimdmisc_z"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"cnt_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fabs_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcmeq_asimdmisc_fz"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcmge_asimdmisc_fz"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcmgt_asimdmisc_fz"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcmle_asimdmisc_fz"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcmlt_asimdmisc_fz"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtas_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtau_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtl_asimdmisc_l"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtms_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtmu_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtns_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtnu_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtn_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtps_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtpu_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtxn_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtzs_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fcvtzu_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fneg_asimdmisc_r"_h, 
&VISITORCLASS::VisitNEON2RegMisc}, \ + {"frecpe_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frint32x_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frint32z_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frint64x_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frint64z_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frinta_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frinti_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frintm_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frintn_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frintp_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frintx_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frintz_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"frsqrte_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"fsqrt_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"neg_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"not_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"rbit_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"rev16_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"rev32_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"rev64_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"sadalp_asimdmisc_p"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"saddlp_asimdmisc_p"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"scvtf_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"shll_asimdmisc_s"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"sqabs_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"sqneg_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"sqxtn_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"sqxtun_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"suqadd_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"uadalp_asimdmisc_p"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"uaddlp_asimdmisc_p"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"ucvtf_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"uqxtn_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"urecpe_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"ursqrte_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"usqadd_asimdmisc_r"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"xtn_asimdmisc_n"_h, &VISITORCLASS::VisitNEON2RegMisc}, \ + {"mla_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"mls_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"mul_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"saba_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sabd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"shadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"shsub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"smaxp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"smax_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"sminp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"smin_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"srhadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uaba_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uabd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uhadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uhsub_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"umaxp_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"umax_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"uminp_asimdsame_only"_h, 
&VISITORCLASS::VisitNEON3Same}, \ + {"umin_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"urhadd_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"and_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"bic_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"bif_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"bit_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"bsl_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"eor_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"orr_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"orn_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"pmul_asimdsame_only"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"fmlal2_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"fmlal_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"fmlsl2_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"fmlsl_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \ + {"pmull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \ + {"ushll_asimdshf_l"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sshll_asimdshf_l"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"shrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"rshrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqshrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqrshrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqshrun_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqrshrun_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"uqshrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"uqrshrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sri_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"srshr_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"srsra_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sshr_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"ssra_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"urshr_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"ursra_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"ushr_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"usra_asimdshf_r"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"scvtf_asimdshf_c"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"ucvtf_asimdshf_c"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"fcvtzs_asimdshf_c"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"fcvtzu_asimdshf_c"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \ + {"sqdmlal_asisdelem_l"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"sqdmlsl_asisdelem_l"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"sqdmull_asisdelem_l"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmla_asisdelem_rh_h"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmla_asisdelem_r_sd"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmls_asisdelem_rh_h"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmls_asisdelem_r_sd"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmulx_asisdelem_rh_h"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmulx_asisdelem_r_sd"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmul_asisdelem_rh_h"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fmul_asisdelem_r_sd"_h, \ + &VISITORCLASS::VisitNEONScalarByIndexedElement}, \ + {"fabd_asisdsame_only"_h, 
&VISITORCLASS::VisitNEONScalar3Same}, \ + {"facge_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"facgt_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"fcmeq_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"fcmge_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"fcmgt_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"fmulx_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"frecps_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"frsqrts_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmeq_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmge_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmgt_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmhi_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmhs_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"cmtst_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"add_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sub_asisdsame_only"_h, &VISITORCLASS::VisitNEONScalar3Same}, \ + {"sqrdmlah_asisdsame2_only"_h, \ + &VISITORCLASS::VisitNEONScalar3SameExtra}, \ + {"sqrdmlsh_asisdsame2_only"_h, \ + &VISITORCLASS::VisitNEONScalar3SameExtra}, \ + {"fmaxnmv_asimdall_only_h"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fmaxv_asimdall_only_h"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fminnmv_asimdall_only_h"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fminv_asimdall_only_h"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fmaxnmv_asimdall_only_sd"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fminnmv_asimdall_only_sd"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fmaxv_asimdall_only_sd"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"fminv_asimdall_only_sd"_h, &VISITORCLASS::VisitNEONAcrossLanes}, \ + {"shl_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sli_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sri_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"srshr_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"srsra_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sshr_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"ssra_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"urshr_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"ursra_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"ushr_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"usra_asisdshf_r"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqrshrn_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqrshrun_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqshrn_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"sqshrun_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"uqrshrn_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"uqshrn_asisdshf_n"_h, &VISITORCLASS::VisitNEONScalarShiftImmediate}, \ + {"cmeq_asisdmisc_z"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"cmge_asisdmisc_z"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"cmgt_asisdmisc_z"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"cmle_asisdmisc_z"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"cmlt_asisdmisc_z"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"abs_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + 
{"neg_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmeq_asisdmisc_fz"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmge_asisdmisc_fz"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmgt_asisdmisc_fz"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmle_asisdmisc_fz"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcmlt_asisdmisc_fz"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtas_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtau_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtms_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtmu_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtns_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtnu_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtps_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtpu_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtxn_asisdmisc_n"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtzs_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"fcvtzu_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"frecpe_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"frecpx_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"frsqrte_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, \ + {"scvtf_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc}, { \ + "ucvtf_asisdmisc_r"_h, &VISITORCLASS::VisitNEONScalar2RegMisc \ + } diff --git a/dep/vixl/include/vixl/aarch64/disasm-aarch64.h b/dep/vixl/include/vixl/aarch64/disasm-aarch64.h index c650bee98..7985383b8 100644 --- a/dep/vixl/include/vixl/aarch64/disasm-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/disasm-aarch64.h @@ -27,11 +27,16 @@ #ifndef VIXL_AARCH64_DISASM_AARCH64_H #define VIXL_AARCH64_DISASM_AARCH64_H +#include +#include +#include + #include "../globals-vixl.h" #include "../utils-vixl.h" #include "cpu-features-auditor-aarch64.h" #include "decoder-aarch64.h" +#include "decoder-visitor-map-aarch64.h" #include "instructions-aarch64.h" #include "operands-aarch64.h" @@ -45,11 +50,9 @@ class Disassembler : public DecoderVisitor { virtual ~Disassembler(); char* GetOutput(); -// Declare all Visitor functions. -#define DECLARE(A) \ - virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; - VISITOR_LIST(DECLARE) -#undef DECLARE + // Declare all Visitor functions. 
+  virtual void Visit(Metadata* metadata,
+                     const Instruction* instr) VIXL_OVERRIDE;
 
  protected:
   virtual void ProcessOutput(const Instruction* instr);
@@ -110,12 +113,145 @@
   int64_t CodeRelativeAddress(const void* instr);
 
  private:
+#define DECLARE(A) virtual void Visit##A(const Instruction* instr);
+  VISITOR_LIST(DECLARE)
+#undef DECLARE
+
+  using FormToVisitorFnMap = std::unordered_map<
+      uint32_t,
+      std::function<void(Disassembler*, const Instruction*)>>;
+  static const FormToVisitorFnMap* GetFormToVisitorFnMap();
+
+  std::string mnemonic_;
+  uint32_t form_hash_;
+
+  void SetMnemonicFromForm(const std::string& form) {
+    if (form != "unallocated") {
+      VIXL_ASSERT(form.find_first_of('_') != std::string::npos);
+      mnemonic_ = form.substr(0, form.find_first_of('_'));
+    }
+  }
+
+  void Disassemble_PdT_PgZ_ZnT_ZmT(const Instruction* instr);
+  void Disassemble_ZdB_Zn1B_Zn2B_imm(const Instruction* instr);
+  void Disassemble_ZdB_ZnB_ZmB(const Instruction* instr);
+  void Disassemble_ZdD_PgM_ZnS(const Instruction* instr);
+  void Disassemble_ZdD_ZnD_ZmD(const Instruction* instr);
+  void Disassemble_ZdD_ZnD_ZmD_imm(const Instruction* instr);
+  void Disassemble_ZdD_ZnS_ZmS_imm(const Instruction* instr);
+  void Disassemble_ZdH_PgM_ZnS(const Instruction* instr);
+  void Disassemble_ZdH_ZnH_ZmH_imm(const Instruction* instr);
+  void Disassemble_ZdS_PgM_ZnD(const Instruction* instr);
+  void Disassemble_ZdS_PgM_ZnH(const Instruction* instr);
+  void Disassemble_ZdS_PgM_ZnS(const Instruction* instr);
+  void Disassemble_ZdS_ZnH_ZmH_imm(const Instruction* instr);
+  void Disassemble_ZdS_ZnS_ZmS(const Instruction* instr);
+  void Disassemble_ZdS_ZnS_ZmS_imm(const Instruction* instr);
+  void Disassemble_ZdT_PgM_ZnT(const Instruction* instr);
+  void Disassemble_ZdT_PgZ_ZnT_ZmT(const Instruction* instr);
+  void Disassemble_ZdT_Pg_Zn1T_Zn2T(const Instruction* instr);
+  void Disassemble_ZdT_Zn1T_Zn2T_ZmT(const Instruction* instr);
+  void Disassemble_ZdT_ZnT_ZmT(const Instruction* instr);
+  void Disassemble_ZdT_ZnT_ZmTb(const Instruction* instr);
+  void Disassemble_ZdT_ZnTb(const Instruction* instr);
+  void Disassemble_ZdT_ZnTb_ZmTb(const Instruction* instr);
+  void Disassemble_ZdaD_ZnD_ZmD_imm(const Instruction* instr);
+  void Disassemble_ZdaD_ZnH_ZmH_imm_const(const Instruction* instr);
+  void Disassemble_ZdaD_ZnS_ZmS_imm(const Instruction* instr);
+  void Disassemble_ZdaH_ZnH_ZmH_imm(const Instruction* instr);
+  void Disassemble_ZdaH_ZnH_ZmH_imm_const(const Instruction* instr);
+  void Disassemble_ZdaS_ZnB_ZmB_imm_const(const Instruction* instr);
+  void Disassemble_ZdaS_ZnH_ZmH(const Instruction* instr);
+  void Disassemble_ZdaS_ZnH_ZmH_imm(const Instruction* instr);
+  void Disassemble_ZdaS_ZnS_ZmS_imm(const Instruction* instr);
+  void Disassemble_ZdaS_ZnS_ZmS_imm_const(const Instruction* instr);
+  void Disassemble_ZdaT_PgM_ZnTb(const Instruction* instr);
+  void Disassemble_ZdaT_ZnT_ZmT(const Instruction* instr);
+  void Disassemble_ZdaT_ZnT_ZmT_const(const Instruction* instr);
+  void Disassemble_ZdaT_ZnT_const(const Instruction* instr);
+  void Disassemble_ZdaT_ZnTb_ZmTb(const Instruction* instr);
+  void Disassemble_ZdaT_ZnTb_ZmTb_const(const Instruction* instr);
+  void Disassemble_ZdnB_ZdnB(const Instruction* instr);
+  void Disassemble_ZdnB_ZdnB_ZmB(const Instruction* instr);
+  void Disassemble_ZdnS_ZdnS_ZmS(const Instruction* instr);
+  void Disassemble_ZdnT_PgM_ZdnT_ZmT(const Instruction* instr);
+  void Disassemble_ZdnT_PgM_ZdnT_const(const Instruction* instr);
+  void Disassemble_ZdnT_ZdnT_ZmT_const(const Instruction* instr);
+  void Disassemble_ZtD_PgZ_ZnD_Xm(const Instruction* instr);
+  void Disassemble_ZtD_Pg_ZnD_Xm(const Instruction* instr);
+  void Disassemble_ZtS_PgZ_ZnS_Xm(const Instruction* instr);
+  void Disassemble_ZtS_Pg_ZnS_Xm(const Instruction* instr);
+  void Disassemble_ZdaS_ZnB_ZmB(const Instruction* instr);
+  void Disassemble_Vd4S_Vn16B_Vm16B(const Instruction* instr);
+
+  void DisassembleCpy(const Instruction* instr);
+  void DisassembleSet(const Instruction* instr);
+  void DisassembleMinMaxImm(const Instruction* instr);
+
+  void DisassembleSVEShiftLeftImm(const Instruction* instr);
+  void DisassembleSVEShiftRightImm(const Instruction* instr);
+  void DisassembleSVEAddSubCarry(const Instruction* instr);
+  void DisassembleSVEAddSubHigh(const Instruction* instr);
+  void DisassembleSVEComplexIntAddition(const Instruction* instr);
+  void DisassembleSVEBitwiseTernary(const Instruction* instr);
+  void DisassembleSVEFlogb(const Instruction* instr);
+  void DisassembleSVEFPPair(const Instruction* instr);
+
+  void DisassembleNoArgs(const Instruction* instr);
+
+  void DisassembleNEONMulByElementLong(const Instruction* instr);
+  void DisassembleNEONDotProdByElement(const Instruction* instr);
+  void DisassembleNEONFPMulByElement(const Instruction* instr);
+  void DisassembleNEONHalfFPMulByElement(const Instruction* instr);
+  void DisassembleNEONFPMulByElementLong(const Instruction* instr);
+  void DisassembleNEONComplexMulByElement(const Instruction* instr);
+  void DisassembleNEON2RegLogical(const Instruction* instr);
+  void DisassembleNEON2RegExtract(const Instruction* instr);
+  void DisassembleNEON2RegAddlp(const Instruction* instr);
+  void DisassembleNEON2RegCompare(const Instruction* instr);
+  void DisassembleNEON2RegFPCompare(const Instruction* instr);
+  void DisassembleNEON2RegFPConvert(const Instruction* instr);
+  void DisassembleNEON2RegFP(const Instruction* instr);
+  void DisassembleNEON3SameLogical(const Instruction* instr);
+  void DisassembleNEON3SameFHM(const Instruction* instr);
+  void DisassembleNEON3SameNoD(const Instruction* instr);
+  void DisassembleNEONShiftLeftLongImm(const Instruction* instr);
+  void DisassembleNEONShiftRightImm(const Instruction* instr);
+  void DisassembleNEONShiftRightNarrowImm(const Instruction* instr);
+  void DisassembleNEONScalarSatMulLongIndex(const Instruction* instr);
+  void DisassembleNEONFPScalarMulIndex(const Instruction* instr);
+  void DisassembleNEONFPScalar3Same(const Instruction* instr);
+  void DisassembleNEONScalar3SameOnlyD(const Instruction* instr);
+  void DisassembleNEONFPAcrossLanes(const Instruction* instr);
+  void DisassembleNEONFP16AcrossLanes(const Instruction* instr);
+  void DisassembleNEONScalarShiftImmOnlyD(const Instruction* instr);
+  void DisassembleNEONScalarShiftRightNarrowImm(const Instruction* instr);
+  void DisassembleNEONScalar2RegMiscOnlyD(const Instruction* instr);
+  void DisassembleNEONFPScalar2RegMisc(const Instruction* instr);
+  void DisassembleNEONPolynomialMul(const Instruction* instr);
+
+  void DisassembleMTELoadTag(const Instruction* instr);
+  void DisassembleMTEStoreTag(const Instruction* instr);
+  void DisassembleMTEStoreTagPair(const Instruction* instr);
+
+  void Disassemble_XdSP_XnSP_Xm(const Instruction* instr);
+  void Disassemble_XdSP_XnSP_uimm6_uimm4(const Instruction* instr);
+  void Disassemble_Xd_XnSP_Xm(const Instruction* instr);
+  void Disassemble_Xd_XnSP_XmSP(const Instruction* instr);
+
   void Format(const Instruction* instr,
               const char* mnemonic,
-              const char* format);
+              const char* format0,
+              const char* format1
= NULL);
+  void FormatWithDecodedMnemonic(const Instruction* instr,
+                                 const char* format0,
+                                 const char* format1 = NULL);
+
   void Substitute(const Instruction* instr, const char* string);
   int SubstituteField(const Instruction* instr, const char* format);
   int SubstituteRegisterField(const Instruction* instr, const char* format);
+  int SubstitutePredicateRegisterField(const Instruction* instr,
+                                       const char* format);
   int SubstituteImmediateField(const Instruction* instr, const char* format);
   int SubstituteLiteralField(const Instruction* instr, const char* format);
   int SubstituteBitfieldImmediateField(const Instruction* instr,
@@ -130,6 +266,14 @@ class Disassembler : public DecoderVisitor {
   int SubstituteBarrierField(const Instruction* instr, const char* format);
   int SubstituteSysOpField(const Instruction* instr, const char* format);
   int SubstituteCrField(const Instruction* instr, const char* format);
+  int SubstituteIntField(const Instruction* instr, const char* format);
+  int SubstituteSVESize(const Instruction* instr, const char* format);
+  int SubstituteTernary(const Instruction* instr, const char* format);
+
+  std::pair<int, int> GetRegNumForField(const Instruction* instr,
+                                        char reg_prefix,
+                                        const char* field);
+
   bool RdIsZROrSP(const Instruction* instr) const {
     return (instr->GetRd() == kZeroRegCode);
   }
@@ -173,6 +317,7 @@ class PrintDisassembler : public Disassembler {
       : cpu_features_auditor_(NULL),
         cpu_features_prefix_("// Needs: "),
         cpu_features_suffix_(""),
+        signed_addresses_(false),
         stream_(stream) {}
 
   // Convenience helpers for quick disassembly, without having to manually
@@ -201,12 +346,23 @@ class PrintDisassembler : public Disassembler {
     cpu_features_suffix_ = suffix;
   }
 
+  // By default, addresses are printed as simple, unsigned 64-bit hex values.
+  //
+  // With `PrintSignedAddresses(true)`:
+  //  - negative addresses are printed as "-0x1234...",
+  //  - positive addresses have a leading space, like " 0x1234...", to maintain
+  //    alignment.
+  //
+  // This is most useful in combination with Disassembler::MapCodeAddress(...).
+  void PrintSignedAddresses(bool s) { signed_addresses_ = s; }
+
  protected:
   virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;
 
   CPUFeaturesAuditor* cpu_features_auditor_;
   const char* cpu_features_prefix_;
   const char* cpu_features_suffix_;
+  bool signed_addresses_;
 
  private:
   FILE* stream_;
diff --git a/dep/vixl/include/vixl/aarch64/instructions-aarch64.h b/dep/vixl/include/vixl/aarch64/instructions-aarch64.h
index 4e6bce751..9a1c17fa9 100644
--- a/dep/vixl/include/vixl/aarch64/instructions-aarch64.h
+++ b/dep/vixl/include/vixl/aarch64/instructions-aarch64.h
@@ -32,6 +32,11 @@
 
 #include "constants-aarch64.h"
 
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-enum-enum-conversion"
+#endif
+
 namespace vixl {
 namespace aarch64 {
 // ISA constants.
-------------------------------------------------------------- @@ -81,6 +86,7 @@ const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); const uint64_t kHRegMask = UINT64_C(0xffff); const uint64_t kSRegMask = UINT64_C(0xffffffff); const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kHSignMask = UINT64_C(0x8000); const uint64_t kSSignMask = UINT64_C(0x80000000); const uint64_t kDSignMask = UINT64_C(0x8000000000000000); const uint64_t kWSignMask = UINT64_C(0x80000000); @@ -106,6 +112,8 @@ const unsigned kZeroRegCode = 31; const unsigned kSPRegInternalCode = 63; const unsigned kRegCodeMask = 0x1f; +const unsigned kAtomicAccessGranule = 16; + const unsigned kAddressTagOffset = 56; const unsigned kAddressTagWidth = 8; const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1) @@ -114,21 +122,49 @@ VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); const uint64_t kTTBRMask = UINT64_C(1) << 55; +// We can't define a static kZRegSize because the size depends on the +// implementation. However, it is sometimes useful to know the minimum and +// maximum possible sizes. +const unsigned kZRegMinSize = 128; +const unsigned kZRegMinSizeLog2 = 7; +const unsigned kZRegMinSizeInBytes = kZRegMinSize / 8; +const unsigned kZRegMinSizeInBytesLog2 = kZRegMinSizeLog2 - 3; +const unsigned kZRegMaxSize = 2048; +const unsigned kZRegMaxSizeLog2 = 11; +const unsigned kZRegMaxSizeInBytes = kZRegMaxSize / 8; +const unsigned kZRegMaxSizeInBytesLog2 = kZRegMaxSizeLog2 - 3; + +// The P register size depends on the Z register size. +const unsigned kZRegBitsPerPRegBit = kBitsPerByte; +const unsigned kZRegBitsPerPRegBitLog2 = 3; +const unsigned kPRegMinSize = kZRegMinSize / kZRegBitsPerPRegBit; +const unsigned kPRegMinSizeLog2 = kZRegMinSizeLog2 - 3; +const unsigned kPRegMinSizeInBytes = kPRegMinSize / 8; +const unsigned kPRegMinSizeInBytesLog2 = kPRegMinSizeLog2 - 3; +const unsigned kPRegMaxSize = kZRegMaxSize / kZRegBitsPerPRegBit; +const unsigned kPRegMaxSizeLog2 = kZRegMaxSizeLog2 - 3; +const unsigned kPRegMaxSizeInBytes = kPRegMaxSize / 8; +const unsigned kPRegMaxSizeInBytesLog2 = kPRegMaxSizeLog2 - 3; + +const unsigned kMTETagGranuleInBytes = 16; +const unsigned kMTETagGranuleInBytesLog2 = 4; +const unsigned kMTETagWidth = 4; + // Make these moved float constants backwards compatible // with explicit vixl::aarch64:: namespace references. -using vixl::kDoubleMantissaBits; using vixl::kDoubleExponentBits; -using vixl::kFloatMantissaBits; -using vixl::kFloatExponentBits; -using vixl::kFloat16MantissaBits; +using vixl::kDoubleMantissaBits; using vixl::kFloat16ExponentBits; +using vixl::kFloat16MantissaBits; +using vixl::kFloatExponentBits; +using vixl::kFloatMantissaBits; -using vixl::kFP16PositiveInfinity; using vixl::kFP16NegativeInfinity; -using vixl::kFP32PositiveInfinity; +using vixl::kFP16PositiveInfinity; using vixl::kFP32NegativeInfinity; -using vixl::kFP64PositiveInfinity; +using vixl::kFP32PositiveInfinity; using vixl::kFP64NegativeInfinity; +using vixl::kFP64PositiveInfinity; using vixl::kFP16DefaultNaN; using vixl::kFP32DefaultNaN; @@ -149,6 +185,49 @@ enum AddrMode { Offset, PreIndex, PostIndex }; enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister }; +enum VectorFormat { + kFormatUndefined = 0xffffffff, + kFormat8B = NEON_8B, + kFormat16B = NEON_16B, + kFormat4H = NEON_4H, + kFormat8H = NEON_8H, + kFormat2S = NEON_2S, + kFormat4S = NEON_4S, + kFormat1D = NEON_1D, + kFormat2D = NEON_2D, + + // Scalar formats. 
We add the scalar bit to distinguish between scalar and
+  // vector enumerations; the bit is always set in the encoding of scalar ops
+  // and always clear for vector ops. Although kFormatD and kFormat1D appear
+  // to be the same, their meaning is subtly different. The first is a scalar
+  // operation, the second a vector operation that only affects one lane.
+  kFormatB = NEON_B | NEONScalar,
+  kFormatH = NEON_H | NEONScalar,
+  kFormatS = NEON_S | NEONScalar,
+  kFormatD = NEON_D | NEONScalar,
+
+  // An artificial value, used to distinguish from NEON format category.
+  kFormatSVE = 0x0000fffd,
+  // Artificial values. Q and O lane sizes aren't encoded in the usual size
+  // field.
+  kFormatSVEQ = 0x00080000,
+  kFormatSVEO = 0x00040000,
+
+  // Vector element width of SVE register with the unknown lane count since
+  // the vector length is implementation dependent.
+  kFormatVnB = SVE_B | kFormatSVE,
+  kFormatVnH = SVE_H | kFormatSVE,
+  kFormatVnS = SVE_S | kFormatSVE,
+  kFormatVnD = SVE_D | kFormatSVE,
+  kFormatVnQ = kFormatSVEQ | kFormatSVE,
+  kFormatVnO = kFormatSVEO | kFormatSVE,
+
+  // Artificial values, used by simulator trace tests and a few oddball
+  // instructions (such as FMLAL).
+  kFormat2H = 0xfffffffe,
+  kFormat1Q = 0xfffffffd
+};
+
 // Instructions. ---------------------------------------------------------------
 
 class Instruction {
@@ -176,6 +255,47 @@ class Instruction {
     return ExtractBits(msb, lsb);
   }
 
+  // Compress bit extraction operation from Hacker's Delight.
+  // https://github.com/hcs0/Hackers-Delight/blob/master/compress.c.txt
+  uint32_t Compress(uint32_t mask) const {
+    uint32_t mk, mp, mv, t;
+    uint32_t x = GetInstructionBits() & mask;  // Clear irrelevant bits.
+    mk = ~mask << 1;                           // We will count 0's to right.
+    for (int i = 0; i < 5; i++) {
+      mp = mk ^ (mk << 1);  // Parallel suffix.
+      mp = mp ^ (mp << 2);
+      mp = mp ^ (mp << 4);
+      mp = mp ^ (mp << 8);
+      mp = mp ^ (mp << 16);
+      mv = mp & mask;                         // Bits to move.
+      mask = (mask ^ mv) | (mv >> (1 << i));  // Compress mask.
+      t = x & mv;
+      x = (x ^ t) | (t >> (1 << i));  // Compress x.
+      mk = mk & ~mp;
+    }
+    return x;
+  }
+
+  template <uint32_t M>
+  uint32_t ExtractBits() const {
+    return Compress(M);
+  }
+
+  uint32_t ExtractBitsAbsent() const {
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
+  template <uint32_t M, uint32_t V>
+  uint32_t IsMaskedValue() const {
+    return (Mask(M) == V) ? 1 : 0;
+  }
+
+  uint32_t IsMaskedValueAbsent() const {
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
   int32_t ExtractSignedBits(int msb, int lsb) const {
     int32_t bits = *(reinterpret_cast<const int32_t*>(this));
     return ExtractSignedBitfield32(msb, lsb, bits);
@@ -196,6 +316,34 @@ class Instruction {
   INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
 #undef DEFINE_GETTER
 
+  template <int msb, int lsb>
+  int32_t GetRx() const {
+    // We don't have any register fields wider than five bits, so the result
+    // will always fit into an int32_t.
+    VIXL_ASSERT((msb - lsb + 1) <= 5);
+    return this->ExtractBits(msb, lsb);
+  }
+
+  VectorFormat GetSVEVectorFormat(int field_lsb = 22) const {
+    VIXL_ASSERT((field_lsb >= 0) && (field_lsb <= 30));
+    uint32_t instr = ExtractUnsignedBitfield32(field_lsb + 1,
+                                               field_lsb,
+                                               GetInstructionBits())
+                     << 22;
+    switch (instr & SVESizeFieldMask) {
+      case SVE_B:
+        return kFormatVnB;
+      case SVE_H:
+        return kFormatVnH;
+      case SVE_S:
+        return kFormatVnS;
+      case SVE_D:
+        return kFormatVnD;
+    }
+    VIXL_UNREACHABLE();
+    return kFormatUndefined;
+  }
+
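Compress() above is the "generalized extract" routine from Hacker's Delight: the instruction bits selected by `mask` are gathered, in order, into the low-order bits of the result, which is what lets the templated ExtractBits<M>() read a field that is split across the encoding in a single call. A standalone rendering of the same loop, offered only as an illustration of the behaviour:

#include <cassert>
#include <cstdint>

// Pack the bits of x selected by mask into the low-order bits
// (Hacker's Delight, compress). Mirrors Instruction::Compress above.
uint32_t Compress(uint32_t x, uint32_t mask) {
  x &= mask;                 // Clear irrelevant bits.
  uint32_t mk = ~mask << 1;  // Count 0's to the right of each bit.
  for (int i = 0; i < 5; i++) {
    uint32_t mp = mk ^ (mk << 1);  // Parallel suffix.
    mp ^= mp << 2;
    mp ^= mp << 4;
    mp ^= mp << 8;
    mp ^= mp << 16;
    uint32_t mv = mp & mask;                // Bits to move.
    mask = (mask ^ mv) | (mv >> (1 << i));  // Compress mask.
    uint32_t t = x & mv;
    x = (x ^ t) | (t >> (1 << i));  // Compress x.
    mk &= ~mp;
  }
  return x;
}

int main() {
  // The mask selects bits 16 and 4; they land in result bits 1 and 0.
  assert(Compress(0x00010010, 0x00010010) == 0x3);  // Both selected bits set.
  assert(Compress(0x00010000, 0x00010010) == 0x2);  // Only the high one set.
  return 0;
}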
   // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
   // formed from ImmPCRelLo and ImmPCRelHi.
   int GetImmPCRel() const {
@@ -207,10 +355,40 @@ class Instruction {
   }
   VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
 
+  // ImmLSPAC is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+  // formed from ImmLSPACLo and ImmLSPACHi.
+  int GetImmLSPAC() const {
+    uint32_t hi = static_cast<uint32_t>(GetImmLSPACHi());
+    uint32_t lo = GetImmLSPACLo();
+    uint32_t offset = (hi << ImmLSPACLo_width) | lo;
+    int width = ImmLSPACLo_width + ImmLSPACHi_width;
+    return ExtractSignedBitfield32(width - 1, 0, offset) << 3;
+  }
+
   uint64_t GetImmLogical() const;
   VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
     return GetImmLogical();
   }
+  uint64_t GetSVEImmLogical() const;
+  int GetSVEBitwiseImmLaneSizeInBytesLog2() const;
+  uint64_t DecodeImmBitMask(int32_t n,
+                            int32_t imm_s,
+                            int32_t imm_r,
+                            int32_t size) const;
+
+  std::pair<int, int> GetSVEPermuteIndexAndLaneSizeLog2() const;
+
+  std::pair<int, int> GetSVEMulZmAndIndex() const;
+  std::pair<int, int> GetSVEMulLongZmAndIndex() const;
+
+  std::pair<int, int> GetSVEImmShiftAndLaneSizeLog2(bool is_predicated) const;
+
+  int GetSVEExtractImmediate() const;
+
+  int GetSVEMsizeFromDtype(bool is_signed, int dtype_h_lsb = 23) const;
+
+  int GetSVEEsizeFromDtype(bool is_signed, int dtype_l_lsb = 21) const;
+
   unsigned GetImmNEONabcdefgh() const;
   VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
@@ -237,6 +415,16 @@ class Instruction {
     return GetImmNEONFP64();
   }
 
+  Float16 GetSVEImmFP16() const { return Imm8ToFloat16(ExtractBits(12, 5)); }
+
+  float GetSVEImmFP32() const { return Imm8ToFP32(ExtractBits(12, 5)); }
+
+  double GetSVEImmFP64() const { return Imm8ToFP64(ExtractBits(12, 5)); }
+
+  static Float16 Imm8ToFloat16(uint32_t imm8);
+  static float Imm8ToFP32(uint32_t imm8);
+  static double Imm8ToFP64(uint32_t imm8);
+
   unsigned GetSizeLS() const {
     return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
   }
@@ -299,6 +487,10 @@ class Instruction {
     return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
   }
 
+  // True if `this` is valid immediately after the provided movprfx instruction.
+  bool CanTakeSVEMovprfx(uint32_t form_hash, Instruction const* movprfx) const;
+  bool CanTakeSVEMovprfx(const char* form, Instruction const* movprfx) const;
+
   bool IsLoad() const;
   bool IsStore() const;
 
@@ -312,6 +504,83 @@ class Instruction {
     (Mask(MoveWideImmediateMask) == MOVN_w);
   }
 
+  bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }
+
+  bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }
+
+  bool IsBti() const {
+    if (Mask(SystemHintFMask) == SystemHintFixed) {
+      int imm_hint = GetImmHint();
+      switch (imm_hint) {
+        case BTI:
+        case BTI_c:
+        case BTI_j:
+        case BTI_jc:
+          return true;
+      }
+    }
+    return false;
+  }
+
+  bool IsMOPSPrologueOf(const Instruction* instr, uint32_t mops_type) const {
+    VIXL_ASSERT((mops_type == "set"_h) || (mops_type == "setg"_h) ||
+                (mops_type == "cpy"_h));
+    const int op_lsb = (mops_type == "cpy"_h) ? 22 : 14;
+    return GetInstructionBits() == instr->Mask(~(0x3U << op_lsb));
+  }
+
+  bool IsMOPSMainOf(const Instruction* instr, uint32_t mops_type) const {
+    VIXL_ASSERT((mops_type == "set"_h) || (mops_type == "setg"_h) ||
+                (mops_type == "cpy"_h));
+    const int op_lsb = (mops_type == "cpy"_h) ? 22 : 14;
+    return GetInstructionBits() ==
+           (instr->Mask(~(0x3U << op_lsb)) | (0x1 << op_lsb));
+  }
+
+  bool IsMOPSEpilogueOf(const Instruction* instr, uint32_t mops_type) const {
+    VIXL_ASSERT((mops_type == "set"_h) || (mops_type == "setg"_h) ||
+                (mops_type == "cpy"_h));
+    const int op_lsb = (mops_type == "cpy"_h) ? 22 : 14;
+    return GetInstructionBits() ==
+           (instr->Mask(~(0x3U << op_lsb)) | (0x2 << op_lsb));
+  }
+
+  template <uint32_t mops_type>
+  bool IsConsistentMOPSTriplet() const {
+    VIXL_STATIC_ASSERT((mops_type == "set"_h) || (mops_type == "setg"_h) ||
+                       (mops_type == "cpy"_h));
+
+    int64_t isize = static_cast<int64_t>(kInstructionSize);
+    const Instruction* prev2 = GetInstructionAtOffset(-2 * isize);
+    const Instruction* prev1 = GetInstructionAtOffset(-1 * isize);
+    const Instruction* next1 = GetInstructionAtOffset(1 * isize);
+    const Instruction* next2 = GetInstructionAtOffset(2 * isize);
+
+    // Use the encoding of the current instruction to determine the expected
+    // adjacent instructions. NB. this doesn't check if the nearby instructions
+    // are MOPS-type, but checks that they form a consistent triplet if they
+    // are. For example, 'mov x0, #0; mov x0, #512; mov x0, #1024' is a
+    // consistent triplet, but they are not MOPS instructions.
+    const int op_lsb = (mops_type == "cpy"_h) ? 22 : 14;
+    const uint32_t kMOPSOpfield = 0x3 << op_lsb;
+    const uint32_t kMOPSPrologue = 0;
+    const uint32_t kMOPSMain = 0x1 << op_lsb;
+    const uint32_t kMOPSEpilogue = 0x2 << op_lsb;
+    switch (Mask(kMOPSOpfield)) {
+      case kMOPSPrologue:
+        return next1->IsMOPSMainOf(this, mops_type) &&
+               next2->IsMOPSEpilogueOf(this, mops_type);
+      case kMOPSMain:
+        return prev1->IsMOPSPrologueOf(this, mops_type) &&
+               next1->IsMOPSEpilogueOf(this, mops_type);
+      case kMOPSEpilogue:
+        return prev2->IsMOPSPrologueOf(this, mops_type) &&
+               prev1->IsMOPSMainOf(this, mops_type);
+      default:
+        VIXL_ABORT_WITH_MSG("Undefined MOPS operation\n");
+    }
+  }
+
   static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
   VIXL_DEPRECATED(
       "GetImmBranchRangeBitwidth",
@@ -496,40 +765,12 @@ class Instruction {
  private:
   int GetImmBranch() const;
 
-  static Float16 Imm8ToFloat16(uint32_t imm8);
-  static float Imm8ToFP32(uint32_t imm8);
-  static double Imm8ToFP64(uint32_t imm8);
-
   void SetPCRelImmTarget(const Instruction* target);
   void SetBranchImmTarget(const Instruction* target);
 };
 
-// Functions for handling NEON vector format information.
-enum VectorFormat {
-  kFormatUndefined = 0xffffffff,
-  kFormat8B = NEON_8B,
-  kFormat16B = NEON_16B,
-  kFormat4H = NEON_4H,
-  kFormat8H = NEON_8H,
-  kFormat2S = NEON_2S,
-  kFormat4S = NEON_4S,
-  kFormat1D = NEON_1D,
-  kFormat2D = NEON_2D,
-
-  // Scalar formats. We add the scalar bit to distinguish between scalar and
-  // vector enumerations; the bit is always set in the encoding of scalar ops
-  // and always clear for vector ops. Although kFormatD and kFormat1D appear
-  // to be the same, their meaning is subtly different. The first is a scalar
-  // operation, the second a vector operation that only affects one lane.
-  kFormatB = NEON_B | NEONScalar,
-  kFormatH = NEON_H | NEONScalar,
-  kFormatS = NEON_S | NEONScalar,
-  kFormatD = NEON_D | NEONScalar,
-
-  // A value invented solely for FP16 scalar pairwise simulator trace tests.
-  kFormat2H = 0xfffffffe
-};
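The MOPS helpers above rely on the prologue, main and epilogue members of a memory-operation triplet differing only in a two-bit op field (bits 23:22 for "cpy", 15:14 for "set"/"setg"), so each expected neighbour can be reconstructed from the current encoding. A small illustration of that arithmetic; the base constant is made up for the example, not a real A64 opcode:

#include <cassert>
#include <cstdint>

constexpr int kOpLsb = 22;  // op<1:0> position for the "cpy" family.

// Derive the other members of a triplet from any one member, as
// IsMOPSMainOf()/IsMOPSEpilogueOf() above do.
constexpr uint32_t TripletBase(uint32_t instr) {
  return instr & ~(0x3u << kOpLsb);  // op field cleared == prologue form.
}
constexpr uint32_t MainFor(uint32_t instr) {
  return TripletBase(instr) | (0x1u << kOpLsb);
}
constexpr uint32_t EpilogueFor(uint32_t instr) {
  return TripletBase(instr) | (0x2u << kOpLsb);
}

int main() {
  const uint32_t cpyp = 0x19000400;  // Hypothetical prologue (op field == 0).
  assert(TripletBase(MainFor(cpyp)) == TripletBase(cpyp));  // Same triplet.
  assert(((MainFor(cpyp) >> kOpLsb) & 0x3) == 0x1);         // Main tag.
  assert(((EpilogueFor(cpyp) >> kOpLsb) & 0x3) == 0x2);     // Epilogue tag.
  return 0;
}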
+// Functions for handling NEON and SVE vector format information.
 
 const int kMaxLanesPerVector = 16;
 
@@ -537,12 +778,16 @@ VectorFormat VectorFormatHalfWidth(VectorFormat vform);
 VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
 VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
 VectorFormat VectorFormatHalfLanes(VectorFormat vform);
-VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat ScalarFormatFromLaneSize(int lane_size_in_bits);
 VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
 VectorFormat VectorFormatFillQ(VectorFormat vform);
 VectorFormat ScalarFormatFromFormat(VectorFormat vform);
+VectorFormat SVEFormatFromLaneSizeInBits(int lane_size_in_bits);
+VectorFormat SVEFormatFromLaneSizeInBytes(int lane_size_in_bytes);
+VectorFormat SVEFormatFromLaneSizeInBytesLog2(int lane_size_in_bytes_log_2);
 unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
 unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+bool IsSVEFormat(VectorFormat vform);
 // TODO: Make the return types of these functions consistent.
 unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
 int LaneSizeInBytesFromFormat(VectorFormat vform);
@@ -588,7 +833,7 @@ class NEONFormatDecoder {
   enum SubstitutionMode { kPlaceholder, kFormat };
 
   // Construct a format decoder with increasingly specific format maps for each
-  // subsitution. If no format map is specified, the default is the integer
+  // substitution. If no format map is specified, the default is the integer
   // format map.
   explicit NEONFormatDecoder(const Instruction* instr) {
     instrbits_ = instr->GetInstructionBits();
@@ -639,18 +884,26 @@ class NEONFormatDecoder {
                          SubstitutionMode mode0 = kFormat,
                          SubstitutionMode mode1 = kFormat,
                          SubstitutionMode mode2 = kFormat) {
+    const char* subst0 = GetSubstitute(0, mode0);
+    const char* subst1 = GetSubstitute(1, mode1);
+    const char* subst2 = GetSubstitute(2, mode2);
+
+    if ((subst0 == NULL) || (subst1 == NULL) || (subst2 == NULL)) {
+      return NULL;
+    }
+
     snprintf(form_buffer_,
              sizeof(form_buffer_),
             string,
-             GetSubstitute(0, mode0),
-             GetSubstitute(1, mode1),
-             GetSubstitute(2, mode2));
+             subst0,
+             subst1,
+             subst2);
     return form_buffer_;
   }
 
-  // Append a "2" to a mnemonic string based of the state of the Q bit.
+  // Append a "2" to a mnemonic string based on the state of the Q bit.
   const char* Mnemonic(const char* mnemonic) {
-    if ((instrbits_ & NEON_Q) != 0) {
+    if ((mnemonic != NULL) && (instrbits_ & NEON_Q) != 0) {
       snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
       return mne_buffer_;
     }
@@ -745,6 +998,33 @@ class NEONFormatDecoder {
     return &map;
   }
 
+  // The shift immediate map uses between two and five bits to encode the NEON
+  // vector format:
+  //    00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+  //    01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+  static const NEONFormatMap* ShiftImmFormatMap() {
+    static const NEONFormatMap map = {{22, 21, 20, 19, 30},
+                                      {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B,
+                                       NF_4H, NF_8H, NF_4H, NF_8H,
+                                       NF_2S, NF_4S, NF_2S, NF_4S,
+                                       NF_2S, NF_4S, NF_2S, NF_4S,
+                                       NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+                                       NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+                                       NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+                                       NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
+    return &map;
+  }
+
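A NEONFormatMap is just a table lookup: the listed instruction bits are concatenated, most significant first, into an index into the format array. A sketch of that decode step under that assumption (types and names abbreviated from the real class):

#include <cassert>
#include <cstddef>
#include <cstdint>

enum NEONFormat { NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_2D };

struct NEONFormatMap {
  uint8_t bits[5];     // Instruction bit positions, MSB of the index first.
  NEONFormat map[32];  // One entry per index value; rest default to NF_UNDEF.
};

// Concatenate the mapped bits of instr into an index, then look it up.
NEONFormat Decode(uint32_t instr, const NEONFormatMap& fm, size_t nbits) {
  uint32_t index = 0;
  for (size_t i = 0; i < nbits; i++) {
    index = (index << 1) | ((instr >> fm.bits[i]) & 1);
  }
  return fm.map[index];
}

int main() {
  // Shift-immediate map: bits {22, 21, 20, 19, 30}; per the comment above,
  // the pattern 00011 selects 16B.
  NEONFormatMap shift_imm = {{22, 21, 20, 19, 30},
                             {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B}};
  uint32_t instr = (1u << 19) | (1u << 30);  // immh = 0001, Q = 1.
  assert(Decode(instr, shift_imm, 5) == NF_16B);
  return 0;
}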
+  // The shift long/narrow immediate map uses between two and four bits to
+  // encode the NEON vector format:
+  //    0001->8H, 001x->4S, 01xx->2D, all others undefined.
+  static const NEONFormatMap* ShiftLongNarrowImmFormatMap() {
+    static const NEONFormatMap map =
+        {{22, 21, 20, 19},
+         {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
+    return &map;
+  }
+
   // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
   // formats: NF_B, NF_H, NF_S, NF_D.
   static const NEONFormatMap* ScalarFormatMap() {
@@ -818,7 +1098,7 @@ class NEONFormatDecoder {
   static const char* NEONFormatAsString(NEONFormat format) {
     // clang-format off
     static const char* formats[] = {
-      "undefined",
+      NULL,
       "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
       "b", "h", "s", "d"
     };
@@ -833,9 +1113,9 @@ class NEONFormatDecoder {
                 (format == NF_D) || (format == NF_UNDEF));
     // clang-format off
     static const char* formats[] = {
-      "undefined",
-      "undefined", "undefined", "undefined", "undefined",
-      "undefined", "undefined", "undefined", "undefined",
+      NULL,
+      NULL, NULL, NULL, NULL,
+      NULL, NULL, NULL, NULL,
       "'B", "'H", "'S", "'D"
     };
     // clang-format on
@@ -862,4 +1142,8 @@ class NEONFormatDecoder {
 }  // namespace aarch64
 }  // namespace vixl
 
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
 #endif  // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/instrument-aarch64.h b/dep/vixl/include/vixl/aarch64/instrument-aarch64.h
deleted file mode 100644
index 4401b3eac..000000000
--- a/dep/vixl/include/vixl/aarch64/instrument-aarch64.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2014, VIXL authors
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_
-#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_
-
-#include "../globals-vixl.h"
-#include "../utils-vixl.h"
-
-#include "constants-aarch64.h"
-#include "decoder-aarch64.h"
-#include "instrument-aarch64.h"
-
-namespace vixl {
-namespace aarch64 {
-
-const int kCounterNameMaxLength = 256;
-const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
-
-
-enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
-
-
-enum CounterType {
-  Gauge = 0,      // Gauge counters reset themselves after reading.
-  Cumulative = 1  // Cumulative counters keep their value after reading.
-};
-
-
-class Counter {
- public:
-  explicit Counter(const char* name, CounterType type = Gauge);
-
-  void Increment();
-  void Enable();
-  void Disable();
-  bool IsEnabled();
-  uint64_t GetCount();
-  VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); }
-
-  const char* GetName();
-  VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); }
-
-  CounterType GetType();
-  VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); }
-
- private:
-  char name_[kCounterNameMaxLength];
-  uint64_t count_;
-  bool enabled_;
-  CounterType type_;
-};
-
-
-class Instrument : public DecoderVisitor {
- public:
-  explicit Instrument(
-      const char* datafile = NULL,
-      uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
-  ~Instrument();
-
-  void Enable();
-  void Disable();
-
-// Declare all Visitor functions.
-#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
-  VISITOR_LIST(DECLARE)
-#undef DECLARE
-
- private:
-  void Update();
-  void DumpCounters();
-  void DumpCounterNames();
-  void DumpEventMarker(unsigned marker);
-  void HandleInstrumentationEvent(unsigned event);
-  Counter* GetCounter(const char* name);
-
-  void InstrumentLoadStore(const Instruction* instr);
-  void InstrumentLoadStorePair(const Instruction* instr);
-
-  std::list<Counter*> counters_;
-
-  FILE* output_stream_;
-
-  // Counter information is dumped every sample_period_ instructions decoded.
-  // For a sample_period_ = 0 a final counter value is only produced when the
-  // Instrumentation class is destroyed.
-  uint64_t sample_period_;
-};
-
-}  // namespace aarch64
-}  // namespace vixl
-
-#endif  // VIXL_AARCH64_INSTRUMENT_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h b/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h
index 88ed55770..cd30f1644 100644
--- a/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h
+++ b/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h
@@ -35,7 +35,6 @@
 #include "../macro-assembler-interface.h"
 
 #include "assembler-aarch64.h"
-#include "instrument-aarch64.h"
 // Required for runtime call support.
 // TODO: Break this dependency. We should be able to separate out the necessary
 // parts so that we don't need to include the whole simulator header.
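The deleted Instrument class hooked into the decoder through the old macro-generated Visit##A() methods; equivalent ad-hoc counting can be layered on the new single-entry visitor instead. A rough sketch, assuming (as in current upstream VIXL, though unverified against this exact revision) that Metadata is a string-to-string map carrying the decoded form under the "form" key; this class is not part of the diff:

#include <cstdint>
#include <map>
#include <string>

#include "aarch64/decoder-aarch64.h"  // Path may differ per build setup.

namespace vixl {
namespace aarch64 {

// Counts decoded instructions per form, replacing the deleted Instrument
// class's counters with the Metadata-based Visit() entry point.
class FormCounter : public DecoderVisitor {
 public:
  void Visit(Metadata* metadata, const Instruction* instr) VIXL_OVERRIDE {
    USE(instr);
    counts_[(*metadata)["form"]]++;
  }

  const std::map<std::string, uint64_t>& GetCounts() const { return counts_; }

 private:
  std::map<std::string, uint64_t> counts_;
};

}  // namespace aarch64
}  // namespace vixl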
@@ -61,7 +60,7 @@ #define LSPAIR_MACRO_LIST(V) \ V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ - V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) + V(Ldpsw, Register&, rt, rt2, LDPSW_x) namespace vixl { namespace aarch64 { @@ -109,7 +108,7 @@ class Pool { class LiteralPool : public Pool { public: explicit LiteralPool(MacroAssembler* masm); - ~LiteralPool(); + ~LiteralPool() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION; void Reset(); void AddEntry(RawLiteral* literal); @@ -528,6 +527,57 @@ class MacroEmissionCheckScope : public EmissionCheckScope { }; +// This scope simplifies the handling of the SVE `movprfx` instruction. +// +// If dst.Aliases(src): +// - Start an ExactAssemblyScope(masm, kInstructionSize). +// Otherwise: +// - Start an ExactAssemblyScope(masm, 2 * kInstructionSize). +// - Generate a suitable `movprfx` instruction. +// +// In both cases, the ExactAssemblyScope is left with enough remaining space for +// exactly one destructive instruction. +class MovprfxHelperScope : public ExactAssemblyScope { + public: + inline MovprfxHelperScope(MacroAssembler* masm, + const ZRegister& dst, + const ZRegister& src); + + inline MovprfxHelperScope(MacroAssembler* masm, + const ZRegister& dst, + const PRegister& pg, + const ZRegister& src); + + // TODO: Implement constructors that examine _all_ sources. If `dst` aliases + // any other source register, we can't use `movprfx`. This isn't obviously + // useful, but the MacroAssembler should not generate invalid code for it. + // Valid behaviour can be implemented using `mov`. + // + // The best way to handle this in an instruction-agnostic way is probably to + // use variadic templates. + + private: + inline bool ShouldGenerateMovprfx(const ZRegister& dst, + const ZRegister& src) { + VIXL_ASSERT(AreSameLaneSize(dst, src)); + return !dst.Aliases(src); + } + + inline bool ShouldGenerateMovprfx(const ZRegister& dst, + const PRegister& pg, + const ZRegister& src) { + VIXL_ASSERT(pg.IsMerging() || pg.IsZeroing()); + // We need to emit movprfx in two cases: + // 1. To give a predicated merging unary instruction zeroing predication. + // 2. To make destructive instructions constructive. + // + // There are no predicated zeroing instructions that can take movprfx, so we + // will never generate an unnecessary movprfx with this logic. + return pg.IsZeroing() || ShouldGenerateMovprfx(dst, src); + } +}; + + enum BranchType { // Copies of architectural conditions. // The associated conditions can be used in place of those, the code will @@ -566,7 +616,19 @@ enum BranchType { kBranchTypeFirstCondition = eq, kBranchTypeLastCondition = nv, kBranchTypeFirstUsingReg = reg_zero, - kBranchTypeFirstUsingBit = reg_bit_clear + kBranchTypeFirstUsingBit = reg_bit_clear, + + // SVE branch conditions. + integer_none = eq, + integer_any = ne, + integer_nlast = cs, + integer_last = cc, + integer_first = mi, + integer_nfrst = pl, + integer_pmore = hi, + integer_plast = ls, + integer_tcont = ge, + integer_tstop = lt }; @@ -587,13 +649,21 @@ enum PreShiftImmMode { kAnyShift // Allow any pre-shift. }; +enum FPMacroNaNPropagationOption { + // The default option. This generates a run-time error in macros that respect + // this option. + NoFPMacroNaNPropagationSelected, + // For example, Fmin(result, NaN(a), NaN(b)) always selects NaN(a) if both + // NaN(a) and NaN(b) are both quiet, or both are signalling, at the + // cost of extra code generation in some cases. 
+ StrictNaNPropagation, + // For example, Fmin(result, NaN(a), NaN(b)) selects either NaN, but using the + // fewest instructions. + FastNaNPropagation +}; class MacroAssembler : public Assembler, public MacroAssemblerInterface { public: - explicit MacroAssembler( - PositionIndependentCodeOption pic = PositionIndependentCode); - MacroAssembler(size_t capacity, - PositionIndependentCodeOption pic = PositionIndependentCode); MacroAssembler(byte* buffer, size_t capacity, PositionIndependentCodeOption pic = PositionIndependentCode); @@ -642,9 +712,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { static int MoveImmediateHelper(MacroAssembler* masm, const Register& rd, uint64_t imm); - static bool OneInstrMoveImmediateHelper(MacroAssembler* masm, - const Register& dst, - int64_t imm); // Logical macros. @@ -697,6 +764,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { FlagsUpdate S, AddSubWithCarryOp op); + void Rmif(const Register& xn, unsigned shift, StatusFlags flags); + void Setf8(const Register& wn); + void Setf16(const Register& wn); + // Move macros. void Mov(const Register& rd, uint64_t imm); void Mov(const Register& rd, @@ -710,7 +781,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { // Try to move an immediate into the destination register in a single // instruction. Returns true for success, and updates the contents of dst. // Returns false, otherwise. - bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + bool TryOneInstrMoveImmediate(const Register& dst, uint64_t imm); // Move an immediate into register dst, and return an Operand object for // use with a subsequent instruction that accepts a shift. The value moved @@ -718,7 +789,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { // operation applied to it that will be subsequently undone by the shift // applied in the Operand. Operand MoveImmediateForShiftedOp(const Register& dst, - int64_t imm, + uint64_t imm, PreShiftImmMode mode); void Move(const GenericOperand& dst, const GenericOperand& src); @@ -942,6 +1013,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { void Claim(const Operand& size); void Drop(const Operand& size); + // As above, but for multiples of the SVE vector length. + void ClaimVL(int64_t multiplier) { + // We never need to worry about sp alignment because the VL is always a + // multiple of 16. + VIXL_STATIC_ASSERT((kZRegMinSizeInBytes % 16) == 0); + VIXL_ASSERT(multiplier >= 0); + Addvl(sp, sp, -multiplier); + } + void DropVL(int64_t multiplier) { + VIXL_STATIC_ASSERT((kZRegMinSizeInBytes % 16) == 0); + VIXL_ASSERT(multiplier >= 0); + Addvl(sp, sp, multiplier); + } + // Preserve the callee-saved registers (as defined by AAPCS64). // // Higher-numbered registers are pushed before lower-numbered registers, and @@ -1051,7 +1136,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { SingleEmissionCheckScope guard(this); bfxil(rd, rn, lsb, width); } - void Bind(Label* label); + void Bind(Label* label, BranchTargetIdentifier id = EmitBTI_none); // Bind a label to a specified offset from the start of the buffer. 
void BindToOffset(Label* label, ptrdiff_t offset); void Bl(Label* label) { @@ -1269,8 +1354,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { Condition cond) { VIXL_ASSERT(allow_macro_instructions_); VIXL_ASSERT(!rd.IsZero()); - VIXL_ASSERT(!rn.IsZero()); - VIXL_ASSERT(!rm.IsZero()); VIXL_ASSERT((cond != al) && (cond != nv)); SingleEmissionCheckScope guard(this); csinc(rd, rn, rm, cond); @@ -1281,8 +1364,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { Condition cond) { VIXL_ASSERT(allow_macro_instructions_); VIXL_ASSERT(!rd.IsZero()); - VIXL_ASSERT(!rn.IsZero()); - VIXL_ASSERT(!rm.IsZero()); VIXL_ASSERT((cond != al) && (cond != nv)); SingleEmissionCheckScope guard(this); csinv(rd, rn, rm, cond); @@ -1293,8 +1374,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { Condition cond) { VIXL_ASSERT(allow_macro_instructions_); VIXL_ASSERT(!rd.IsZero()); - VIXL_ASSERT(!rn.IsZero()); - VIXL_ASSERT(!rm.IsZero()); VIXL_ASSERT((cond != al) && (cond != nv)); SingleEmissionCheckScope guard(this); csneg(rd, rn, rm, cond); @@ -1491,13 +1570,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { void Fmov(const VRegister& vd, const VRegister& vn) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - // Only emit an instruction if vd and vn are different, and they are both D - // registers. fmov(s0, s0) is not a no-op because it clears the top word of - // d0. Technically, fmov(d0, d0) is not a no-op either because it clears - // the top of q0, but VRegister does not currently support Q registers. - if (!vd.Is(vn) || !vd.Is64Bits()) { - fmov(vd, vn); - } + // TODO: Use DiscardMoveMode to allow this move to be elided if vd.Is(vn). 
+ fmov(vd, vn); } void Fmov(const VRegister& vd, const Register& rn) { VIXL_ASSERT(allow_macro_instructions_); @@ -1505,21 +1579,23 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { SingleEmissionCheckScope guard(this); fmov(vd, rn); } - void Fmov(const VRegister& vd, const XRegister& xn) { - Fmov(vd, Register(xn)); - } - void Fmov(const VRegister& vd, const WRegister& wn) { - Fmov(vd, Register(wn)); - } void Fmov(const VRegister& vd, int index, const Register& rn) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - fmov(vd, index, rn); + if (vd.Is1D() && (index == 0)) { + mov(vd, index, rn); + } else { + fmov(vd, index, rn); + } } void Fmov(const Register& rd, const VRegister& vn, int index) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - fmov(rd, vn, index); + if (vn.Is1D() && (index == 0)) { + mov(rd, vn, index); + } else { + fmov(rd, vn, index); + } } // Provide explicit double and float interfaces for FP immediate moves, rather @@ -1674,7 +1750,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(casah, Casah) \ V(caslh, Caslh) \ V(casalh, Casalh) -// clang-format on + // clang-format on #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ @@ -1692,7 +1768,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(caspa, Caspa) \ V(caspl, Caspl) \ V(caspal, Caspal) -// clang-format on + // clang-format on #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ void MASM(const Register& rs, \ @@ -1737,7 +1813,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(MASM##alb, ASM##alb) \ V(MASM##ah, ASM##ah) \ V(MASM##alh, ASM##alh) -// clang-format on + // clang-format on #define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM) \ void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ @@ -1777,19 +1853,52 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { void Ldaprb(const Register& rt, const MemOperand& src) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - ldaprb(rt, src); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldaprb(rt, src); + } else { + ldapurb(rt, src); + } + } + + void Ldapursb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursb(rt, src); } void Ldaprh(const Register& rt, const MemOperand& src) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - ldaprh(rt, src); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldaprh(rt, src); + } else { + ldapurh(rt, src); + } + } + + void Ldapursh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursh(rt, src); } void Ldapr(const Register& rt, const MemOperand& src) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - ldapr(rt, src); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldapr(rt, src); + } else { + ldapur(rt, src); + } + } + + void Ldapursw(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursw(rt, src); } void Ldnp(const CPURegister& rt, @@ -1931,6 +2040,16 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { SingleEmissionCheckScope 
guard(this); lsrv(rd, rn, rm); } + void Ldraa(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldraa(xt, src); + } + void Ldrab(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldrab(xt, src); + } void Madd(const Register& rd, const Register& rn, const Register& rm, @@ -1988,6 +2107,21 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { SingleEmissionCheckScope guard(this); msr(sysreg, rt); } + void Cfinv() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cfinv(); + } + void Axflag() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + axflag(); + } + void Xaflag() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xaflag(); + } void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); @@ -2220,17 +2354,32 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { void Stlr(const Register& rt, const MemOperand& dst) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - stlr(rt, dst); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlr(rt, dst); + } else { + stlur(rt, dst); + } } void Stlrb(const Register& rt, const MemOperand& dst) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - stlrb(rt, dst); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlrb(rt, dst); + } else { + stlurb(rt, dst); + } } void Stlrh(const Register& rt, const MemOperand& dst) { VIXL_ASSERT(allow_macro_instructions_); SingleEmissionCheckScope guard(this); - stlrh(rt, dst); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlrh(rt, dst); + } else { + stlurh(rt, dst); + } } void Stllr(const Register& rt, const MemOperand& dst) { VIXL_ASSERT(allow_macro_instructions_); @@ -2500,9 +2649,9 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { if (generate_simulator_code_) { hlt(kUnreachableOpcode); } else { - // Branch to 0 to generate a segfault. - // lr - kInstructionSize is the address of the offending instruction. - blr(xzr); + // Use the architecturally-defined UDF instruction to abort on hardware, + // because using HLT and BRK tends to make the process difficult to debug. 
+ udf(kUnreachableOpcode); } } void Uxtb(const Register& rd, const Register& rn) { @@ -2527,6 +2676,44 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { uxtw(rd, rn); } + void Addg(const Register& xd, + const Register& xn, + int offset, + int tag_offset) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + addg(xd, xn, offset, tag_offset); + } + void Gmi(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + gmi(xd, xn, xm); + } + void Irg(const Register& xd, const Register& xn, const Register& xm = xzr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + irg(xd, xn, xm); + } + void Subg(const Register& xd, + const Register& xn, + int offset, + int tag_offset) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + subg(xd, xn, offset, tag_offset); + } + void Subp(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + subp(xd, xn, xm); + } + void Subps(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + subps(xd, xn, xm); + } + void Cmpp(const Register& xn, const Register& xm) { Subps(xzr, xn, xm); } + // NEON 3 vector register instructions. #define NEON_3VREG_MACRO_LIST(V) \ V(add, Add) \ @@ -2557,7 +2744,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(fminnmp, Fminnmp) \ V(fminp, Fminp) \ V(fmla, Fmla) \ + V(fmlal, Fmlal) \ + V(fmlal2, Fmlal2) \ V(fmls, Fmls) \ + V(fmlsl, Fmlsl) \ + V(fmlsl2, Fmlsl2) \ V(fmulx, Fmulx) \ V(frecps, Frecps) \ V(frsqrts, Frsqrts) \ @@ -2659,7 +2850,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(uzp1, Uzp1) \ V(uzp2, Uzp2) \ V(zip1, Zip1) \ - V(zip2, Zip2) + V(zip2, Zip2) \ + V(smmla, Smmla) \ + V(ummla, Ummla) \ + V(usmmla, Usmmla) \ + V(usdot, Usdot) #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \ @@ -2699,6 +2894,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(fneg, Fneg) \ V(frecpe, Frecpe) \ V(frecpx, Frecpx) \ + V(frint32x, Frint32x) \ + V(frint32z, Frint32z) \ + V(frint64x, Frint64x) \ + V(frint64z, Frint64z) \ V(frinta, Frinta) \ V(frinti, Frinti) \ V(frintm, Frintm) \ @@ -2775,7 +2974,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { #define NEON_BYELEMENT_MACRO_LIST(V) \ V(fmul, Fmul) \ V(fmla, Fmla) \ + V(fmlal, Fmlal) \ + V(fmlal2, Fmlal2) \ V(fmls, Fmls) \ + V(fmlsl, Fmlsl) \ + V(fmlsl2, Fmlsl2) \ V(fmulx, Fmulx) \ V(mul, Mul) \ V(mla, Mla) \ @@ -2803,7 +3006,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(umlal, Umlal) \ V(umlal2, Umlal2) \ V(umlsl, Umlsl) \ - V(umlsl2, Umlsl2) + V(umlsl2, Umlsl2) \ + V(sudot, Sudot) \ + V(usdot, Usdot) + #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ void MASM(const VRegister& vd, \ @@ -2839,8 +3045,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(sri, Sri) \ V(srshr, Srshr) \ V(srsra, Srsra) \ - V(sshll, Sshll) \ - V(sshll2, Sshll2) \ V(sshr, Sshr) \ V(ssra, Ssra) \ V(uqrshrn, Uqrshrn) \ @@ -2850,8 +3054,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { V(uqshrn2, Uqshrn2) \ V(urshr, Urshr) \ V(ursra, Ursra) \ - V(ushll, Ushll) 
\
-  V(ushll2, Ushll2)   \
   V(ushr, Ushr)       \
   V(usra, Usra)
 
@@ -2864,6 +3066,67 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
   NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
 #undef DEFINE_MACRO_ASM_FUNC
 
+#define NEON_2VREG_SHIFT_LONG_MACRO_LIST(V) \
+  V(shll, sshll, Sshll)                     \
+  V(shll, ushll, Ushll)                     \
+  V(shll2, sshll2, Sshll2)                  \
+  V(shll2, ushll2, Ushll2)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM1, ASM2, MASM)                    \
+  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
+    VIXL_ASSERT(allow_macro_instructions_);                        \
+    SingleEmissionCheckScope guard(this);                          \
+    if (vn.GetLaneSizeInBits() == static_cast<unsigned>(shift)) {  \
+      ASM1(vd, vn, shift);                                         \
+    } else {                                                       \
+      ASM2(vd, vn, shift);                                         \
+    }                                                              \
+  }
+  NEON_2VREG_SHIFT_LONG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+
+// SVE 3 vector register instructions.
+#define SVE_3VREG_COMMUTATIVE_MACRO_LIST(V) \
+  V(add, Add)                               \
+  V(and_, And)                              \
+  V(eor, Eor)                               \
+  V(mul, Mul)                               \
+  V(orr, Orr)                               \
+  V(sabd, Sabd)                             \
+  V(shadd, Shadd)                           \
+  V(smax, Smax)                             \
+  V(smin, Smin)                             \
+  V(smulh, Smulh)                           \
+  V(sqadd, Sqadd)                           \
+  V(srhadd, Srhadd)                         \
+  V(uabd, Uabd)                             \
+  V(uhadd, Uhadd)                           \
+  V(umax, Umax)                             \
+  V(umin, Umin)                             \
+  V(umulh, Umulh)                           \
+  V(uqadd, Uqadd)                           \
+  V(urhadd, Urhadd)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
+  void MASM(const ZRegister& zd,                        \
+            const PRegisterM& pg,                       \
+            const ZRegister& zn,                        \
+            const ZRegister& zm) {                      \
+    VIXL_ASSERT(allow_macro_instructions_);             \
+    if (zd.Aliases(zn)) {                               \
+      SingleEmissionCheckScope guard(this);             \
+      ASM(zd, pg, zd, zm);                              \
+    } else if (zd.Aliases(zm)) {                        \
+      SingleEmissionCheckScope guard(this);             \
+      ASM(zd, pg, zd, zn);                              \
+    } else {                                            \
+      MovprfxHelperScope guard(this, zd, pg, zn);       \
+      ASM(zd, pg, zd, zm);                              \
+    }                                                   \
+  }
+  SVE_3VREG_COMMUTATIVE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+
   void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
     VIXL_ASSERT(allow_macro_instructions_);
     SingleEmissionCheckScope guard(this);
@@ -3251,6 +3514,4288 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
     crc32cx(rd, rn, rm);
   }
 
+  // Scalable Vector Extensions.
+  void Abs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    abs(zd, pg, zn);
+  }
+  void Add(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    add(zd, zn, zm);
+  }
+  void Add(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    AddSubHelper(kAddImmediate, zd, zn, imm);
+  }
+  void Addpl(const Register& xd, const Register& xn, int64_t multiplier);
+  void Addvl(const Register& xd, const Register& xn, int64_t multiplier);
+  // Note that unlike the core ISA, SVE's `adr` is not PC-relative.
+ void Adr(const ZRegister& zd, const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + adr(zd, addr); + } + void And(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + and_(pd, pg, pn, pm); + } + void And(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + and_(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. + VIXL_UNIMPLEMENTED(); + } + } + void And(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameLaneSize(zd, zn, zm)); + SingleEmissionCheckScope guard(this); + and_(zd.VnD(), zn.VnD(), zm.VnD()); + } + void Ands(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ands(pd, pg, pn, pm); + } + void Andv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + andv(vd, pg, zn); + } + void Asr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + asr(zd, pg, zd, shift); + } + void Asr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Asr(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + asr(zd, zn, shift); + } + void Asr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + asr(zd, zn, zm); + } + void Asrd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + asrd(zd, pg, zd, shift); + } + void Bic(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Bic(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bic(pd, pg, pn, pm); + } + void Bic(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameLaneSize(zd, zn, zm)); + SingleEmissionCheckScope guard(this); + bic(zd.VnD(), zn.VnD(), zm.VnD()); + } + void Bic(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + bic(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. 
+ VIXL_UNIMPLEMENTED(); + } + } + void Bics(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bics(pd, pg, pn, pm); + } + void Brka(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brka(pd, pg, pn); + } + void Brkas(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkas(pd, pg, pn); + } + void Brkb(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkb(pd, pg, pn); + } + void Brkbs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkbs(pd, pg, pn); + } + void Brkn(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + if (!pd.Aliases(pm)) { + Mov(pd, pm); + } + SingleEmissionCheckScope guard(this); + brkn(pd, pg, pn, pd); + } + void Brkns(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + if (!pd.Aliases(pm)) { + Mov(pd, pm); + } + SingleEmissionCheckScope guard(this); + brkns(pd, pg, pn, pd); + } + void Brkpa(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkpa(pd, pg, pn, pm); + } + void Brkpas(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkpas(pd, pg, pn, pm); + } + void Brkpb(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkpb(pd, pg, pn, pm); + } + void Brkpbs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brkpbs(pd, pg, pn, pm); + } + void Clasta(const Register& rd, + const PRegister& pg, + const Register& rn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clasta(rd, pg, rn, zm); + } + void Clasta(const VRegister& vd, + const PRegister& pg, + const VRegister& vn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clasta(vd, pg, vn, zm); + } + void Clasta(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + void Clastb(const Register& rd, + const PRegister& pg, + const Register& rn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clastb(rd, pg, rn, zm); + } + void Clastb(const VRegister& vd, + const PRegister& pg, 
+ const VRegister& vn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clastb(vd, pg, vn, zm); + } + void Clastb(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + void Cls(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cls(zd, pg, zn); + } + void Clz(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clz(zd, pg, zn); + } + void Cmpeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmpeq(pd, pg, zn, zm); + } + void Cmpeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmpeq(pd, pg, zn, imm5); + } else { + CompareHelper(eq, pd, pg, zn, imm); + } + } + void Cmpge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmpge(pd, pg, zn, zm); + } + void Cmpge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmpge(pd, pg, zn, imm5); + } else { + CompareHelper(ge, pd, pg, zn, imm); + } + } + void Cmpgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmpgt(pd, pg, zn, zm); + } + void Cmpgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmpgt(pd, pg, zn, imm5); + } else { + CompareHelper(gt, pd, pg, zn, imm); + } + } + void Cmphi(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmphi(pd, pg, zn, zm); + } + void Cmphi(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + if (imm.IsUintN(7)) { + SingleEmissionCheckScope guard(this); + cmphi(pd, pg, zn, static_cast<unsigned>(imm.AsUintN(7))); + } else { + CompareHelper(hi, pd, pg, zn, imm); + } + } + void Cmphs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmphs(pd, pg, zn, zm); + } + void Cmphs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + if (imm.IsUintN(7)) { + SingleEmissionCheckScope guard(this); + cmphs(pd, pg, zn, static_cast<unsigned>(imm.AsUintN(7))); + } else { + CompareHelper(hs, pd, pg, zn, imm); + } + } + void Cmple(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { +
VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmple(pd, pg, zn, zm); + } + void Cmple(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmple(pd, pg, zn, imm5); + } else { + CompareHelper(le, pd, pg, zn, imm); + } + } + void Cmplo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmplo(pd, pg, zn, zm); + } + void Cmplo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + if (imm.IsUintN(7)) { + SingleEmissionCheckScope guard(this); + cmplo(pd, pg, zn, static_cast<unsigned>(imm.AsUintN(7))); + } else { + CompareHelper(lo, pd, pg, zn, imm); + } + } + void Cmpls(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmpls(pd, pg, zn, zm); + } + void Cmpls(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + if (imm.IsUintN(7)) { + SingleEmissionCheckScope guard(this); + cmpls(pd, pg, zn, static_cast<unsigned>(imm.AsUintN(7))); + } else { + CompareHelper(ls, pd, pg, zn, imm); + } + } + void Cmplt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmplt(pd, pg, zn, zm); + } + void Cmplt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmplt(pd, pg, zn, imm5); + } else { + CompareHelper(lt, pd, pg, zn, imm); + } + } + void Cmpne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmpne(pd, pg, zn, zm); + } + void Cmpne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + int imm5; + if (imm.TryEncodeAsIntNForLane<5>(zn, &imm5)) { + SingleEmissionCheckScope guard(this); + cmpne(pd, pg, zn, imm5); + } else { + CompareHelper(ne, pd, pg, zn, imm); + } + } + void Cnot(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cnot(zd, pg, zn); + } + void Cnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cnt(zd, pg, zn); + } + void Cntb(const Register& rd, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cntb(rd, pattern, multiplier); + } + void Cntd(const Register& rd, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cntd(rd, pattern, multiplier); + } + void Cnth(const Register& rd, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); +
cnth(rd, pattern, multiplier); + } + void Cntp(const Register& rd, + const PRegister& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + // The `cntp` instruction architecturally takes an X register, but the + // result will always be in the range [0, kPRegMaxSize] (and therefore + // always fits in a W register), so we can accept a W-sized rd here. + cntp(rd.X(), pg, pn); + } + void Cntw(const Register& rd, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cntw(rd, pattern, multiplier); + } + void Compact(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + compact(zd, pg, zn); + } + void Cpy(const ZRegister& zd, const PRegister& pg, IntegerOperand imm); + void Cpy(const ZRegister& zd, const PRegisterM& pg, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpy(zd, pg, rn); + } + void Cpy(const ZRegister& zd, const PRegisterM& pg, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpy(zd, pg, vn); + } + void Ctermeq(const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ctermeq(rn, rm); + } + void Ctermne(const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ctermne(rn, rm); + } + void Decb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decb(rdn, pattern, multiplier); + } + void Decd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decd(rdn, pattern, multiplier); + } + void Decd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decd(zdn, pattern, multiplier); + } + void Dech(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dech(rdn, pattern, multiplier); + } + void Dech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dech(zdn, pattern, multiplier); + } + void Decp(const Register& rdn, const PRegisterWithLaneSize& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decp(rdn, pg); + } + void Decp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `decp` writes every lane, so use an unpredicated movprfx. 
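+ // (A rough sketch of the expansion, assuming zd and zn are distinct
+ // registers: `movprfx zd, zn` copies zn into zd, then `decp zd.T, pg`
+ // decrements every lane of zd by the number of active elements in pg.)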
+ MovprfxHelperScope guard(this, zd, zn); + decp(zd, pg); + } + void Decp(const ZRegister& zdn, const PRegister& pg) { Decp(zdn, pg, zdn); } + void Decw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decw(rdn, pattern, multiplier); + } + void Decw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + decw(zdn, pattern, multiplier); + } + void Dup(const ZRegister& zd, const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(zd, xn); + } + void Dup(const ZRegister& zd, const ZRegister& zn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(zd, zn, index); + } + void Dup(const ZRegister& zd, IntegerOperand imm); + void Eon(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + eon(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. + VIXL_UNIMPLEMENTED(); + } + } + void Eor(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + eor(pd, pg, pn, pm); + } + void Eor(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + eor(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. 
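+ // (Until then, the immediate must be encodable as a logical (bitmask)
+ // immediate for the lane size, e.g. 0x00ff00ff00ff00ff for D lanes.)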
+ VIXL_UNIMPLEMENTED(); + } + } + void Eor(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameLaneSize(zd, zn, zm)); + SingleEmissionCheckScope guard(this); + eor(zd.VnD(), zn.VnD(), zm.VnD()); + } + void Eors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + eors(pd, pg, pn, pm); + } + void Eorv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + eorv(vd, pg, zn); + } + void Ext(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + unsigned offset) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ext(zd, zn, zm, offset); + } + void Fabd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option); + void Fabs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fabs(zd, pg, zn); + } + void Facge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + facge(pd, pg, zn, zm); + } + void Facgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + facgt(pd, pg, zn, zm); + } + void Facle(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + facge(pd, pg, zm, zn); + } + void Faclt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + facgt(pd, pg, zm, zn); + } + void Fadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fadd(zd, pg, zd, imm); + } + void Fadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option); + void Fadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fadd(zd, zn, zm); + } + void Fadda(const VRegister& vd, + const PRegister& pg, + const VRegister& vn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fadda(vd, pg, vn, zm); + } + void Faddv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + faddv(vd, pg, zn); + } + void Fcadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Fcmeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmeq(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. 
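+ // (The underlying `fcmeq` form only encodes a comparison with #0.0; any
+ // other value would first need materialising into a scratch Z register.)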
+ VIXL_UNIMPLEMENTED(); + } + } + void Fcmeq(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmeq(pd, pg, zn, zm); + } + void Fcmge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmge(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. + VIXL_UNIMPLEMENTED(); + } + } + void Fcmge(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmge(pd, pg, zn, zm); + } + void Fcmgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmgt(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. + VIXL_UNIMPLEMENTED(); + } + } + void Fcmgt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmgt(pd, pg, zn, zm); + } + void Fcmla(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Fcmla(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmla(zda, zn, zm, index, rot); + } + void Fcmle(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmle(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. + VIXL_UNIMPLEMENTED(); + } + } + void Fcmle(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmge(pd, pg, zm, zn); + } + void Fcmlt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmlt(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. + VIXL_UNIMPLEMENTED(); + } + } + void Fcmlt(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmgt(pd, pg, zm, zn); + } + void Fcmne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + double zero) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (zero == 0.0) { + fcmne(pd, pg, zn, zero); + } else { + // TODO: Synthesise other immediates. 
+ VIXL_UNIMPLEMENTED(); + } + } + void Fcmne(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmne(pd, pg, zn, zm); + } + void Fcmuo(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmuo(pd, pg, zn, zm); + } + void Fcpy(const ZRegister& zd, const PRegisterM& pg, double imm); + void Fcpy(const ZRegister& zd, const PRegisterM& pg, float imm); + void Fcpy(const ZRegister& zd, const PRegisterM& pg, Float16 imm); + void Fcvt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvt(zd, pg, zn); + } + void Fcvt(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + // The element type in this predicated movprfx is determined by the larger + // type between the source and destination. + int lane_size = std::max(zd.GetLaneSizeInBits(), zn.GetLaneSizeInBits()); + MovprfxHelperScope guard(this, + zd.WithLaneSize(lane_size), + pg, + zn.WithLaneSize(lane_size)); + fcvt(zd, pg.Merging(), zn); + } + void Fcvtzs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzs(zd, pg, zn); + } + void Fcvtzu(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzu(zd, pg, zn); + } + void Fdiv(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fdup(const ZRegister& zd, double imm); + void Fdup(const ZRegister& zd, float imm); + void Fdup(const ZRegister& zd, Float16 imm); + void Fexpa(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fexpa(zd, zn); + } + void Fmad(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmad(zdn, pg, zm, za); + } + void Fmax(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fmax(zd, pg, zd, imm); + } + void Fmax( + const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Fmaxnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fmaxnm(zd, pg, zd, imm); + } + void Fmaxnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option); + void Fmaxnmv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmaxnmv(vd, pg, zn); + } + void Fmaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmaxv(vd, pg, zn); + } + void Fmin(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) 
{ + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fmin(zd, pg, zd, imm); + } + void Fmin( + const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Fminnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fminnm(zd, pg, zd, imm); + } + void Fminnm(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option); + void Fminnmv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fminnmv(vd, pg, zn); + } + void Fminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fminv(vd, pg, zn); + } + // zd = za + (zn * zm) + void Fmla( + const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Fmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + // zd = za - (zn * zm) + void Fmls( + const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Fmls(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Fmov(const ZRegister& zd, double imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fdup(zd, imm); + } + void Fmov(const ZRegister& zd, float imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fdup(zd, imm); + } + void Fmov(const ZRegister& zd, Float16 imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fdup(zd, imm); + } + void Fmov(const ZRegister& zd, const PRegisterM& pg, double imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fcpy(zd, pg, imm); + } + void Fmov(const ZRegister& zd, const PRegisterM& pg, float imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fcpy(zd, pg, imm); + } + void Fmov(const ZRegister& zd, const PRegisterM& pg, Float16 imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fcpy(zd, pg, imm); + } + void Fmsb(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zm, + const ZRegister& za) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmsb(zdn, pg, zm, za); + } + void Fmul(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fmul(zd, pg, zd, imm); + } + void Fmul(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option); + void Fmul(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + unsigned index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmul(zd, zn, zm, index); + } + void Fmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmul(zd, zn, zm); + } + void Fmulx(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm, + 
FPMacroNaNPropagationOption nan_option); + void Fneg(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fneg(zd, pg, zn); + } + void Fnmla( + const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Fnmls( + const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + FPMacroNaNPropagationOption nan_option = NoFPMacroNaNPropagationSelected); + void Frecpe(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frecpe(zd, zn); + } + void Frecps(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frecps(zd, zn, zm); + } + void Frecpx(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frecpx(zd, pg, zn); + } + void Frecpx(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frecpx(zd, pg.Merging(), zn); + } + void Frinta(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frinta(zd, pg, zn); + } + void Frinta(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frinta(zd, pg.Merging(), zn); + } + void Frinti(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frinti(zd, pg, zn); + } + void Frinti(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frinti(zd, pg.Merging(), zn); + } + void Frintm(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frintm(zd, pg, zn); + } + void Frintm(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frintm(zd, pg.Merging(), zn); + } + void Frintn(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frintn(zd, pg, zn); + } + void Frintn(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frintn(zd, pg.Merging(), zn); + } + void Frintp(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frintp(zd, pg, zn); + } + void Frintp(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frintp(zd, pg.Merging(), zn); + } + void Frintx(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frintx(zd, pg, zn); + } + void Frintx(const 
ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frintx(zd, pg.Merging(), zn); + } + void Frintz(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frintz(zd, pg, zn); + } + void Frintz(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + frintz(zd, pg.Merging(), zn); + } + void Frsqrte(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frsqrte(zd, zn); + } + void Frsqrts(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + frsqrts(zd, zn, zm); + } + void Fscale(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fsqrt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fsqrt(zd, pg, zn); + } + void Fsqrt(const ZRegister& zd, const PRegisterZ& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fsqrt(zd, pg.Merging(), zn); + } + void Fsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + double imm) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fsub(zd, pg, zd, imm); + } + void Fsub(const ZRegister& zd, + const PRegisterM& pg, + double imm, + const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + fsubr(zd, pg, zd, imm); + } + void Fsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fsub(zd, zn, zm); + } + void Ftmad(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int imm3); + void Ftsmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ftsmul(zd, zn, zm); + } + void Ftssel(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ftssel(zd, zn, zm); + } + void Incb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incb(rdn, pattern, multiplier); + } + void Incd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incd(rdn, pattern, multiplier); + } + void Incd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incd(zdn, pattern, multiplier); + } + void Inch(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + inch(rdn, pattern, multiplier); + } + void Inch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope 
guard(this); + inch(zdn, pattern, multiplier); + } + void Incp(const Register& rdn, const PRegisterWithLaneSize& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incp(rdn, pg); + } + void Incp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `incp` writes every lane, so use an unpredicated movprfx. + MovprfxHelperScope guard(this, zd, zn); + incp(zd, pg); + } + void Incp(const ZRegister& zdn, const PRegister& pg) { Incp(zdn, pg, zdn); } + void Incw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incw(rdn, pattern, multiplier); + } + void Incw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + incw(zdn, pattern, multiplier); + } + void Index(const ZRegister& zd, const Operand& start, const Operand& step); + void Insr(const ZRegister& zdn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + insr(zdn, rm); + } + void Insr(const ZRegister& zdn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + insr(zdn, vm); + } + void Insr(const ZRegister& zdn, IntegerOperand imm); + void Lasta(const Register& rd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lasta(rd, pg, zn); + } + void Lasta(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lasta(vd, pg, zn); + } + void Lastb(const Register& rd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lastb(rd, pg, zn); + } + void Lastb(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lastb(vd, pg, zn); + } + void Ld1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rb, + kBRegSizeInBytes); + } + void Ld1rh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rh, + kHRegSizeInBytes); + } + void Ld1rw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rw, + kSRegSizeInBytes); + } + void Ld1rd(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rd, + kDRegSizeInBytes); + } + void Ld1rqb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rqd(const ZRegister& zt, 
+ const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rqh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rqw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rob(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rod(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1roh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1row(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1rsb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rsb, + kBRegSizeInBytes); + } + void Ld1rsh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rsh, + kHRegSizeInBytes); + } + void Ld1rsw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadBroadcastImmHelper(zt, + pg, + addr, + &MacroAssembler::ld1rsw, + kSRegSizeInBytes); + } + void Ld1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ld2b(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2b(zt1, zt2, pg, addr); + } + void Ld2h(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2h(zt1, zt2, pg, addr); + } + void Ld2w(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2w(zt1, zt2, pg, addr); + } + void Ld2d(const ZRegister& zt1, + const ZRegister& zt2, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2d(zt1, zt2, pg, addr); + } + void Ld3b(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3b(zt1, zt2, zt3, pg, addr); + } + void Ld3h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3h(zt1, zt2, zt3, pg, addr); + } + void Ld3w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3w(zt1, zt2, zt3, pg, addr); + } + void Ld3d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3d(zt1, zt2, zt3, pg, addr); + } + void Ld4b(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& 
zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4b(zt1, zt2, zt3, zt4, pg, addr); + } + void Ld4h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4h(zt1, zt2, zt3, zt4, pg, addr); + } + void Ld4w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4w(zt1, zt2, zt3, zt4, pg, addr); + } + void Ld4d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4d(zt1, zt2, zt3, zt4, pg, addr); + } + void Ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1b(zt, pg, xn, zm); + } + void Ldff1b(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1b(zt, pg, zn, imm5); + } + void Ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1d(zt, pg, xn, zm); + } + void Ldff1d(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1d(zt, pg, zn, imm5); + } + void Ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1h(zt, pg, xn, zm); + } + void Ldff1h(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1h(zt, pg, zn, imm5); + } + void Ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1sb(zt, pg, xn, zm); + } + void Ldff1sb(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1sb(zt, pg, zn, imm5); + } + void Ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); 
+ SingleEmissionCheckScope guard(this); + ldff1sh(zt, pg, xn, zm); + } + void Ldff1sh(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1sh(zt, pg, zn, imm5); + } + void Ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1sw(zt, pg, xn, zm); + } + void Ldff1sw(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1sw(zt, pg, zn, imm5); + } + void Ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const Register& xn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1w(zt, pg, xn, zm); + } + void Ldff1w(const ZRegister& zt, + const PRegisterZ& pg, + const ZRegister& zn, + int imm5) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldff1w(zt, pg, zn, imm5); + } + void Ldnf1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1b(zt, pg, addr); + } + void Ldnf1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1d(zt, pg, addr); + } + void Ldnf1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1h(zt, pg, addr); + } + void Ldnf1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1sb(zt, pg, addr); + } + void Ldnf1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1sh(zt, pg, addr); + } + void Ldnf1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1sw(zt, pg, addr); + } + void Ldnf1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnf1w(zt, pg, addr); + } + void Ldnt1b(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldnt1d(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldnt1h(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldnt1w(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr); + void Ldr(const CPURegister& rt, const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadStoreScalarImmHelper(rt, addr, &MacroAssembler::ldr); + } + void Lsl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + lsl(zd, pg, zd, shift); + } + void Lsl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Lsl(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lsl(zd, zn, 
shift); + } + void Lsl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lsl(zd, zn, zm); + } + void Lsr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + lsr(zd, pg, zd, shift); + } + void Lsr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Lsr(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lsr(zd, zn, shift); + } + void Lsr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + lsr(zd, zn, zm); + } + void Mov(const PRegister& pd, const PRegister& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(pd.VnB(), pn.VnB()); + } + void Mov(const PRegisterWithLaneSize& pd, + const PRegisterM& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(pd, pg, pn); + } + void Mov(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(pd, pg, pn); + } + void Mov(const ZRegister& zd, const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, xn); + } + + void Mov(const ZRegister& zd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, vn); + } + + void Mov(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, zn); + } + void Mov(const ZRegister& zd, const ZRegister& zn, unsigned index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, zn, index); + } + void Mov(const ZRegister& zd, const PRegister& pg, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + Cpy(zd, pg, imm); + } + // TODO: support zeroing predicated moves using movprfx. 
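+ // (Only merging predicated moves are provided below; zeroing overloads
+ // would presumably be built on movprfx's zeroing predication.)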
+ void Mov(const ZRegister& zd, const PRegisterM& pg, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, pg, rn); + } + void Mov(const ZRegister& zd, const PRegisterM& pg, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, pg, vn); + } + void Mov(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(zd, pg, zn); + } + void Mov(const ZRegister& zd, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + Dup(zd, imm); + } + void Movs(const PRegister& pd, const PRegister& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + movs(pd, pn); + } + void Movs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + movs(pd, pg, pn); + } + // zd = za + (zn * zm) + void Mla(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + // zd = za - (zn * zm) + void Mls(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Mul(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm); + void Nand(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nand(pd, pg, pn, pm); + } + void Nands(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nands(pd, pg, pn, pm); + } + // There is no instruction with this form, but we can implement it using + // `subr`. 
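+ // (Sketch: `Neg(zd, zn)` becomes `movprfx zd, zn` followed by
+ // `subr zd.T, zd.T, #0`, i.e. zd = 0 - zn, lane by lane.)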
+ void Neg(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, zn); + subr(zd, zd, 0); + } + void Neg(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + neg(zd, pg, zn); + } + void Nor(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nor(pd, pg, pn, pm); + } + void Nors(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nors(pd, pg, pn, pm); + } + void Not(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + not_(pd, pg, pn); + } + void Not(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + not_(zd, pg, zn); + } + void Nots(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nots(pd, pg, pn); + } + void Orn(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orn(pd, pg, pn, pm); + } + void Orn(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + orn(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. + VIXL_UNIMPLEMENTED(); + } + } + void Orns(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orns(pd, pg, pn, pm); + } + void Orr(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orr(pd, pg, pn, pm); + } + void Orr(const ZRegister& zd, const ZRegister& zn, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (IsImmLogical(imm, zd.GetLaneSizeInBits())) { + orr(zd, zn, imm); + } else { + // TODO: Synthesise the immediate once 'Mov' is implemented. 
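+ // (As for `Eon` and `Eor` above, only bitmask-encodable immediates are
+ // accepted until that synthesis exists.)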
+ VIXL_UNIMPLEMENTED(); + } + } + void Orr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameLaneSize(zd, zn, zm)); + SingleEmissionCheckScope guard(this); + orr(zd.VnD(), zn.VnD(), zm.VnD()); + } + void Orrs(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orrs(pd, pg, pn, pm); + } + void Orv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orv(vd, pg, zn); + } + void Pfalse(const PRegister& pd) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(pd.IsUnqualified()); + SingleEmissionCheckScope guard(this); + // No matter what the lane size is, overall this operation just writes zeros + // throughout the register. + pfalse(pd.VnB()); + } + void Pfirst(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + void Pnext(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn); + void Prfb(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + prfb(prfop, pg, addr); + } + void Prfh(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + prfh(prfop, pg, addr); + } + void Prfw(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + prfw(prfop, pg, addr); + } + void Prfd(PrefetchOperation prfop, + const PRegister& pg, + const SVEMemOperand addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + prfd(prfop, pg, addr); + } + void Ptest(const PRegister& pg, const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ptest(pg, pn); + } + void Ptrue(const PRegisterWithLaneSize& pd, + SVEPredicateConstraint pattern, + FlagsUpdate s); + void Ptrue(const PRegisterWithLaneSize& pd, + SVEPredicateConstraint pattern = SVE_ALL) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ptrue(pd, pattern); + } + void Ptrues(const PRegisterWithLaneSize& pd, + SVEPredicateConstraint pattern = SVE_ALL) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ptrues(pd, pattern); + } + void Punpkhi(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + punpkhi(pd, pn); + } + void Punpklo(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + punpklo(pd, pn); + } + void Rbit(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rbit(zd, pg, zn); + } + void Rdffr(const PRegister& pd) { + VIXL_ASSERT(allow_macro_instructions_); + // Although this is essentially just a move, it writes every bit and so can + // only support b-sized lanes because other lane sizes would implicitly clear + // bits in `pd`.
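+ // (For example, `Rdffr(p0)` emits `rdffr p0.b`.)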
+ VIXL_ASSERT(!pd.HasLaneSize() || pd.IsLaneSizeB()); + VIXL_ASSERT(pd.IsUnqualified()); + SingleEmissionCheckScope guard(this); + rdffr(pd.VnB()); + } + void Rdffr(const PRegisterWithLaneSize& pd, const PRegisterZ& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rdffr(pd, pg); + } + void Rdffrs(const PRegisterWithLaneSize& pd, const PRegisterZ& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rdffrs(pd, pg); + } + // Note that there is no `rdpl` instruction, but this macro emulates it (for + // symmetry with `Rdvl`). + void Rdpl(const Register& xd, int64_t multiplier) { + VIXL_ASSERT(allow_macro_instructions_); + Addpl(xd, xzr, multiplier); + } + void Rdvl(const Register& xd, int64_t multiplier) { + VIXL_ASSERT(allow_macro_instructions_); + Addvl(xd, xzr, multiplier); + } + void Rev(const PRegisterWithLaneSize& pd, const PRegisterWithLaneSize& pn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rev(pd, pn); + } + void Rev(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rev(zd, zn); + } + void Revb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + revb(zd, pg, zn); + } + void Revh(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + revh(zd, pg, zn); + } + void Revw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + revw(zd, pg, zn); + } + void Saddv(const VRegister& dd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddv(dd, pg, zn); + } + void Scvtf(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + scvtf(zd, pg, zn); + } + void Sdiv(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sel(const PRegisterWithLaneSize& pd, + const PRegister& pg, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sel(pd, pg, pn, pm); + } + void Sel(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sel(zd, pg, zn, zm); + } + void Setffr() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setffr(); + } + void Smax(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm); + void Smaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smaxv(vd, pg, zn); + } + void Smin(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm); + void Sminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sminv(vd, pg, zn); + } + void 
Splice(const ZRegister& zd, + const PRegister& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sqadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqadd(zd, zn, zm); + } + void Sqadd(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(imm.IsUint8() || + (imm.IsUint16() && ((imm.AsUint16() & 0xff) == 0))); + MovprfxHelperScope guard(this, zd, zn); + sqadd(zd, zd, imm.AsUint16()); + } + void Sqdecb(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecb(xd, wn, pattern, multiplier); + } + void Sqdecb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecb(rdn, pattern, multiplier); + } + void Sqdecd(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecd(xd, wn, pattern, multiplier); + } + void Sqdecd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecd(rdn, pattern, multiplier); + } + void Sqdecd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecd(zdn, pattern, multiplier); + } + void Sqdech(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdech(xd, wn, pattern, multiplier); + } + void Sqdech(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdech(rdn, pattern, multiplier); + } + void Sqdech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdech(zdn, pattern, multiplier); + } + void Sqdecp(const Register& xdn, + const PRegisterWithLaneSize& pg, + const Register& wdn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecp(xdn, pg, wdn); + } + void Sqdecp(const Register& xdn, const PRegisterWithLaneSize& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecp(xdn, pg); + } + void Sqdecp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `sqdecp` writes every lane, so use an unpredicated movprfx. 
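+ // Sketch of the expansion (illustrative register choices, not literal
+ // output): with distinct registers this emits roughly
+ //   movprfx z0, z1    // copy the source into the destination
+ //   sqdecp z0.h, p0   // then saturating-decrement in place
+ // and no movprfx at all when `zd` already aliases `zn`.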
+ MovprfxHelperScope guard(this, zd, zn); + sqdecp(zd, pg); + } + void Sqdecp(const ZRegister& zdn, const PRegister& pg) { + Sqdecp(zdn, pg, zdn); + } + void Sqdecw(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecw(xd, wn, pattern, multiplier); + } + void Sqdecw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecw(rdn, pattern, multiplier); + } + void Sqdecw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdecw(zdn, pattern, multiplier); + } + void Sqincb(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincb(xd, wn, pattern, multiplier); + } + void Sqincb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincb(rdn, pattern, multiplier); + } + void Sqincd(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincd(xd, wn, pattern, multiplier); + } + void Sqincd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincd(rdn, pattern, multiplier); + } + void Sqincd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincd(zdn, pattern, multiplier); + } + void Sqinch(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqinch(xd, wn, pattern, multiplier); + } + void Sqinch(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqinch(rdn, pattern, multiplier); + } + void Sqinch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqinch(zdn, pattern, multiplier); + } + void Sqincp(const Register& xdn, + const PRegisterWithLaneSize& pg, + const Register& wdn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincp(xdn, pg, wdn); + } + void Sqincp(const Register& xdn, const PRegisterWithLaneSize& pg) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincp(xdn, pg); + } + void Sqincp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `sqincp` writes every lane, so use an unpredicated movprfx. 
+ MovprfxHelperScope guard(this, zd, zn); + sqincp(zd, pg); + } + void Sqincp(const ZRegister& zdn, const PRegister& pg) { + Sqincp(zdn, pg, zdn); + } + void Sqincw(const Register& xd, + const Register& wn, + int pattern = SVE_ALL, + int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincw(xd, wn, pattern, multiplier); + } + void Sqincw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincw(rdn, pattern, multiplier); + } + void Sqincw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqincw(zdn, pattern, multiplier); + } + void Sqsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqsub(zd, zn, zm); + } + void Sqsub(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(imm.IsUint8() || + (imm.IsUint16() && ((imm.AsUint16() & 0xff) == 0))); + MovprfxHelperScope guard(this, zd, zn); + sqsub(zd, zd, imm.AsUint16()); + } + void St1b(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void St1h(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void St1w(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void St1d(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void St2b(const ZRegister& zt1, + const ZRegister& zt2, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2b(zt1, zt2, pg, addr); + } + void St2h(const ZRegister& zt1, + const ZRegister& zt2, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2h(zt1, zt2, pg, addr); + } + void St2w(const ZRegister& zt1, + const ZRegister& zt2, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2w(zt1, zt2, pg, addr); + } + void St2d(const ZRegister& zt1, + const ZRegister& zt2, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2d(zt1, zt2, pg, addr); + } + void St3b(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3b(zt1, zt2, zt3, pg, addr); + } + void St3h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3h(zt1, zt2, zt3, pg, addr); + } + void St3w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3w(zt1, zt2, zt3, pg, addr); + } + void St3d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3d(zt1, zt2, zt3, pg, addr); + } + void St4b(const ZRegister& 
zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4b(zt1, zt2, zt3, zt4, pg, addr); + } + void St4h(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4h(zt1, zt2, zt3, zt4, pg, addr); + } + void St4w(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4w(zt1, zt2, zt3, zt4, pg, addr); + } + void St4d(const ZRegister& zt1, + const ZRegister& zt2, + const ZRegister& zt3, + const ZRegister& zt4, + const PRegister& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4d(zt1, zt2, zt3, zt4, pg, addr); + } + void Stnt1b(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void Stnt1d(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void Stnt1h(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void Stnt1w(const ZRegister& zt, + const PRegister& pg, + const SVEMemOperand& addr); + void Str(const CPURegister& rt, const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SVELoadStoreScalarImmHelper(rt, addr, &MacroAssembler::str); + } + void Sub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sub(zd, zn, zm); + } + void Sub(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubHelper(kSubImmediate, zd, zn, imm); + } + void Sub(const ZRegister& zd, IntegerOperand imm, const ZRegister& zm); + void Sunpkhi(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sunpkhi(zd, zn); + } + void Sunpklo(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sunpklo(zd, zn); + } + void Sxtb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sxtb(zd, pg, zn); + } + void Sxth(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sxth(zd, pg, zn); + } + void Sxtw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sxtw(zd, pg, zn); + } + void Tbl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(zd, zn, zm); + } + void Trn1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + trn1(pd, pn, pm); + } + void Trn1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + 
VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + trn1(zd, zn, zm); + } + void Trn2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + trn2(pd, pn, pm); + } + void Trn2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + trn2(zd, zn, zm); + } + void Uaddv(const VRegister& dd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uaddv(dd, pg, zn); + } + void Ucvtf(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ucvtf(zd, pg, zn); + } + void Udiv(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Udot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Udot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Umax(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm); + void Umaxv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umaxv(vd, pg, zn); + } + void Umin(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm); + void Uminv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uminv(vd, pg, zn); + } + void Uqadd(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqadd(zd, zn, zm); + } + void Uqadd(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(imm.IsUint8() || + (imm.IsUint16() && ((imm.AsUint16() & 0xff) == 0))); + MovprfxHelperScope guard(this, zd, zn); + uqadd(zd, zd, imm.AsUint16()); + } + void Uqdecb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdecb(rdn, pattern, multiplier); + } + void Uqdecd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdecd(rdn, pattern, multiplier); + } + void Uqdecd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdecd(zdn, pattern, multiplier); + } + void Uqdech(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdech(rdn, pattern, multiplier); + } + void Uqdech(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdech(zdn, pattern, multiplier); + } + // The saturation is based on the size of `rn`. The result is zero-extended + // into `rd`, which must be at least as big. 
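+ // Hypothetical usage: `Uqdecp(x0, p0.VnB(), w0)` decrements the 32-bit
+ // count in `w0` by the number of active byte lanes of `p0`, saturating
+ // at zero; the write to `w0` then zero-extends into `x0`.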
+ void Uqdecp(const Register& rd, + const PRegisterWithLaneSize& pg, + const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(rd.Aliases(rn)); + VIXL_ASSERT(rd.GetSizeInBytes() >= rn.GetSizeInBytes()); + SingleEmissionCheckScope guard(this); + if (rn.Is64Bits()) { + uqdecp(rd, pg); + } else { + // Convert <Xd> into <Wd>, to make this more consistent with Sqdecp. + uqdecp(rd.W(), pg); + } + } + void Uqdecp(const Register& rdn, const PRegisterWithLaneSize& pg) { + Uqdecp(rdn, pg, rdn); + } + void Uqdecp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `uqdecp` writes every lane, so use an unpredicated movprfx. + MovprfxHelperScope guard(this, zd, zn); + uqdecp(zd, pg); + } + void Uqdecp(const ZRegister& zdn, const PRegister& pg) { + Uqdecp(zdn, pg, zdn); + } + void Uqdecw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdecw(rdn, pattern, multiplier); + } + void Uqdecw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqdecw(zdn, pattern, multiplier); + } + void Uqincb(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqincb(rdn, pattern, multiplier); + } + void Uqincd(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqincd(rdn, pattern, multiplier); + } + void Uqincd(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqincd(zdn, pattern, multiplier); + } + void Uqinch(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqinch(rdn, pattern, multiplier); + } + void Uqinch(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqinch(zdn, pattern, multiplier); + } + // The saturation is based on the size of `rn`. The result is zero-extended + // into `rd`, which must be at least as big. + void Uqincp(const Register& rd, + const PRegisterWithLaneSize& pg, + const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(rd.Aliases(rn)); + VIXL_ASSERT(rd.GetSizeInBytes() >= rn.GetSizeInBytes()); + SingleEmissionCheckScope guard(this); + if (rn.Is64Bits()) { + uqincp(rd, pg); + } else { + // Convert <Xd> into <Wd>, to make this more consistent with Sqincp. + uqincp(rd.W(), pg); + } + } + void Uqincp(const Register& rdn, const PRegisterWithLaneSize& pg) { + Uqincp(rdn, pg, rdn); + } + void Uqincp(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameFormat(zd, zn)); + // `uqincp` writes every lane, so use an unpredicated movprfx.
+ MovprfxHelperScope guard(this, zd, zn); + uqincp(zd, pg); + } + void Uqincp(const ZRegister& zdn, const PRegister& pg) { + Uqincp(zdn, pg, zdn); + } + void Uqincw(const Register& rdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqincw(rdn, pattern, multiplier); + } + void Uqincw(const ZRegister& zdn, int pattern = SVE_ALL, int multiplier = 1) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqincw(zdn, pattern, multiplier); + } + void Uqsub(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqsub(zd, zn, zm); + } + void Uqsub(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(imm.IsUint8() || + (imm.IsUint16() && ((imm.AsUint16() & 0xff) == 0))); + MovprfxHelperScope guard(this, zd, zn); + uqsub(zd, zd, imm.AsUint16()); + } + void Uunpkhi(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uunpkhi(zd, zn); + } + void Uunpklo(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uunpklo(zd, zn); + } + void Uxtb(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uxtb(zd, pg, zn); + } + void Uxth(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uxth(zd, pg, zn); + } + void Uxtw(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uxtw(zd, pg, zn); + } + void Uzp1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uzp1(pd, pn, pm); + } + void Uzp1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uzp1(zd, zn, zm); + } + void Uzp2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uzp2(pd, pn, pm); + } + void Uzp2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uzp2(zd, zn, zm); + } + void Whilele(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilele(pd, rn, rm); + } + void Whilelo(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilelo(pd, rn, rm); + } + void Whilels(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilels(pd, rn, rm); + } + void Whilelt(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilelt(pd, rn, rm); + } + void Wrffr(const 
PRegister& pn) { + VIXL_ASSERT(allow_macro_instructions_); + // Although this is essentially just a move, it writes every bit and so can + // only support b-sized lane because other lane sizes would implicitly clear + // bits in `ffr`. + VIXL_ASSERT(!pn.HasLaneSize() || pn.IsLaneSizeB()); + VIXL_ASSERT(pn.IsUnqualified()); + SingleEmissionCheckScope guard(this); + wrffr(pn.VnB()); + } + void Zip1(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + zip1(pd, pn, pm); + } + void Zip1(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + zip1(zd, zn, zm); + } + void Zip2(const PRegisterWithLaneSize& pd, + const PRegisterWithLaneSize& pn, + const PRegisterWithLaneSize& pm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + zip2(pd, pn, pm); + } + void Zip2(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + zip2(zd, zn, zm); + } + + // SVE2 + void Adclb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Adclt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Addhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + addhnb(zd, zn, zm); + } + void Addhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + addhnt(zd, zn, zm); + } + void Addp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Bcax(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Bdep(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bdep(zd, zn, zm); + } + void Bext(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bext(zd, zn, zm); + } + void Bgrp(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bgrp(zd, zn, zm); + } + void Bsl(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Bsl1n(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Bsl2n(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Cadd(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Cdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + void Cdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Cmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + void Cmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Eor3(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Eorbt(const 
ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + eorbt(zd, zn, zm); + } + void Eortb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + eortb(zd, zn, zm); + } + void Faddp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fcvtlt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtlt(zd, pg, zn); + } + void Fcvtnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtnt(zd, pg, zn); + } + void Fcvtx(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(zn.IsLaneSizeD()); + MovprfxHelperScope guard(this, zd.VnD(), pg, zd.VnD()); + fcvtx(zd, pg.Merging(), zn); + } + void Fcvtxnt(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtxnt(zd, pg, zn); + } + void Flogb(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zd); + flogb(zd, pg.Merging(), zn); + } + void Fmaxnmp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fmaxp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fminnmp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fminp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Fmlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Fmlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Fmlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Fmlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Fmlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Fmlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Fmlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Fmlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Histcnt(const ZRegister& zd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + histcnt(zd, pg, zn, zm); + } + void Histseg(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + histseg(zd, zn, zm); + } + void Ldnt1sb(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnt1sb(zt, pg, addr); + } + void Ldnt1sh(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + ldnt1sh(zt, pg, addr); + } + void Ldnt1sw(const ZRegister& zt, + const PRegisterZ& pg, + const SVEMemOperand& addr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnt1sw(zt, pg, addr); + } + void Match(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + match(pd, pg, zn, zm); + } + void Mla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Mls(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Mul(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mul(zd, zn, zm, index); + } + void Mul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mul(zd, zn, zm); + } + void Nbsl(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + const ZRegister& zk); + void Nmatch(const PRegisterWithLaneSize& pd, + const PRegisterZ& pg, + const ZRegister& zn, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nmatch(pd, pg, zn, zm); + } + void Pmul(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + pmul(zd, zn, zm); + } + void Pmullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + pmullb(zd, zn, zm); + } + void Pmullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + pmullt(zd, zn, zm); + } + void Raddhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + raddhnb(zd, zn, zm); + } + void Raddhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + raddhnt(zd, zn, zm); + } + void Rshrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rshrnb(zd, zn, shift); + } + void Rshrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rshrnt(zd, zn, shift); + } + void Rsubhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rsubhnb(zd, zn, zm); + } + void Rsubhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rsubhnt(zd, zn, zm); + } + void Saba(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sabalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sabalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sabdlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); 
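+ // (Convention reminder: the `lb`/`lt` suffixes widen from the "bottom"
+ // and "top" halves of each lane pair, so `sabdlb` uses the even-numbered
+ // source lanes and `sabdlt` the odd-numbered ones.)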
+ SingleEmissionCheckScope guard(this); + sabdlb(zd, zn, zm); + } + void Sabdlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sabdlt(zd, zn, zm); + } + void Sadalp(const ZRegister& zda, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sadalp(zda, pg, zn); + } + void Saddlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddlb(zd, zn, zm); + } + void Saddlbt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddlbt(zd, zn, zm); + } + void Saddlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddlt(zd, zn, zm); + } + void Saddwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddwb(zd, zn, zm); + } + void Saddwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + saddwt(zd, zn, zm); + } + void Sbclb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sbclt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Shrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + shrnb(zd, zn, shift); + } + void Shrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + shrnt(zd, zn, shift); + } + void Shsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sli(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sli(zd, zn, shift); + } + void Smaxp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sminp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Smlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Smlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Smlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Smlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Smlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Smlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Smlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Smlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Smulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smulh(zd, zn, zm); + } + void Smullb(const ZRegister& zd, + const 
ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smullb(zd, zn, zm, index); + } + void Smullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smullb(zd, zn, zm); + } + void Smullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smullt(zd, zn, zm, index); + } + void Smullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smullt(zd, zn, zm); + } + void Sqabs(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zd); + sqabs(zd, pg.Merging(), zn); + } + void Sqcadd(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Sqdmlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqdmlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmlalbt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqdmlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqdmlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmlslbt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqdmlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqdmulh(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmulh(zd, zn, zm, index); + } + void Sqdmulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmulh(zd, zn, zm); + } + void Sqdmullb(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmullb(zd, zn, zm, index); + } + void Sqdmullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmullb(zd, zn, zm); + } + void Sqdmullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmullt(zd, zn, zm, index); + } + void Sqdmullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqdmullt(zd, zn, zm); + } + void Sqneg(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zd); + 
sqneg(zd, pg.Merging(), zn); + } + void Sqrdcmlah(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index, + int rot); + void Sqrdcmlah(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int rot); + void Sqrdmlah(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqrdmlah(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqrdmlsh(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Sqrdmlsh(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sqrdmulh(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrdmulh(zd, zn, zm, index); + } + void Sqrdmulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrdmulh(zd, zn, zm); + } + void Sqrshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sqrshrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrshrnb(zd, zn, shift); + } + void Sqrshrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrshrnt(zd, zn, shift); + } + void Sqrshrunb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrshrunb(zd, zn, shift); + } + void Sqrshrunt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqrshrunt(zd, zn, shift); + } + void Sqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + sqshl(zd, pg, zd, shift); + } + void Sqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sqshlu(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + sqshlu(zd, pg, zd, shift); + } + void Sqshrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqshrnb(zd, zn, shift); + } + void Sqshrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqshrnt(zd, zn, shift); + } + void Sqshrunb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqshrunb(zd, zn, shift); + } + void Sqshrunt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqshrunt(zd, zn, shift); + } + void Sqsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Sqxtnb(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqxtnb(zd, zn); + } + void Sqxtnt(const 
ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqxtnt(zd, zn); + } + void Sqxtunb(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqxtunb(zd, zn); + } + void Sqxtunt(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sqxtunt(zd, zn); + } + void Sri(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sri(zd, zn, shift); + } + void Srshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Srshr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + srshr(zd, pg, zd, shift); + } + void Srsra(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + int shift); + void Sshllb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sshllb(zd, zn, shift); + } + void Sshllt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sshllt(zd, zn, shift); + } + void Ssra(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + int shift); + void Ssublb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssublb(zd, zn, zm); + } + void Ssublbt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssublbt(zd, zn, zm); + } + void Ssublt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssublt(zd, zn, zm); + } + void Ssubltb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssubltb(zd, zn, zm); + } + void Ssubwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssubwb(zd, zn, zm); + } + void Ssubwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ssubwt(zd, zn, zm); + } + void Subhnb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + subhnb(zd, zn, zm); + } + void Subhnt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + subhnt(zd, zn, zm); + } + void Suqadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Tbl(const ZRegister& zd, + const ZRegister& zn1, + const ZRegister& zn2, + const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(zd, zn1, zn2, zm); + } + void Tbx(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(zd, zn, zm); + } + void Uaba(const ZRegister& zd, + 
const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Uabalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Uabalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Uabdlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uabdlb(zd, zn, zm); + } + void Uabdlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uabdlt(zd, zn, zm); + } + void Uadalp(const ZRegister& zda, const PRegisterM& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uadalp(zda, pg, zn); + } + void Uaddlb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uaddlb(zd, zn, zm); + } + void Uaddlt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uaddlt(zd, zn, zm); + } + void Uaddwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uaddwb(zd, zn, zm); + } + void Uaddwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uaddwt(zd, zn, zm); + } + void Uhsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Umaxp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Uminp(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Umlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Umlalb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Umlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Umlalt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Umlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Umlslb(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Umlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Umlslt(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Umulh(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umulh(zd, zn, zm); + } + void Umullb(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umullb(zd, zn, zm, index); + } + void Umullb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umullb(zd, zn, zm); + } + void Umullt(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope 
guard(this); + umullt(zd, zn, zm, index); + } + void Umullt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umullt(zd, zn, zm); + } + void Uqrshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Uqrshrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqrshrnb(zd, zn, shift); + } + void Uqrshrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqrshrnt(zd, zn, shift); + } + void Uqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + uqshl(zd, pg, zd, shift); + } + void Uqshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Uqshrnb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqshrnb(zd, zn, shift); + } + void Uqshrnt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqshrnt(zd, zn, shift); + } + void Uqsub(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Uqxtnb(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqxtnb(zd, zn); + } + void Uqxtnt(const ZRegister& zd, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + uqxtnt(zd, zn); + } + void Urecpe(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zd); + urecpe(zd, pg.Merging(), zn); + } + void Urshl(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Urshr(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zn); + urshr(zd, pg, zd, shift); + } + void Ursqrte(const ZRegister& zd, const PRegister& pg, const ZRegister& zn) { + VIXL_ASSERT(allow_macro_instructions_); + MovprfxHelperScope guard(this, zd, pg, zd); + ursqrte(zd, pg.Merging(), zn); + } + void Ursra(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + int shift); + void Ushllb(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ushllb(zd, zn, shift); + } + void Ushllt(const ZRegister& zd, const ZRegister& zn, int shift) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ushllt(zd, zn, shift); + } + void Usqadd(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + void Usra(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + int shift); + void Usublb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + usublb(zd, zn, zm); + } + void Usublt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + 
usublt(zd, zn, zm); + } + void Usubwb(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + usubwb(zd, zn, zm); + } + void Usubwt(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + usubwt(zd, zn, zm); + } + void Whilege(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilege(pd, rn, rm); + } + void Whilegt(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilegt(pd, rn, rm); + } + void Whilehi(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilehi(pd, rn, rm); + } + void Whilehs(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilehs(pd, rn, rm); + } + void Whilerw(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilerw(pd, rn, rm); + } + void Whilewr(const PRegisterWithLaneSize& pd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + whilewr(pd, rn, rm); + } + void Xar(const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int shift) { + VIXL_ASSERT(allow_macro_instructions_); + if (zd.Aliases(zm)) { + SingleEmissionCheckScope guard(this); + xar(zd, zm, zn, shift); + } else { + MovprfxHelperScope guard(this, zd, zn); + xar(zd, zd, zm, shift); + } + } + void Fmmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Smmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Ummla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Usmmla(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Usdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm); + void Usdot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + void Sudot(const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int index); + + // MTE + void St2g(const Register& rt, const MemOperand& addr); + void Stg(const Register& rt, const MemOperand& addr); + void Stgp(const Register& rt1, const Register& rt2, const MemOperand& addr); + void Stz2g(const Register& rt, const MemOperand& addr); + void Stzg(const Register& rt, const MemOperand& addr); + void Ldg(const Register& rt, const MemOperand& addr); + + void Cpye(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpye(rd, rs, rn); + } + + void Cpyen(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyen(rd, rs, rn); + } + + void Cpyern(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + cpyern(rd, rs, rn); + } + + void Cpyewn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyewn(rd, rs, rn); + } + + void Cpyfe(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfe(rd, rs, rn); + } + + void Cpyfen(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfen(rd, rs, rn); + } + + void Cpyfern(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfern(rd, rs, rn); + } + + void Cpyfewn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfewn(rd, rs, rn); + } + + void Cpyfm(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfm(rd, rs, rn); + } + + void Cpyfmn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfmn(rd, rs, rn); + } + + void Cpyfmrn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfmrn(rd, rs, rn); + } + + void Cpyfmwn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfmwn(rd, rs, rn); + } + + void Cpyfp(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfp(rd, rs, rn); + } + + void Cpyfpn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfpn(rd, rs, rn); + } + + void Cpyfprn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfprn(rd, rs, rn); + } + + void Cpyfpwn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyfpwn(rd, rs, rn); + } + + void Cpym(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpym(rd, rs, rn); + } + + void Cpymn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpymn(rd, rs, rn); + } + + void Cpymrn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpymrn(rd, rs, rn); + } + + void Cpymwn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpymwn(rd, rs, rn); + } + + void Cpyp(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyp(rd, rs, rn); + } + + void Cpypn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope 
guard(this); + cpypn(rd, rs, rn); + } + + void Cpyprn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpyprn(rd, rs, rn); + } + + void Cpypwn(const Register& rd, const Register& rs, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cpypwn(rd, rs, rn); + } + + void Sete(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sete(rd, rn, rs); + } + + void Seten(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + seten(rd, rn, rs); + } + + void Setge(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setge(rd, rn, rs); + } + + void Setgen(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setgen(rd, rn, rs); + } + + void Setgm(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setgm(rd, rn, rs); + } + + void Setgmn(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setgmn(rd, rn, rs); + } + + void Setgp(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setgp(rd, rn, rs); + } + + void Setgpn(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setgpn(rd, rn, rs); + } + + void Setm(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setm(rd, rn, rs); + } + + void Setmn(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setmn(rd, rn, rs); + } + + void Setp(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setp(rd, rn, rs); + } + + void Setpn(const Register& rd, const Register& rn, const Register& rs) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setpn(rd, rn, rs); + } + +// Macro assembler wrappers that package the MOPS instructions into a single +// call. 
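+//
+// For example, MOPS_LIST below generates a `Set` wrapper equivalent to this
+// (illustrative expansion of DEFINE_MACRO_ASM_FUNC, shown for clarity only):
+//
+//   void Set(const Register& ra, const Register& rb, const Register& rc) {
+//     ExactAssemblyScope scope(this, 3 * kInstructionSize);
+//     setp(ra, rb, rc);  // Prologue.
+//     setm(ra, rb, rc);  // Main body.
+//     sete(ra, rb, rc);  // Epilogue.
+//   }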
+#define MOPS_LIST(V)  \
+  V(Set, set, )       \
+  V(Setn, set, n)     \
+  V(Setg, setg, )     \
+  V(Setgn, setg, n)   \
+  V(Cpy, cpy, )       \
+  V(Cpyn, cpy, n)     \
+  V(Cpyrn, cpy, rn)   \
+  V(Cpywn, cpy, wn)   \
+  V(Cpyf, cpyf, )     \
+  V(Cpyfn, cpyf, n)   \
+  V(Cpyfrn, cpyf, rn) \
+  V(Cpyfwn, cpyf, wn)
+
+#define DEFINE_MACRO_ASM_FUNC(MASM, ASMPREFIX, ASMSUFFIX)                 \
+  void MASM(const Register& ra, const Register& rb, const Register& rc) { \
+    ExactAssemblyScope scope(this, 3 * kInstructionSize);                 \
+    ASMPREFIX##p##ASMSUFFIX(ra, rb, rc);                                  \
+    ASMPREFIX##m##ASMSUFFIX(ra, rb, rc);                                  \
+    ASMPREFIX##e##ASMSUFFIX(ra, rb, rc);                                  \
+  }
+  MOPS_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+
+  void Abs(const Register& rd, const Register& rn) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    abs(rd, rn);
+  }
+
+  void Cnt(const Register& rd, const Register& rn) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    cnt(rd, rn);
+  }
+
+  void Ctz(const Register& rd, const Register& rn) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ctz(rd, rn);
+  }
+
+  void Smax(const Register& rd, const Register& rn, const Operand& op);
+  void Smin(const Register& rd, const Register& rn, const Operand& op);
+  void Umax(const Register& rd, const Register& rn, const Operand& op);
+  void Umin(const Register& rd, const Register& rn, const Operand& op);
+
   template <typename T>
   Literal<T>* CreateLiteralDestroyedWithPool(T value) {
     return new Literal<T>(value,
@@ -3374,11 +7919,13 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
     return GetScratchRegisterList();
   }
 
-  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
-  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
-    return GetScratchFPRegisterList();
+  CPURegList* GetScratchVRegisterList() { return &v_tmp_list_; }
+  VIXL_DEPRECATED("GetScratchVRegisterList", CPURegList* FPTmpList()) {
+    return GetScratchVRegisterList();
   }
 
+  CPURegList* GetScratchPRegisterList() { return &p_tmp_list_; }
+
   // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
   void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) {
     current_scratch_scope_ = scope;
@@ -3442,16 +7989,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
   // Will output the flags.
   void Log(TraceParameters parameters);
 
-  // Enable or disable instrumentation when an Instrument visitor is attached to
-  // the simulator.
-  void EnableInstrumentation();
-  void DisableInstrumentation();
-
-  // Add a marker to the instrumentation data produced by an Instrument visitor.
-  // The name is a two character string that will be attached to the marker in
-  // the output data.
-  void AnnotateInstrumentation(const char* marker_name);
-
   // Enable or disable CPU features dynamically. This mechanism allows users to
   // strictly check the use of CPU features in different regions of code.
   void SetSimulatorCPUFeatures(const CPUFeatures& features);
@@ -3555,6 +8092,36 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
                                 Condition cond,
                                 bool* should_synthesise_left);
 
+  // Generate code to calculate the address represented by `addr` and write it
+  // into `xd`. This is used as a common fall-back for out-of-range load and
+  // store operands.
+  //
+  // The vl_divisor_log2 argument is used to scale the VL, for use with
+  // SVE_MUL_VL.
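+  //
+  // For instance (an illustrative reading of the parameter, not an exhaustive
+  // contract): with vl_divisor_log2 == 3, as the CPURegister overload below
+  // selects for P registers, a `#3, MUL VL` offset contributes
+  // 3 * (VL / 8) bytes to the address.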
+  void CalculateSVEAddress(const Register& xd,
+                           const SVEMemOperand& addr,
+                           int vl_divisor_log2 = 0);
+
+  void CalculateSVEAddress(const Register& xd,
+                           const SVEMemOperand& addr,
+                           const CPURegister& rt) {
+    VIXL_ASSERT(rt.IsPRegister() || rt.IsZRegister());
+    int vl_divisor_log2 = rt.IsPRegister() ? kZRegBitsPerPRegBitLog2 : 0;
+    CalculateSVEAddress(xd, addr, vl_divisor_log2);
+  }
+
+  void SetFPNaNPropagationOption(FPMacroNaNPropagationOption nan_option) {
+    fp_nan_propagation_ = nan_option;
+  }
+
+  void ResolveFPNaNPropagationOption(FPMacroNaNPropagationOption* nan_option) {
+    // The input option has priority over the option that has been set.
+    if (*nan_option == NoFPMacroNaNPropagationSelected) {
+      *nan_option = fp_nan_propagation_;
+    }
+    VIXL_ASSERT(*nan_option != NoFPMacroNaNPropagationSelected);
+  }
+
  private:
   // The actual Push and Pop implementations. These don't generate any code
   // other than that required for the push or pop. This allows
@@ -3608,6 +8175,212 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
   void ConfigureSimulatorCPUFeaturesHelper(const CPUFeatures& features,
                                            DebugHltOpcode action);
 
+  void CompareHelper(Condition cond,
+                     const PRegisterWithLaneSize& pd,
+                     const PRegisterZ& pg,
+                     const ZRegister& zn,
+                     IntegerOperand imm);
+
+  // E.g. Ld1rb.
+  typedef void (Assembler::*SVELoadBroadcastFn)(const ZRegister& zt,
+                                                const PRegisterZ& pg,
+                                                const SVEMemOperand& addr);
+
+  void SVELoadBroadcastImmHelper(const ZRegister& zt,
+                                 const PRegisterZ& pg,
+                                 const SVEMemOperand& addr,
+                                 SVELoadBroadcastFn fn,
+                                 int divisor);
+
+  // E.g. ldr/str
+  typedef void (Assembler::*SVELoadStoreFn)(const CPURegister& rt,
+                                            const SVEMemOperand& addr);
+
+  void SVELoadStoreScalarImmHelper(const CPURegister& rt,
+                                   const SVEMemOperand& addr,
+                                   SVELoadStoreFn fn);
+
+  typedef void (Assembler::*SVELoad1Fn)(const ZRegister& zt,
+                                        const PRegisterZ& pg,
+                                        const SVEMemOperand& addr);
+  typedef void (Assembler::*SVEStore1Fn)(const ZRegister& zt,
+                                         const PRegister& pg,
+                                         const SVEMemOperand& addr);
+
+  // Helper for predicated Z register loads with addressing modes not directly
+  // encodable in the instruction. The supported_modifier parameter indicates
+  // which offset modifier the calling instruction encoder supports (e.g.
+  // SVE_MUL_VL). The log2 of the ratio of VL to the memory access size is
+  // passed as vl_divisor_log2; pass -1 to indicate no dependency.
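+  //
+  // For example (hypothetical values, for illustration only): a form with a
+  // signed 4-bit immediate that is scaled by 16 bytes would pass
+  // imm_bits == 4 and shift_amount == 4.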
+  template <typename Tg, typename Tf>
+  void SVELoadStoreNTBroadcastQOHelper(
+      const ZRegister& zt,
+      const Tg& pg,
+      const SVEMemOperand& addr,
+      Tf fn,
+      int imm_bits,
+      int shift_amount,
+      SVEOffsetModifier supported_modifier = NO_SVE_OFFSET_MODIFIER,
+      int vl_divisor_log2 = 0);
+
+  template <typename Tg, typename Tf>
+  void SVELoadStore1Helper(int msize_in_bytes_log2,
+                           const ZRegister& zt,
+                           const Tg& pg,
+                           const SVEMemOperand& addr,
+                           Tf fn);
+
+  template <typename Tf>
+  void SVELoadFFHelper(int msize_in_bytes_log2,
+                       const ZRegister& zt,
+                       const PRegisterZ& pg,
+                       const SVEMemOperand& addr,
+                       Tf fn);
+
+  typedef void (MacroAssembler::*IntWideImmMacroFn)(const ZRegister& zd,
+                                                    const ZRegister& zn,
+                                                    IntegerOperand imm);
+
+  typedef void (Assembler::*IntWideImmShiftFn)(const ZRegister& zd,
+                                               const ZRegister& zn,
+                                               int imm,
+                                               int shift);
+
+  typedef void (Assembler::*Int3ArithFn)(const ZRegister& zd,
+                                         const ZRegister& zn,
+                                         const ZRegister& zm);
+
+  typedef void (Assembler::*Int4ArithFn)(const ZRegister& zd,
+                                         const ZRegister& za,
+                                         const ZRegister& zn,
+                                         const ZRegister& zm);
+
+  typedef void (Assembler::*IntArithImmFn)(const ZRegister& zd,
+                                           const ZRegister& zn,
+                                           int imm);
+
+  typedef void (Assembler::*ZZZImmFn)(const ZRegister& zd,
+                                      const ZRegister& zn,
+                                      const ZRegister& zm,
+                                      int imm);
+
+  typedef void (MacroAssembler::*SVEArithPredicatedFn)(const ZRegister& zd,
+                                                       const PRegisterM& pg,
+                                                       const ZRegister& zn,
+                                                       const ZRegister& zm);
+
+  void IntWideImmHelper(IntArithImmFn imm_fn,
+                        SVEArithPredicatedFn reg_fn,
+                        const ZRegister& zd,
+                        const ZRegister& zn,
+                        IntegerOperand imm,
+                        bool is_signed_imm);
+
+  enum AddSubHelperOption { kAddImmediate, kSubImmediate };
+
+  void AddSubHelper(AddSubHelperOption option,
+                    const ZRegister& zd,
+                    const ZRegister& zn,
+                    IntegerOperand imm);
+
+  // Try to emit an add- or sub-like instruction (imm_fn) with `imm`, or the
+  // corresponding sub- or add-like instruction (n_imm_fn) with a negated `imm`.
+  // A `movprfx` is automatically generated if one is required. If successful,
+  // return true. Otherwise, return false.
+  //
+  // This helper uses two's complement equivalences, for example treating 0xffff
+  // as -1 for H-sized lanes.
+  bool TrySingleAddSub(AddSubHelperOption option,
+                       const ZRegister& zd,
+                       const ZRegister& zn,
+                       IntegerOperand imm);
+
+  void AbsoluteDifferenceAccumulate(Int3ArithFn fn,
+                                    const ZRegister& zd,
+                                    const ZRegister& za,
+                                    const ZRegister& zn,
+                                    const ZRegister& zm);
+
+  void FourRegDestructiveHelper(Int3ArithFn fn,
+                                const ZRegister& zd,
+                                const ZRegister& za,
+                                const ZRegister& zn,
+                                const ZRegister& zm);
+
+  void FourRegDestructiveHelper(Int4ArithFn fn,
+                                const ZRegister& zd,
+                                const ZRegister& za,
+                                const ZRegister& zn,
+                                const ZRegister& zm);
+
+  void SVEDotIndexHelper(ZZZImmFn fn,
+                         const ZRegister& zd,
+                         const ZRegister& za,
+                         const ZRegister& zn,
+                         const ZRegister& zm,
+                         int index);
+
+  // For noncommutative arithmetic operations.
+  void NoncommutativeArithmeticHelper(const ZRegister& zd,
+                                      const PRegisterM& pg,
+                                      const ZRegister& zn,
+                                      const ZRegister& zm,
+                                      SVEArithPredicatedFn fn,
+                                      SVEArithPredicatedFn rev_fn);
+
+  void FPCommutativeArithmeticHelper(const ZRegister& zd,
+                                     const PRegisterM& pg,
+                                     const ZRegister& zn,
+                                     const ZRegister& zm,
+                                     SVEArithPredicatedFn fn,
+                                     FPMacroNaNPropagationOption nan_option);
+
+  // Floating-point fused multiply-add vectors (predicated), writing addend.
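+  // (For instance, `fmla` takes this zda form, while `fmad` writes the
+  // multiplicand instead.)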
+ typedef void (Assembler::*SVEMulAddPredicatedZdaFn)(const ZRegister& zda, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + // Floating-point fused multiply-add vectors (predicated), writing + // multiplicand. + typedef void (Assembler::*SVEMulAddPredicatedZdnFn)(const ZRegister& zdn, + const PRegisterM& pg, + const ZRegister& zn, + const ZRegister& zm); + + void FPMulAddHelper(const ZRegister& zd, + const PRegisterM& pg, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + SVEMulAddPredicatedZdaFn fn_zda, + SVEMulAddPredicatedZdnFn fn_zdn, + FPMacroNaNPropagationOption nan_option); + + typedef void (Assembler::*SVEMulAddIndexFn)(const ZRegister& zda, + const ZRegister& zn, + const ZRegister& zm, + int index); + + void FourRegOneImmDestructiveHelper(ZZZImmFn fn, + const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + const ZRegister& zm, + int imm); + + void ShiftRightAccumulate(IntArithImmFn fn, + const ZRegister& zd, + const ZRegister& za, + const ZRegister& zn, + int imm); + + void ComplexAddition(ZZZImmFn fn, + const ZRegister& zd, + const ZRegister& zn, + const ZRegister& zm, + int rot); + // Tell whether any of the macro instruction can be used. When false the // MacroAssembler will assert if a method which can emit a variable number // of instructions is called. @@ -3621,7 +8394,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { // Scratch registers available for use by the MacroAssembler. CPURegList tmp_list_; - CPURegList fptmp_list_; + CPURegList v_tmp_list_; + CPURegList p_tmp_list_; UseScratchRegisterScope* current_scratch_scope_; @@ -3631,6 +8405,8 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface { ptrdiff_t checkpoint_; ptrdiff_t recommended_checkpoint_; + FPMacroNaNPropagationOption fp_nan_propagation_; + friend class Pool; friend class LiteralPool; }; @@ -3699,11 +8475,35 @@ class BlockPoolsScope { MacroAssembler* masm_; }; +MovprfxHelperScope::MovprfxHelperScope(MacroAssembler* masm, + const ZRegister& dst, + const ZRegister& src) + : ExactAssemblyScope(masm, + ShouldGenerateMovprfx(dst, src) + ? (2 * kInstructionSize) + : kInstructionSize) { + if (ShouldGenerateMovprfx(dst, src)) { + masm->movprfx(dst, src); + } +} + +MovprfxHelperScope::MovprfxHelperScope(MacroAssembler* masm, + const ZRegister& dst, + const PRegister& pg, + const ZRegister& src) + : ExactAssemblyScope(masm, + ShouldGenerateMovprfx(dst, pg, src) + ? (2 * kInstructionSize) + : kInstructionSize) { + if (ShouldGenerateMovprfx(dst, pg, src)) { + masm->movprfx(dst, pg, src); + } +} // This scope utility allows scratch registers to be managed safely. The -// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is -// used as a pool of scratch registers. These registers can be allocated on -// demand, and will be returned at the end of the scope. +// MacroAssembler's GetScratch*RegisterList() are used as a pool of scratch +// registers. These registers can be allocated on demand, and will be returned +// at the end of the scope. // // When the scope ends, the MacroAssembler's lists will be restored to their // original state, even if the lists were modified by some other means. @@ -3713,14 +8513,22 @@ class UseScratchRegisterScope { // must not be `NULL`), so it is ready to use immediately after it has been // constructed. 
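+  //
+  // A typical pattern (illustrative) looks like this:
+  //
+  //   UseScratchRegisterScope temps(&masm);
+  //   Register scratch = temps.AcquireX();
+  //   // ... use `scratch` ...
+  //   // `scratch` is returned to the pool when `temps` is destroyed.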
explicit UseScratchRegisterScope(MacroAssembler* masm) - : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) { + : masm_(NULL), + parent_(NULL), + old_available_(0), + old_available_v_(0), + old_available_p_(0) { Open(masm); } // This constructor does not implicitly initialise the scope. Instead, the // user is required to explicitly call the `Open` function before using the // scope. UseScratchRegisterScope() - : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {} + : masm_(NULL), + parent_(NULL), + old_available_(0), + old_available_v_(0), + old_available_p_(0) {} // This function performs the actual initialisation work. void Open(MacroAssembler* masm); @@ -3735,25 +8543,42 @@ class UseScratchRegisterScope { bool IsAvailable(const CPURegister& reg) const; - // Take a register from the appropriate temps list. It will be returned // automatically when the scope ends. Register AcquireW() { - return AcquireNextAvailable(masm_->GetScratchRegisterList()).W(); + return AcquireFrom(masm_->GetScratchRegisterList()).W(); } Register AcquireX() { - return AcquireNextAvailable(masm_->GetScratchRegisterList()).X(); + return AcquireFrom(masm_->GetScratchRegisterList()).X(); } VRegister AcquireH() { - return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).H(); + return AcquireFrom(masm_->GetScratchVRegisterList()).H(); } VRegister AcquireS() { - return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S(); + return AcquireFrom(masm_->GetScratchVRegisterList()).S(); } VRegister AcquireD() { - return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D(); + return AcquireFrom(masm_->GetScratchVRegisterList()).D(); + } + ZRegister AcquireZ() { + return AcquireFrom(masm_->GetScratchVRegisterList()).Z(); + } + PRegister AcquireP() { + // Prefer to allocate p8-p15 if we can, to leave p0-p7 available for use as + // governing predicates. + CPURegList* available = masm_->GetScratchPRegisterList(); + RegList preferred = ~kGoverningPRegisterMask; + if ((available->GetList() & preferred) != 0) { + return AcquireFrom(available, preferred).P(); + } + return AcquireFrom(available).P(); + } + // Acquire a P register suitable for use as a governing predicate in + // instructions which only accept p0-p7 for that purpose. + PRegister AcquireGoverningP() { + CPURegList* available = masm_->GetScratchPRegisterList(); + return AcquireFrom(available, kGoverningPRegisterMask).P(); } - Register AcquireRegisterOfSize(int size_in_bits); Register AcquireSameSizeAs(const Register& reg) { @@ -3769,6 +8594,12 @@ class UseScratchRegisterScope { : CPURegister(AcquireRegisterOfSize(size_in_bits)); } + // Acquire a register big enough to represent one lane of `vector`. + Register AcquireRegisterToHoldLane(const CPURegister& vector) { + VIXL_ASSERT(vector.GetLaneSizeInBits() <= kXRegSize); + return (vector.GetLaneSizeInBits() > kWRegSize) ? AcquireX() : AcquireW(); + } + // Explicitly release an acquired (or excluded) register, putting it back in // the appropriate temps list. @@ -3786,6 +8617,10 @@ class UseScratchRegisterScope { const VRegister& reg2 = NoVReg, const VRegister& reg3 = NoVReg, const VRegister& reg4 = NoVReg); + void Include(const CPURegister& reg1, + const CPURegister& reg2 = NoCPUReg, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); // Make sure that the specified registers are not available in this scope. 
@@ -3805,21 +8640,40 @@ class UseScratchRegisterScope {
                const CPURegister& reg3 = NoCPUReg,
                const CPURegister& reg4 = NoCPUReg);
 
+  // Convenience for excluding registers that are part of Operands. This is
+  // useful for sequences like this:
+  //
+  //    // Use 'rd' as a scratch, but only if it's not aliased by an input.
+  //    temps.Include(rd);
+  //    temps.Exclude(rn);
+  //    temps.Exclude(operand);
+  //
+  // Otherwise, a conditional check is needed on the last 'Exclude'.
+  void Exclude(const Operand& operand) {
+    if (operand.IsShiftedRegister() || operand.IsExtendedRegister()) {
+      Exclude(operand.GetRegister());
+    } else {
+      VIXL_ASSERT(operand.IsImmediate());
+    }
+  }
+
   // Prevent any scratch registers from being used in this scope.
   void ExcludeAll();
 
  private:
-  static CPURegister AcquireNextAvailable(CPURegList* available);
+  static CPURegister AcquireFrom(CPURegList* available,
+                                 RegList mask = ~static_cast<RegList>(0));
 
   static void ReleaseByCode(CPURegList* available, int code);
-  static void ReleaseByRegList(CPURegList* available, RegList regs);
-  static void IncludeByRegList(CPURegList* available, RegList exclude);
-  static void ExcludeByRegList(CPURegList* available, RegList exclude);
+  CPURegList* GetAvailableListFor(CPURegister::RegisterBank bank);
+
+  static const RegList kGoverningPRegisterMask =
+      (static_cast<RegList>(1) << kNumberOfGoverningPRegisters) - 1;
+
   // The MacroAssembler maintains a list of available scratch registers, and
   // also keeps track of the most recently-opened scope so that on destruction
   // we can check that scopes do not outlive their parents.
@@ -3828,13 +8682,15 @@ class UseScratchRegisterScope {
 
   // The state of the available lists at the start of this scope.
   RegList old_available_;    // kRegister
-  RegList old_availablefp_;  // kVRegister
+  RegList old_available_v_;  // kVRegister / kZRegister
+  RegList old_available_p_;  // kPRegister
 
   // Disallow copy constructor and operator=.
-  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
+  VIXL_NO_RETURN_IN_DEBUG_MODE UseScratchRegisterScope(
+      const UseScratchRegisterScope&) {
     VIXL_UNREACHABLE();
   }
-  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
+  VIXL_NO_RETURN_IN_DEBUG_MODE void operator=(const UseScratchRegisterScope&) {
     VIXL_UNREACHABLE();
   }
 };
@@ -3848,23 +8704,11 @@ class UseScratchRegisterScope {
 // features needs a corresponding macro instruction.
 class SimulationCPUFeaturesScope {
  public:
-  explicit SimulationCPUFeaturesScope(
-      MacroAssembler* masm,
-      CPUFeatures::Feature feature0 = CPUFeatures::kNone,
-      CPUFeatures::Feature feature1 = CPUFeatures::kNone,
-      CPUFeatures::Feature feature2 = CPUFeatures::kNone,
-      CPUFeatures::Feature feature3 = CPUFeatures::kNone)
-      : masm_(masm),
-        cpu_features_scope_(masm, feature0, feature1, feature2, feature3) {
+  template <typename... T>
+  explicit SimulationCPUFeaturesScope(MacroAssembler* masm, T... features)
+      : masm_(masm), cpu_features_scope_(masm, features...)
{ masm_->SaveSimulatorCPUFeatures(); - masm_->EnableSimulatorCPUFeatures( - CPUFeatures(feature0, feature1, feature2, feature3)); - } - - SimulationCPUFeaturesScope(MacroAssembler* masm, const CPUFeatures& other) - : masm_(masm), cpu_features_scope_(masm, other) { - masm_->SaveSimulatorCPUFeatures(); - masm_->EnableSimulatorCPUFeatures(other); + masm_->EnableSimulatorCPUFeatures(CPUFeatures(features...)); } ~SimulationCPUFeaturesScope() { masm_->RestoreSimulatorCPUFeatures(); } diff --git a/dep/vixl/include/vixl/aarch64/operands-aarch64.h b/dep/vixl/include/vixl/aarch64/operands-aarch64.h index e3dbfa3ec..546954220 100644 --- a/dep/vixl/include/vixl/aarch64/operands-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/operands-aarch64.h @@ -27,525 +27,15 @@ #ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_ #define VIXL_AARCH64_OPERANDS_AARCH64_H_ +#include +#include + #include "instructions-aarch64.h" +#include "registers-aarch64.h" namespace vixl { namespace aarch64 { -typedef uint64_t RegList; -static const int kRegListSizeInBits = sizeof(RegList) * 8; - - -// Registers. - -// Some CPURegister methods can return Register or VRegister types, so we need -// to declare them in advance. -class Register; -class VRegister; - -class CPURegister { - public: - enum RegisterType { - // The kInvalid value is used to detect uninitialized static instances, - // which are always zero-initialized before any constructors are called. - kInvalid = 0, - kRegister, - kVRegister, - kFPRegister = kVRegister, - kNoRegister - }; - - CPURegister() : code_(0), size_(0), type_(kNoRegister) { - VIXL_ASSERT(!IsValid()); - VIXL_ASSERT(IsNone()); - } - - CPURegister(unsigned code, unsigned size, RegisterType type) - : code_(code), size_(size), type_(type) { - VIXL_ASSERT(IsValidOrNone()); - } - - unsigned GetCode() const { - VIXL_ASSERT(IsValid()); - return code_; - } - VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); } - - RegisterType GetType() const { - VIXL_ASSERT(IsValidOrNone()); - return type_; - } - VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); } - - RegList GetBit() const { - VIXL_ASSERT(code_ < (sizeof(RegList) * 8)); - return IsValid() ? (static_cast(1) << code_) : 0; - } - VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); } - - int GetSizeInBytes() const { - VIXL_ASSERT(IsValid()); - VIXL_ASSERT(size_ % 8 == 0); - return size_ / 8; - } - VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) { - return GetSizeInBytes(); - } - - int GetSizeInBits() const { - VIXL_ASSERT(IsValid()); - return size_; - } - VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) { - return GetSizeInBits(); - } - VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) { - return GetSizeInBits(); - } - - bool Is8Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 8; - } - - bool Is16Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 16; - } - - bool Is32Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 32; - } - - bool Is64Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 64; - } - - bool Is128Bits() const { - VIXL_ASSERT(IsValid()); - return size_ == 128; - } - - bool IsValid() const { - if (IsValidRegister() || IsValidVRegister()) { - VIXL_ASSERT(!IsNone()); - return true; - } else { - // This assert is hit when the register has not been properly initialized. - // One cause for this can be an initialisation order fiasco. See - // https://isocpp.org/wiki/faq/ctors#static-init-order for some details. 
- VIXL_ASSERT(IsNone()); - return false; - } - } - - bool IsValidRegister() const { - return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) && - ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); - } - - bool IsValidVRegister() const { - return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) || - (size_ == kSRegSize) || (size_ == kDRegSize) || - (size_ == kQRegSize)) && - (code_ < kNumberOfVRegisters); - } - - bool IsValidFPRegister() const { - return IsFPRegister() && (code_ < kNumberOfVRegisters); - } - - bool IsNone() const { - // kNoRegister types should always have size 0 and code 0. - VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); - VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); - - return type_ == kNoRegister; - } - - bool Aliases(const CPURegister& other) const { - VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); - return (code_ == other.code_) && (type_ == other.type_); - } - - bool Is(const CPURegister& other) const { - VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); - return Aliases(other) && (size_ == other.size_); - } - - bool IsZero() const { - VIXL_ASSERT(IsValid()); - return IsRegister() && (code_ == kZeroRegCode); - } - - bool IsSP() const { - VIXL_ASSERT(IsValid()); - return IsRegister() && (code_ == kSPRegInternalCode); - } - - bool IsRegister() const { return type_ == kRegister; } - - bool IsVRegister() const { return type_ == kVRegister; } - - bool IsFPRegister() const { return IsS() || IsD(); } - - bool IsW() const { return IsValidRegister() && Is32Bits(); } - bool IsX() const { return IsValidRegister() && Is64Bits(); } - - // These assertions ensure that the size and type of the register are as - // described. They do not consider the number of lanes that make up a vector. - // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() - // does not imply Is1D() or Is8B(). - // Check the number of lanes, ie. the format of the vector, using methods such - // as Is8B(), Is1D(), etc. in the VRegister class. - bool IsV() const { return IsVRegister(); } - bool IsB() const { return IsV() && Is8Bits(); } - bool IsH() const { return IsV() && Is16Bits(); } - bool IsS() const { return IsV() && Is32Bits(); } - bool IsD() const { return IsV() && Is64Bits(); } - bool IsQ() const { return IsV() && Is128Bits(); } - - // Semantic type for sdot and udot instructions. 
- bool IsS4B() const { return IsS(); } - const VRegister& S4B() const { return S(); } - - const Register& W() const; - const Register& X() const; - const VRegister& V() const; - const VRegister& B() const; - const VRegister& H() const; - const VRegister& S() const; - const VRegister& D() const; - const VRegister& Q() const; - - bool IsSameType(const CPURegister& other) const { - return type_ == other.type_; - } - - bool IsSameSizeAndType(const CPURegister& other) const { - return (size_ == other.size_) && IsSameType(other); - } - - protected: - unsigned code_; - int size_; - RegisterType type_; - - private: - bool IsValidOrNone() const { return IsValid() || IsNone(); } -}; - - -class Register : public CPURegister { - public: - Register() : CPURegister() {} - explicit Register(const CPURegister& other) - : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) { - VIXL_ASSERT(IsValidRegister()); - } - Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {} - - bool IsValid() const { - VIXL_ASSERT(IsRegister() || IsNone()); - return IsValidRegister(); - } - - static const Register& GetWRegFromCode(unsigned code); - VIXL_DEPRECATED("GetWRegFromCode", - static const Register& WRegFromCode(unsigned code)) { - return GetWRegFromCode(code); - } - - static const Register& GetXRegFromCode(unsigned code); - VIXL_DEPRECATED("GetXRegFromCode", - static const Register& XRegFromCode(unsigned code)) { - return GetXRegFromCode(code); - } - - private: - static const Register wregisters[]; - static const Register xregisters[]; -}; - - -namespace internal { - -template -class FixedSizeRegister : public Register { - public: - FixedSizeRegister() : Register() {} - explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) { - VIXL_ASSERT(IsValidRegister()); - } - explicit FixedSizeRegister(const Register& other) - : Register(other.GetCode(), size_in_bits) { - VIXL_ASSERT(other.GetSizeInBits() == size_in_bits); - VIXL_ASSERT(IsValidRegister()); - } - explicit FixedSizeRegister(const CPURegister& other) - : Register(other.GetCode(), other.GetSizeInBits()) { - VIXL_ASSERT(other.GetType() == kRegister); - VIXL_ASSERT(other.GetSizeInBits() == size_in_bits); - VIXL_ASSERT(IsValidRegister()); - } - - bool IsValid() const { - return Register::IsValid() && (GetSizeInBits() == size_in_bits); - } -}; - -} // namespace internal - -typedef internal::FixedSizeRegister XRegister; -typedef internal::FixedSizeRegister WRegister; - - -class VRegister : public CPURegister { - public: - VRegister() : CPURegister(), lanes_(1) {} - explicit VRegister(const CPURegister& other) - : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()), - lanes_(1) { - VIXL_ASSERT(IsValidVRegister()); - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - VRegister(unsigned code, unsigned size, unsigned lanes = 1) - : CPURegister(code, size, kVRegister), lanes_(lanes) { - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - VRegister(unsigned code, VectorFormat format) - : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister), - lanes_(IsVectorFormat(format) ? 
LaneCountFromFormat(format) : 1) { - VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); - } - - bool IsValid() const { - VIXL_ASSERT(IsVRegister() || IsNone()); - return IsValidVRegister(); - } - - static const VRegister& GetBRegFromCode(unsigned code); - VIXL_DEPRECATED("GetBRegFromCode", - static const VRegister& BRegFromCode(unsigned code)) { - return GetBRegFromCode(code); - } - - static const VRegister& GetHRegFromCode(unsigned code); - VIXL_DEPRECATED("GetHRegFromCode", - static const VRegister& HRegFromCode(unsigned code)) { - return GetHRegFromCode(code); - } - - static const VRegister& GetSRegFromCode(unsigned code); - VIXL_DEPRECATED("GetSRegFromCode", - static const VRegister& SRegFromCode(unsigned code)) { - return GetSRegFromCode(code); - } - - static const VRegister& GetDRegFromCode(unsigned code); - VIXL_DEPRECATED("GetDRegFromCode", - static const VRegister& DRegFromCode(unsigned code)) { - return GetDRegFromCode(code); - } - - static const VRegister& GetQRegFromCode(unsigned code); - VIXL_DEPRECATED("GetQRegFromCode", - static const VRegister& QRegFromCode(unsigned code)) { - return GetQRegFromCode(code); - } - - static const VRegister& GetVRegFromCode(unsigned code); - VIXL_DEPRECATED("GetVRegFromCode", - static const VRegister& VRegFromCode(unsigned code)) { - return GetVRegFromCode(code); - } - - VRegister V8B() const { return VRegister(code_, kDRegSize, 8); } - VRegister V16B() const { return VRegister(code_, kQRegSize, 16); } - VRegister V2H() const { return VRegister(code_, kSRegSize, 2); } - VRegister V4H() const { return VRegister(code_, kDRegSize, 4); } - VRegister V8H() const { return VRegister(code_, kQRegSize, 8); } - VRegister V2S() const { return VRegister(code_, kDRegSize, 2); } - VRegister V4S() const { return VRegister(code_, kQRegSize, 4); } - VRegister V2D() const { return VRegister(code_, kQRegSize, 2); } - VRegister V1D() const { return VRegister(code_, kDRegSize, 1); } - - bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); } - bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); } - bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); } - bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); } - bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); } - bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); } - bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); } - bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); } - bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); } - - // For consistency, we assert the number of lanes of these scalar registers, - // even though there are no vectors of equivalent total size with which they - // could alias. - bool Is1B() const { - VIXL_ASSERT(!(Is8Bits() && IsVector())); - return Is8Bits(); - } - bool Is1H() const { - VIXL_ASSERT(!(Is16Bits() && IsVector())); - return Is16Bits(); - } - bool Is1S() const { - VIXL_ASSERT(!(Is32Bits() && IsVector())); - return Is32Bits(); - } - - // Semantic type for sdot and udot instructions. 
- bool Is1S4B() const { return Is1S(); } - - - bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; } - bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; } - bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; } - bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; } - - int GetLanes() const { return lanes_; } - VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); } - - bool IsScalar() const { return lanes_ == 1; } - - bool IsVector() const { return lanes_ > 1; } - - bool IsSameFormat(const VRegister& other) const { - return (size_ == other.size_) && (lanes_ == other.lanes_); - } - - unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; } - VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) { - return GetLaneSizeInBytes(); - } - - unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; } - VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) { - return GetLaneSizeInBits(); - } - - private: - static const VRegister bregisters[]; - static const VRegister hregisters[]; - static const VRegister sregisters[]; - static const VRegister dregisters[]; - static const VRegister qregisters[]; - static const VRegister vregisters[]; - int lanes_; -}; - - -// Backward compatibility for FPRegisters. -typedef VRegister FPRegister; - -// No*Reg is used to indicate an unused argument, or an error case. Note that -// these all compare equal (using the Is() method). The Register and VRegister -// variants are provided for convenience. -const Register NoReg; -const VRegister NoVReg; -const FPRegister NoFPReg; // For backward compatibility. -const CPURegister NoCPUReg; - - -#define DEFINE_REGISTERS(N) \ - const WRegister w##N(N); \ - const XRegister x##N(N); -AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS) -#undef DEFINE_REGISTERS -const WRegister wsp(kSPRegInternalCode); -const XRegister sp(kSPRegInternalCode); - - -#define DEFINE_VREGISTERS(N) \ - const VRegister b##N(N, kBRegSize); \ - const VRegister h##N(N, kHRegSize); \ - const VRegister s##N(N, kSRegSize); \ - const VRegister d##N(N, kDRegSize); \ - const VRegister q##N(N, kQRegSize); \ - const VRegister v##N(N, kQRegSize); -AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS) -#undef DEFINE_VREGISTERS - - -// Register aliases. -const XRegister ip0 = x16; -const XRegister ip1 = x17; -const XRegister lr = x30; -const XRegister xzr = x31; -const WRegister wzr = w31; - - -// AreAliased returns true if any of the named registers overlap. Arguments -// set to NoReg are ignored. The system stack pointer may be specified. -bool AreAliased(const CPURegister& reg1, - const CPURegister& reg2, - const CPURegister& reg3 = NoReg, - const CPURegister& reg4 = NoReg, - const CPURegister& reg5 = NoReg, - const CPURegister& reg6 = NoReg, - const CPURegister& reg7 = NoReg, - const CPURegister& reg8 = NoReg); - - -// AreSameSizeAndType returns true if all of the specified registers have the -// same size, and are of the same type. The system stack pointer may be -// specified. Arguments set to NoReg are ignored, as are any subsequent -// arguments. At least one argument (reg1) must be valid (not NoCPUReg). 
-bool AreSameSizeAndType(const CPURegister& reg1,
-                        const CPURegister& reg2,
-                        const CPURegister& reg3 = NoCPUReg,
-                        const CPURegister& reg4 = NoCPUReg,
-                        const CPURegister& reg5 = NoCPUReg,
-                        const CPURegister& reg6 = NoCPUReg,
-                        const CPURegister& reg7 = NoCPUReg,
-                        const CPURegister& reg8 = NoCPUReg);
-
-// AreEven returns true if all of the specified registers have even register
-// indices. Arguments set to NoReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
-bool AreEven(const CPURegister& reg1,
-             const CPURegister& reg2,
-             const CPURegister& reg3 = NoReg,
-             const CPURegister& reg4 = NoReg,
-             const CPURegister& reg5 = NoReg,
-             const CPURegister& reg6 = NoReg,
-             const CPURegister& reg7 = NoReg,
-             const CPURegister& reg8 = NoReg);
-
-
-// AreConsecutive returns true if all of the specified registers are
-// consecutive in the register file. Arguments set to NoReg are ignored, as are
-// any subsequent arguments. At least one argument (reg1) must be valid
-// (not NoCPUReg).
-bool AreConsecutive(const CPURegister& reg1,
-                    const CPURegister& reg2,
-                    const CPURegister& reg3 = NoCPUReg,
-                    const CPURegister& reg4 = NoCPUReg);
-
-
-// AreSameFormat returns true if all of the specified VRegisters have the same
-// vector format. Arguments set to NoReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoVReg).
-bool AreSameFormat(const VRegister& reg1,
-                   const VRegister& reg2,
-                   const VRegister& reg3 = NoVReg,
-                   const VRegister& reg4 = NoVReg);
-
-
-// AreConsecutive returns true if all of the specified VRegisters are
-// consecutive in the register file. Arguments set to NoReg are ignored, as are
-// any subsequent arguments. At least one argument (reg1) must be valid
-// (not NoVReg).
-bool AreConsecutive(const VRegister& reg1,
-                    const VRegister& reg2,
-                    const VRegister& reg3 = NoVReg,
-                    const VRegister& reg4 = NoVReg);
-
-
 // Lists of registers.
 class CPURegList {
  public:
@@ -580,6 +70,28 @@ class CPURegList {
     VIXL_ASSERT(IsValid());
   }
 
+  // Construct an empty CPURegList with the specified size and type. If `size`
+  // is CPURegister::kUnknownSize and the register type requires a size, a
+  // valid but unspecified default will be picked.
+  static CPURegList Empty(CPURegister::RegisterType type,
+                          unsigned size = CPURegister::kUnknownSize) {
+    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
+  }
+
+  // Construct a CPURegList with all possible registers with the specified size
+  // and type. If `size` is CPURegister::kUnknownSize and the register type
+  // requires a size, a valid but unspecified default will be picked.
+  static CPURegList All(CPURegister::RegisterType type,
+                        unsigned size = CPURegister::kUnknownSize) {
+    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
+    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
+    if (type == CPURegister::kRegister) {
+      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
+      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
+    }
+    return CPURegList(type, GetDefaultSizeFor(type, size), list);
+  }
+
   CPURegister::RegisterType GetType() const {
     VIXL_ASSERT(IsValid());
     return type_;
@@ -588,6 +100,10 @@ class CPURegList {
     return GetType();
   }
 
+  CPURegister::RegisterBank GetBank() const {
+    return CPURegister::GetBankFor(GetType());
+  }
+
   // Combine another CPURegList into this one. Registers that already exist in
   // this list are left unchanged. The type and size of the registers in the
   // 'other' list must match those in this list.
@@ -684,8 +200,11 @@ class CPURegList {
   // preparing registers for an AAPCS64 function call, for example.
   void RemoveCalleeSaved();
 
-  CPURegister PopLowestIndex();
-  CPURegister PopHighestIndex();
+  // Find the register in this list that appears in `mask` with the lowest or
+  // highest code, remove it from the list and return it as a CPURegister. If
+  // the list is empty, leave it unchanged and return NoCPUReg.
+  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
+  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));
 
   // AAPCS64 callee-saved registers.
   static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
@@ -704,12 +223,12 @@ class CPURegList {
 
   bool IncludesAliasOf(const CPURegister& other) const {
     VIXL_ASSERT(IsValid());
-    return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
+    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
   }
 
   bool IncludesAliasOf(int code) const {
     VIXL_ASSERT(IsValid());
-    return ((code & list_) != 0);
+    return (((static_cast<RegList>(1) << code) & list_) != 0);
   }
 
   int GetCount() const {
@@ -744,6 +263,21 @@ class CPURegList {
   }
 
  private:
+  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
+  // then return an arbitrary-but-valid size.
+  //
+  // Otherwise, the size is checked for validity and returned unchanged.
+  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
+                                    unsigned size) {
+    if (size == CPURegister::kUnknownSize) {
+      if (type == CPURegister::kRegister) size = kXRegSize;
+      if (type == CPURegister::kVRegister) size = kQRegSize;
+      // All other types require kUnknownSize.
+    }
+    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
+    return size;
+  }
+
   RegList list_;
   int size_;
   CPURegister::RegisterType type_;
@@ -761,6 +295,7 @@ extern const CPURegList kCalleeSavedV;
 extern const CPURegList kCallerSaved;
 extern const CPURegList kCallerSavedV;
 
+class IntegerOperand;
 
 // Operand.
 class Operand {
@@ -769,7 +304,9 @@ class Operand {
   // where <immediate> is int64_t.
   // This is allowed to be an implicit constructor because Operand is
   // a wrapper class that doesn't normally perform any type conversion.
-  Operand(int64_t immediate = 0);  // NOLINT(runtime/explicit)
+  Operand(int64_t immediate);  // NOLINT(runtime/explicit)
+
+  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)
 
   // rm, {<shift> #<shift_amount>}
   // where <shift> is one of {LSL, LSR, ASR, ROR}.
@@ -844,6 +381,8 @@ class Operand {
 
 
 // MemOperand represents the addressing mode of a load or store instruction.
+// In assembly syntax, MemOperands are normally denoted by one or more elements
+// inside or around square brackets.
 class MemOperand {
  public:
   // Creates an invalid `MemOperand`.
@@ -862,38 +401,47 @@
   MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
 
   const Register& GetBaseRegister() const { return base_; }
-  VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) {
-    return GetBaseRegister();
-  }
 
+  // If the MemOperand has a register offset, return it. (This also applies to
+  // pre- and post-index modes.) Otherwise, return NoReg.
   const Register& GetRegisterOffset() const { return regoffset_; }
-  VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) {
-    return GetRegisterOffset();
-  }
 
+  // If the MemOperand has an immediate offset, return it. (This also applies to
+  // pre- and post-index modes.) Otherwise, return 0.
int64_t GetOffset() const { return offset_; } - VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); } AddrMode GetAddrMode() const { return addrmode_; } - VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) { - return GetAddrMode(); - } - Shift GetShift() const { return shift_; } - VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); } - Extend GetExtend() const { return extend_; } - VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); } - unsigned GetShiftAmount() const { return shift_amount_; } - VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) { - return GetShiftAmount(); + unsigned GetShiftAmount() const { + // Extend modes can also encode a shift for some instructions. + VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND)); + return shift_amount_; } + // True for MemOperands which represent something like [x0]. + // Currently, this will also return true for [x0, #0], because MemOperand has + // no way to distinguish the two. + bool IsPlainRegister() const; + + // True for MemOperands which represent something like [x0], or for compound + // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr] + // or [x0, wzr, UXTW #3]. + bool IsEquivalentToPlainRegister() const; + + // True for immediate-offset (but not indexed) MemOperands. bool IsImmediateOffset() const; + // True for register-offset (but not indexed) MemOperands. bool IsRegisterOffset() const; + // True for immediate or register pre-indexed MemOperands. bool IsPreIndex() const; + // True for immediate or register post-indexed MemOperands. bool IsPostIndex() const; + // True for immediate pre-indexed MemOperands, [reg, #imm]! + bool IsImmediatePreIndex() const; + // True for immediate post-indexed MemOperands, [reg], #imm + bool IsImmediatePostIndex() const; void AddOffset(int64_t offset); @@ -922,6 +470,464 @@ class MemOperand { unsigned shift_amount_; }; +// SVE supports memory operands which don't make sense to the core ISA, such as +// scatter-gather forms, in which either the base or offset registers are +// vectors. This class exists to avoid complicating core-ISA code with +// SVE-specific behaviour. +// +// Note that SVE does not support any pre- or post-index modes. +class SVEMemOperand { + public: + // "vector-plus-immediate", like [z0.s, #21] + explicit SVEMemOperand(ZRegister base, uint64_t offset = 0) + : base_(base), + regoffset_(NoReg), + offset_(RawbitsToInt64(offset)), + mod_(NO_SVE_OFFSET_MODIFIER), + shift_amount_(0) { + VIXL_ASSERT(IsVectorPlusImmediate()); + VIXL_ASSERT(IsValid()); + } + + // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL_VL] + // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL. + // + // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)` and + // `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar + // instructions where xm defaults to xzr. However, users should not rely on + // `SVEMemOperand(x0, 0)` being accepted in such cases. 
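+  //
+  // For example (illustrative):
+  //
+  //   SVEMemOperand(x0);                 // [x0]
+  //   SVEMemOperand(x0, 42);             // [x0, #42]
+  //   SVEMemOperand(x0, 3, SVE_MUL_VL);  // [x0, #3, MUL VL]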
+  explicit SVEMemOperand(Register base,
+                         uint64_t offset = 0,
+                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
+      : base_(base),
+        regoffset_(NoReg),
+        offset_(RawbitsToInt64(offset)),
+        mod_(mod),
+        shift_amount_(0) {
+    VIXL_ASSERT(IsScalarPlusImmediate());
+    VIXL_ASSERT(IsValid());
+  }
+
+  // "scalar-plus-scalar", like [x0, x1]
+  // "scalar-plus-vector", like [x0, z1.d]
+  SVEMemOperand(Register base, CPURegister offset)
+      : base_(base),
+        regoffset_(offset),
+        offset_(0),
+        mod_(NO_SVE_OFFSET_MODIFIER),
+        shift_amount_(0) {
+    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
+    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
+    VIXL_ASSERT(IsValid());
+  }
+
+  // "scalar-plus-vector", like [x0, z1.d, UXTW]
+  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
+  // corresponding `Extend` value.
+  template <typename M>
+  SVEMemOperand(Register base, ZRegister offset, M mod)
+      : base_(base),
+        regoffset_(offset),
+        offset_(0),
+        mod_(GetSVEOffsetModifierFor(mod)),
+        shift_amount_(0) {
+    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
+    VIXL_ASSERT(IsScalarPlusVector());
+    VIXL_ASSERT(IsValid());
+  }
+
+  // "scalar-plus-scalar", like [x0, x1, LSL #1]
+  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
+  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
+  // `Shift` or `Extend` value.
+  template <typename M>
+  SVEMemOperand(Register base, CPURegister offset, M mod, unsigned shift_amount)
+      : base_(base),
+        regoffset_(offset),
+        offset_(0),
+        mod_(GetSVEOffsetModifierFor(mod)),
+        shift_amount_(shift_amount) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  // "vector-plus-scalar", like [z0.d, x0]
+  SVEMemOperand(ZRegister base, Register offset)
+      : base_(base),
+        regoffset_(offset),
+        offset_(0),
+        mod_(NO_SVE_OFFSET_MODIFIER),
+        shift_amount_(0) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(IsVectorPlusScalar());
+  }
+
+  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
+  template <typename M = SVEOffsetModifier>
+  SVEMemOperand(ZRegister base,
+                ZRegister offset,
+                M mod = NO_SVE_OFFSET_MODIFIER,
+                unsigned shift_amount = 0)
+      : base_(base),
+        regoffset_(offset),
+        offset_(0),
+        mod_(GetSVEOffsetModifierFor(mod)),
+        shift_amount_(shift_amount) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(IsVectorPlusVector());
+  }
+
+  // True for SVEMemOperands which represent something like [x0].
+  // This will also return true for [x0, #0], because there is no way
+  // to distinguish the two.
+  bool IsPlainScalar() const {
+    return IsScalarPlusImmediate() && (offset_ == 0);
+  }
+
+  // True for SVEMemOperands which represent something like [x0], or for
+  // compound SVEMemOperands which are functionally equivalent, such as
+  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
+  bool IsEquivalentToScalar() const;
+
+  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
+  // similar.
+  bool IsPlainRegister() const;
+
+  bool IsScalarPlusImmediate() const {
+    return base_.IsX() && regoffset_.IsNone() &&
+           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
+  }
+
+  bool IsScalarPlusScalar() const {
+    // SVE offers no extend modes for scalar-plus-scalar, so both registers must
+    // be X registers.
+    return base_.IsX() && regoffset_.IsX() &&
+           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
+  }
+
+  bool IsScalarPlusVector() const {
+    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
+    // in the core ISA, these extend modes do not imply an S-sized lane, so the
+    // modifier is independent from the lane size.
The architecture describes + // [US]XTW with a D-sized lane as an "unpacked" offset. + return base_.IsX() && regoffset_.IsZRegister() && + (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl(); + } + + bool IsVectorPlusImmediate() const { + return base_.IsZRegister() && + (base_.IsLaneSizeS() || base_.IsLaneSizeD()) && + regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER); + } + + bool IsVectorPlusScalar() const { + return base_.IsZRegister() && regoffset_.IsX() && + (base_.IsLaneSizeS() || base_.IsLaneSizeD()); + } + + bool IsVectorPlusVector() const { + return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) && + AreSameFormat(base_, regoffset_) && + (base_.IsLaneSizeS() || base_.IsLaneSizeD()); + } + + bool IsContiguous() const { return !IsScatterGather(); } + bool IsScatterGather() const { + return base_.IsZRegister() || regoffset_.IsZRegister(); + } + + // TODO: If necessary, add helpers like `HasScalarBase()`. + + Register GetScalarBase() const { + VIXL_ASSERT(base_.IsX()); + return Register(base_); + } + + ZRegister GetVectorBase() const { + VIXL_ASSERT(base_.IsZRegister()); + VIXL_ASSERT(base_.HasLaneSize()); + return ZRegister(base_); + } + + Register GetScalarOffset() const { + VIXL_ASSERT(regoffset_.IsRegister()); + return Register(regoffset_); + } + + ZRegister GetVectorOffset() const { + VIXL_ASSERT(regoffset_.IsZRegister()); + VIXL_ASSERT(regoffset_.HasLaneSize()); + return ZRegister(regoffset_); + } + + int64_t GetImmediateOffset() const { + VIXL_ASSERT(regoffset_.IsNone()); + return offset_; + } + + SVEOffsetModifier GetOffsetModifier() const { return mod_; } + unsigned GetShiftAmount() const { return shift_amount_; } + + bool IsEquivalentToLSL(unsigned amount) const { + if (shift_amount_ != amount) return false; + if (amount == 0) { + // No-shift is equivalent to "LSL #0". + return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER)); + } + return mod_ == SVE_LSL; + } + + bool IsMulVl() const { return mod_ == SVE_MUL_VL; } + + bool IsValid() const; + + private: + // Allow standard `Shift` and `Extend` arguments to be used. + SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) { + if (shift == LSL) return SVE_LSL; + if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER; + // SVE does not accept any other shift. + VIXL_UNIMPLEMENTED(); + return NO_SVE_OFFSET_MODIFIER; + } + + SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) { + if (extend == UXTW) return SVE_UXTW; + if (extend == SXTW) return SVE_SXTW; + if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER; + // SVE does not accept any other extend mode. + VIXL_UNIMPLEMENTED(); + return NO_SVE_OFFSET_MODIFIER; + } + + SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) { + return mod; + } + + CPURegister base_; + CPURegister regoffset_; + int64_t offset_; + SVEOffsetModifier mod_; + unsigned shift_amount_; +}; + +// Represent a signed or unsigned integer operand. +// +// This is designed to make instructions which naturally accept a _signed_ +// immediate easier to implement and use, when we also want users to be able to +// specify raw-bits values (such as with hexadecimal constants). The advantage +// of this class over a simple uint64_t (with implicit C++ sign-extension) is +// that this class can strictly check the range of allowed values. With a simple +// uint64_t, it is impossible to distinguish -1 from UINT64_MAX. +// +// For example, these instructions are equivalent: +// +// __ Insr(z0.VnB(), -1); +// __ Insr(z0.VnB(), 0xff); +// +// ... 
as are these:
+//
+//   __ Insr(z0.VnD(), -1);
+//   __ Insr(z0.VnD(), 0xffffffffffffffff);
+//
+// ... but this is invalid:
+//
+//   __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
+class IntegerOperand {
+ public:
+#define VIXL_INT_TYPES(V) \
+  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(google-runtime-int)
+#define VIXL_DECL_INT_OVERLOADS(T)                                        \
+  /* These are allowed to be implicit constructors because this is a */   \
+  /* wrapper class that doesn't normally perform any type conversion. */  \
+  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */       \
+      : raw_bits_(immediate),        /* Allow implicit sign-extension. */ \
+        is_negative_(immediate < 0) {}                                    \
+  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */     \
+      : raw_bits_(immediate), is_negative_(false) {}
+  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
+#undef VIXL_DECL_INT_OVERLOADS
+#undef VIXL_INT_TYPES
+
+  // TODO: `Operand` can currently only hold an int64_t, so some large,
+  // unsigned values will be misrepresented here.
+  explicit IntegerOperand(const Operand& operand)
+      : raw_bits_(operand.GetEquivalentImmediate()),
+        is_negative_(operand.GetEquivalentImmediate() < 0) {}
+
+  bool IsIntN(unsigned n) const {
+    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
+                        : vixl::IsIntN(n, raw_bits_);
+  }
+  bool IsUintN(unsigned n) const {
+    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
+  }
+
+  bool IsUint8() const { return IsUintN(8); }
+  bool IsUint16() const { return IsUintN(16); }
+  bool IsUint32() const { return IsUintN(32); }
+  bool IsUint64() const { return IsUintN(64); }
+
+  bool IsInt8() const { return IsIntN(8); }
+  bool IsInt16() const { return IsIntN(16); }
+  bool IsInt32() const { return IsIntN(32); }
+  bool IsInt64() const { return IsIntN(64); }
+
+  bool FitsInBits(unsigned n) const {
+    return is_negative_ ? IsIntN(n) : IsUintN(n);
+  }
+  bool FitsInLane(const CPURegister& zd) const {
+    return FitsInBits(zd.GetLaneSizeInBits());
+  }
+  bool FitsInSignedLane(const CPURegister& zd) const {
+    return IsIntN(zd.GetLaneSizeInBits());
+  }
+  bool FitsInUnsignedLane(const CPURegister& zd) const {
+    return IsUintN(zd.GetLaneSizeInBits());
+  }
+
+  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
+  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
+  uint64_t AsUintN(unsigned n) const {
+    VIXL_ASSERT(FitsInBits(n));
+    return raw_bits_ & GetUintMask(n);
+  }
+
+  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
+  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
+  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
+  uint64_t AsUint64() const { return AsUintN(64); }
+
+  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
+  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
+  int64_t AsIntN(unsigned n) const {
+    VIXL_ASSERT(FitsInBits(n));
+    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
+  }
+
+  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
+  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
+  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
+  int64_t AsInt64() const { return AsIntN(64); }
+
+  // Several instructions encode a signed int<N>_t, which is then (optionally)
+  // left-shifted and sign-extended to a Z register lane with a size which may
+  // be larger than N. This helper tries to find an int<N>_t such that the
+  // IntegerOperand's arithmetic value is reproduced in each lane.
+  //
+  // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
+  // `Insr(z0.VnB(), -1)`.
+  template <unsigned N, unsigned kShift, typename T>
+  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
+    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
+    VIXL_ASSERT(FitsInLane(zd));
+    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;
+
+    // Reverse the specified left-shift.
+    IntegerOperand unshifted(*this);
+    unshifted.ArithmeticShiftRight(kShift);
+
+    if (unshifted.IsIntN(N)) {
+      // This is trivial, since sign-extension produces the same arithmetic
+      // value irrespective of the destination size.
+      *imm = static_cast<T>(unshifted.AsIntN(N));
+      return true;
+    }
+
+    // Otherwise, we might be able to use the sign-extension to produce the
+    // desired bit pattern. We can only do this for values in the range
+    // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign
+    // bit.
+    //
+    // The lane size has to be adjusted to compensate for `kShift`, since the
+    // high bits will be dropped when the encoded value is left-shifted.
+    if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
+      int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
+      if (vixl::IsIntN(N, encoded)) {
+        *imm = static_cast<T>(encoded);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // As above, but `kShift` is written to the `*shift` parameter on success, so
+  // that it is easy to chain calls like this:
+  //
+  //     if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
+  //         imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
+  //       insn(zd, imm8, shift)
+  //     }
+  template <unsigned N, unsigned kShift, typename T, typename S>
+  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
+                                     T* imm,
+                                     S* shift) const {
+    if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
+      *shift = kShift;
+      return true;
+    }
+    return false;
+  }
+
+  // As above, but assume that `kShift` is 0.
+  template <unsigned N, typename T>
+  bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
+    return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
+  }
+
+  // As above, but for unsigned fields. This is usually a simple operation, but
+  // is provided for symmetry.
+  template <unsigned N, unsigned kShift, typename T>
+  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
+    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
+    VIXL_ASSERT(FitsInLane(zd));
+
+    // TODO: Should we convert -1 to 0xff here?
+    if (is_negative_) return false;
+    USE(zd);
+
+    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;
+
+    if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
+      *imm = static_cast<T>(raw_bits_ >> kShift);
+      return true;
+    }
+    return false;
+  }
+
+  template <unsigned N, unsigned kShift, typename T, typename S>
+  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
+                                      T* imm,
+                                      S* shift) const {
+    if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
+      *shift = kShift;
+      return true;
+    }
+    return false;
+  }
+
+  bool IsZero() const { return raw_bits_ == 0; }
+  bool IsNegative() const { return is_negative_; }
+  bool IsPositiveOrZero() const { return !is_negative_; }
+
+  uint64_t GetMagnitude() const {
+    return is_negative_ ? UnsignedNegate(raw_bits_) : raw_bits_;
+  }
+
+ private:
+  // Shift the arithmetic value right, with sign extension if is_negative_.
+  void ArithmeticShiftRight(int shift) {
+    VIXL_ASSERT((shift >= 0) && (shift < 64));
+    if (shift == 0) return;
+    if (is_negative_) {
+      raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
+    } else {
+      raw_bits_ >>= shift;
+    }
+  }
+
+  uint64_t raw_bits_;
+  bool is_negative_;
+};
+
 // This is an abstraction that can represent a register or memory location.
 // The `MacroAssembler` provides helpers to move data between generic operands.
 class GenericOperand {
@@ -987,7 +993,7 @@ class GenericOperand {
   // We only support sizes up to X/D register sizes.
   size_t mem_op_size_;
 };
-}
-}  // namespace vixl::aarch64
+}  // namespace aarch64
+}  // namespace vixl
 
 #endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/registers-aarch64.h b/dep/vixl/include/vixl/aarch64/registers-aarch64.h
new file mode 100644
index 000000000..53bbe132f
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/registers-aarch64.h
@@ -0,0 +1,902 @@
+// Copyright 2019, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH64_REGISTERS_AARCH64_H_
+#define VIXL_AARCH64_REGISTERS_AARCH64_H_
+
+#include <string>
+
+#include "instructions-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+// An integer type capable of representing a homogeneous, non-overlapping set
+// of registers as a bitmask of their codes.
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+class Register;
+class WRegister;
+class XRegister;
+
+class VRegister;
+class BRegister;
+class HRegister;
+class SRegister;
+class DRegister;
+class QRegister;
+
+class ZRegister;
+
+class PRegister;
+class PRegisterWithLaneSize;
+class PRegisterM;
+class PRegisterZ;
+
+// A container for any single register supported by the processor. Selected
+// qualifications are also supported. Basic registers can be constructed
+// directly as CPURegister objects. Other variants should be constructed as one
+// of the derived classes.
+//
+// CPURegister aims to support any getter that would also be available to more
+// specialised register types. However, using the equivalent functions on the
+// specialised register types can avoid run-time checks, and should therefore
+// be preferred where run-time polymorphism isn't required.
+//
+// Type-specific modifiers are typically implemented only on the derived
+// classes.
+//
+// The encoding is such that CPURegister objects are cheap to pass by value.
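+//
+// As a hedged illustration (not part of the VIXL sources), the run-time
+// polymorphism described above might be used like this:
+//
+//   void NameIfX(CPURegister r) {       // Cheap to pass by value.
+//     if (r.IsRegister() && r.Is64Bits()) {
+//       Register x = r.X();             // Safe: r aliases an X register.
+//       std::string name = x.GetArchitecturalName();
+//     }
+//   }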
+class CPURegister {
+ public:
+  enum RegisterBank : uint8_t {
+    kNoRegisterBank = 0,
+    kRRegisterBank,
+    kVRegisterBank,
+    kPRegisterBank
+  };
+  enum RegisterType {
+    kNoRegister,
+    kRegister,
+    kVRegister,
+    kZRegister,
+    kPRegister
+  };
+
+  static const unsigned kUnknownSize = 0;
+
+  VIXL_CONSTEXPR CPURegister()
+      : code_(0),
+        bank_(kNoRegisterBank),
+        size_(kEncodedUnknownSize),
+        qualifiers_(kNoQualifiers),
+        lane_size_(kEncodedUnknownSize) {}
+
+  CPURegister(int code, int size_in_bits, RegisterType type)
+      : code_(code),
+        bank_(GetBankFor(type)),
+        size_(EncodeSizeInBits(size_in_bits)),
+        qualifiers_(kNoQualifiers),
+        lane_size_(EncodeSizeInBits(size_in_bits)) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  // Basic accessors.
+
+  // TODO: Make this return 'int'.
+  unsigned GetCode() const { return code_; }
+
+  RegisterBank GetBank() const { return bank_; }
+
+  // For scalar registers, the lane size matches the register size, and is
+  // always known.
+  bool HasSize() const { return size_ != kEncodedUnknownSize; }
+  bool HasLaneSize() const { return lane_size_ != kEncodedUnknownSize; }
+
+  RegList GetBit() const {
+    if (IsNone()) return 0;
+    VIXL_ASSERT(code_ < kRegListSizeInBits);
+    return static_cast<RegList>(1) << code_;
+  }
+
+  // Return the architectural name for this register.
+  // TODO: This is temporary. Ultimately, we should move the
+  // Simulator::*RegNameForCode helpers out of the simulator, and provide an
+  // independent way to obtain the name of a register.
+  std::string GetArchitecturalName() const;
+
+  // Return the highest valid register code for this type, to allow generic
+  // loops to be written. This excludes kSPRegInternalCode, since it is not
+  // contiguous, and sp usually requires special handling anyway.
+  unsigned GetMaxCode() const { return GetMaxCodeFor(GetBank()); }
+
+  // Registers without a known size report kUnknownSize.
+  int GetSizeInBits() const { return DecodeSizeInBits(size_); }
+  int GetSizeInBytes() const { return DecodeSizeInBytes(size_); }
+  // TODO: Make these return 'int'.
+  unsigned GetLaneSizeInBits() const { return DecodeSizeInBits(lane_size_); }
+  unsigned GetLaneSizeInBytes() const { return DecodeSizeInBytes(lane_size_); }
+  unsigned GetLaneSizeInBytesLog2() const {
+    VIXL_ASSERT(HasLaneSize());
+    return DecodeSizeInBytesLog2(lane_size_);
+  }
+
+  int GetLanes() const {
+    if (HasSize() && HasLaneSize()) {
+      // Take advantage of the size encoding to calculate this efficiently.
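+      // Worked example (illustrative): for v0.V4S(), size_ encodes 128 bits
+      // (kEncodedQRegSize) and lane_size_ encodes 32 bits (kEncodedSRegSize).
+      // The encodings differ by 2, so log2_delta is 2 and the lane count is
+      // 1 << 2 = 4.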
+      VIXL_STATIC_ASSERT(kEncodedHRegSize == (kEncodedBRegSize + 1));
+      VIXL_STATIC_ASSERT(kEncodedSRegSize == (kEncodedHRegSize + 1));
+      VIXL_STATIC_ASSERT(kEncodedDRegSize == (kEncodedSRegSize + 1));
+      VIXL_STATIC_ASSERT(kEncodedQRegSize == (kEncodedDRegSize + 1));
+      int log2_delta = static_cast<int>(size_) - static_cast<int>(lane_size_);
+      VIXL_ASSERT(log2_delta >= 0);
+      return 1 << log2_delta;
+    }
+    return kUnknownSize;
+  }
+
+  bool Is8Bits() const { return size_ == kEncodedBRegSize; }
+  bool Is16Bits() const { return size_ == kEncodedHRegSize; }
+  bool Is32Bits() const { return size_ == kEncodedSRegSize; }
+  bool Is64Bits() const { return size_ == kEncodedDRegSize; }
+  bool Is128Bits() const { return size_ == kEncodedQRegSize; }
+
+  bool IsLaneSizeB() const { return lane_size_ == kEncodedBRegSize; }
+  bool IsLaneSizeH() const { return lane_size_ == kEncodedHRegSize; }
+  bool IsLaneSizeS() const { return lane_size_ == kEncodedSRegSize; }
+  bool IsLaneSizeD() const { return lane_size_ == kEncodedDRegSize; }
+  bool IsLaneSizeQ() const { return lane_size_ == kEncodedQRegSize; }
+
+  // If IsRegister(), then it is valid to convert the CPURegister to some
+  // Register type.
+  //
+  // If...                          ... then it is safe to construct ...
+  //   r.IsRegister()                 -> Register(r)
+  //   r.IsVRegister()                -> VRegister(r)
+  //   r.IsZRegister()                -> ZRegister(r)
+  //   r.IsPRegister()                -> PRegister(r)
+  //
+  //   r.IsPRegister() && HasLaneSize() -> PRegisterWithLaneSize(r)
+  //   r.IsPRegister() && IsMerging()   -> PRegisterM(r)
+  //   r.IsPRegister() && IsZeroing()   -> PRegisterZ(r)
+  bool IsRegister() const { return GetType() == kRegister; }
+  bool IsVRegister() const { return GetType() == kVRegister; }
+  bool IsZRegister() const { return GetType() == kZRegister; }
+  bool IsPRegister() const { return GetType() == kPRegister; }
+
+  bool IsNone() const { return GetType() == kNoRegister; }
+
+  // `GetType() == kNoRegister` implies IsNone(), and vice-versa.
+  // `GetType() == kRegister` implies IsRegister(), and vice-versa.
+  RegisterType GetType() const {
+    switch (bank_) {
+      case kNoRegisterBank:
+        return kNoRegister;
+      case kRRegisterBank:
+        return kRegister;
+      case kVRegisterBank:
+        return HasSize() ? kVRegister : kZRegister;
+      case kPRegisterBank:
+        return kPRegister;
+    }
+    VIXL_UNREACHABLE();
+    return kNoRegister;
+  }
+
+  // IsFPRegister() is true for scalar FP types (and therefore implies
+  // IsVRegister()). There is no corresponding FPRegister type.
+  bool IsFPRegister() const { return Is1H() || Is1S() || Is1D(); }
+
+  // TODO: These are stricter forms of the helpers above. We should make the
+  // basic helpers strict, and remove these.
+  bool IsValidRegister() const;
+  bool IsValidVRegister() const;
+  bool IsValidFPRegister() const;
+  bool IsValidZRegister() const;
+  bool IsValidPRegister() const;
+
+  bool IsValid() const;
+  bool IsValidOrNone() const { return IsNone() || IsValid(); }
+
+  bool IsVector() const { return HasLaneSize() && (size_ != lane_size_); }
+  bool IsScalar() const { return HasLaneSize() && (size_ == lane_size_); }
+
+  bool IsSameType(const CPURegister& other) const {
+    return GetType() == other.GetType();
+  }
+
+  bool IsSameBank(const CPURegister& other) const {
+    return GetBank() == other.GetBank();
+  }
+
+  // Two registers with unknown size are considered to have the same size if
+  // they also have the same type. For example, all Z registers have the same
+  // size, even though we don't know what that is.
+  bool IsSameSizeAndType(const CPURegister& other) const {
+    return IsSameType(other) && (size_ == other.size_);
+  }
+
+  bool IsSameFormat(const CPURegister& other) const {
+    return IsSameSizeAndType(other) && (lane_size_ == other.lane_size_);
+  }
+
+  // Note that NoReg aliases itself, so that 'Is' implies 'Aliases'.
+  bool Aliases(const CPURegister& other) const {
+    return IsSameBank(other) && (code_ == other.code_);
+  }
+
+  bool Is(const CPURegister& other) const {
+    if (IsRegister() || IsVRegister()) {
+      // For core (W, X) and FP/NEON registers, we only consider the code, size
+      // and type. This is legacy behaviour.
+      // TODO: We should probably check every field for all registers.
+      return Aliases(other) && (size_ == other.size_);
+    } else {
+      // For Z and P registers, we require all fields to match exactly.
+      VIXL_ASSERT(IsNone() || IsZRegister() || IsPRegister());
+      return (code_ == other.code_) && (bank_ == other.bank_) &&
+             (size_ == other.size_) && (qualifiers_ == other.qualifiers_) &&
+             (lane_size_ == other.lane_size_);
+    }
+  }
+
+  // Conversions to specific register types. The result is a register that
+  // aliases the original CPURegister. That is, the original register bank
+  // (`GetBank()`) is checked and the code (`GetCode()`) preserved, but all
+  // other properties are ignored.
+  //
+  // Typical usage:
+  //
+  //     if (reg.GetBank() == kVRegisterBank) {
+  //       DRegister d = reg.D();
+  //       ...
+  //     }
+  //
+  // These could all return types with compile-time guarantees (like
+  // XRegister), but this breaks backwards-compatibility quite severely,
+  // particularly with code like `cond ? reg.W() : reg.X()`, which would have
+  // indeterminate type.
+
+  // Core registers, like "w0".
+  Register W() const;
+  Register X() const;
+  // FP/NEON registers, like "b0".
+  VRegister B() const;
+  VRegister H() const;
+  VRegister S() const;
+  VRegister D() const;
+  VRegister Q() const;
+  VRegister V() const;
+  // SVE registers, like "z0".
+  ZRegister Z() const;
+  PRegister P() const;
+
+  // Utilities for kRegister types.
+
+  bool IsZero() const { return IsRegister() && (code_ == kZeroRegCode); }
+  bool IsSP() const { return IsRegister() && (code_ == kSPRegInternalCode); }
+  bool IsW() const { return IsRegister() && Is32Bits(); }
+  bool IsX() const { return IsRegister() && Is64Bits(); }
+
+  // Utilities for FP/NEON kVRegister types.
+
+  // These helpers ensure that the size and type of the register are as
+  // described. They do not consider the number of lanes that make up a vector.
+  // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
+  // does not imply Is1D() or Is8B().
+  // Check the number of lanes, i.e. the format of the vector, using methods
+  // such as Is8B(), Is1D(), etc.
+  bool IsB() const { return IsVRegister() && Is8Bits(); }
+  bool IsH() const { return IsVRegister() && Is16Bits(); }
+  bool IsS() const { return IsVRegister() && Is32Bits(); }
+  bool IsD() const { return IsVRegister() && Is64Bits(); }
+  bool IsQ() const { return IsVRegister() && Is128Bits(); }
+
+  // As above, but also check that the register has exactly one lane. For
+  // example, reg.Is1D() implies DRegister(reg).IsValid(), but reg.IsD() does
+  // not.
+  bool Is1B() const { return IsB() && IsScalar(); }
+  bool Is1H() const { return IsH() && IsScalar(); }
+  bool Is1S() const { return IsS() && IsScalar(); }
+  bool Is1D() const { return IsD() && IsScalar(); }
+  bool Is1Q() const { return IsQ() && IsScalar(); }
+
+  // Check the specific NEON format.
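+  // For example (an illustrative sketch, not from the VIXL sources): given
+  // `VRegister d = v0.V8B();`, the predicates d.Is8B(), d.IsD() and
+  // d.IsLaneSizeB() all hold, but d.Is1D() does not, because the register has
+  // eight lanes rather than one.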
+  bool Is8B() const { return IsD() && IsLaneSizeB(); }
+  bool Is16B() const { return IsQ() && IsLaneSizeB(); }
+  bool Is2H() const { return IsS() && IsLaneSizeH(); }
+  bool Is4H() const { return IsD() && IsLaneSizeH(); }
+  bool Is8H() const { return IsQ() && IsLaneSizeH(); }
+  bool Is2S() const { return IsD() && IsLaneSizeS(); }
+  bool Is4S() const { return IsQ() && IsLaneSizeS(); }
+  bool Is2D() const { return IsQ() && IsLaneSizeD(); }
+
+  // A semantic alias for sdot and udot (indexed and by element) instructions.
+  // The current CPURegister implementation cannot tell this from Is1S(), but
+  // it might do later.
+  // TODO: Do this with the qualifiers_ field.
+  bool Is1S4B() const { return Is1S(); }
+
+  // Utilities for SVE registers.
+
+  bool IsUnqualified() const { return qualifiers_ == kNoQualifiers; }
+  bool IsMerging() const { return IsPRegister() && (qualifiers_ == kMerging); }
+  bool IsZeroing() const { return IsPRegister() && (qualifiers_ == kZeroing); }
+
+  // SVE types have unknown sizes, but within known bounds.
+
+  int GetMaxSizeInBytes() const {
+    switch (GetType()) {
+      case kZRegister:
+        return kZRegMaxSizeInBytes;
+      case kPRegister:
+        return kPRegMaxSizeInBytes;
+      default:
+        VIXL_ASSERT(HasSize());
+        return GetSizeInBytes();
+    }
+  }
+
+  int GetMinSizeInBytes() const {
+    switch (GetType()) {
+      case kZRegister:
+        return kZRegMinSizeInBytes;
+      case kPRegister:
+        return kPRegMinSizeInBytes;
+      default:
+        VIXL_ASSERT(HasSize());
+        return GetSizeInBytes();
+    }
+  }
+
+  int GetMaxSizeInBits() const { return GetMaxSizeInBytes() * kBitsPerByte; }
+  int GetMinSizeInBits() const { return GetMinSizeInBytes() * kBitsPerByte; }
+
+  static RegisterBank GetBankFor(RegisterType type) {
+    switch (type) {
+      case kNoRegister:
+        return kNoRegisterBank;
+      case kRegister:
+        return kRRegisterBank;
+      case kVRegister:
+      case kZRegister:
+        return kVRegisterBank;
+      case kPRegister:
+        return kPRegisterBank;
+    }
+    VIXL_UNREACHABLE();
+    return kNoRegisterBank;
+  }
+
+  static unsigned GetMaxCodeFor(CPURegister::RegisterType type) {
+    return GetMaxCodeFor(GetBankFor(type));
+  }
+
+ protected:
+  enum EncodedSize : uint8_t {
+    // Ensure that kUnknownSize (and therefore kNoRegister) is encoded as zero.
+    kEncodedUnknownSize = 0,
+
+    // The implementation assumes that the remaining sizes are encoded as
+    // `log2(size) + c`, so the following names must remain in sequence.
+ kEncodedBRegSize, + kEncodedHRegSize, + kEncodedSRegSize, + kEncodedDRegSize, + kEncodedQRegSize, + + kEncodedWRegSize = kEncodedSRegSize, + kEncodedXRegSize = kEncodedDRegSize + }; + VIXL_STATIC_ASSERT(kSRegSize == kWRegSize); + VIXL_STATIC_ASSERT(kDRegSize == kXRegSize); + + char GetLaneSizeSymbol() const { + switch (lane_size_) { + case kEncodedBRegSize: + return 'B'; + case kEncodedHRegSize: + return 'H'; + case kEncodedSRegSize: + return 'S'; + case kEncodedDRegSize: + return 'D'; + case kEncodedQRegSize: + return 'Q'; + case kEncodedUnknownSize: + break; + } + VIXL_UNREACHABLE(); + return '?'; + } + + static EncodedSize EncodeSizeInBits(int size_in_bits) { + switch (size_in_bits) { + case kUnknownSize: + return kEncodedUnknownSize; + case kBRegSize: + return kEncodedBRegSize; + case kHRegSize: + return kEncodedHRegSize; + case kSRegSize: + return kEncodedSRegSize; + case kDRegSize: + return kEncodedDRegSize; + case kQRegSize: + return kEncodedQRegSize; + } + VIXL_UNREACHABLE(); + return kEncodedUnknownSize; + } + + static int DecodeSizeInBytesLog2(EncodedSize encoded_size) { + switch (encoded_size) { + case kEncodedUnknownSize: + // Log2 of B-sized lane in bytes is 0, so we can't just return 0 here. + VIXL_UNREACHABLE(); + return -1; + case kEncodedBRegSize: + return kBRegSizeInBytesLog2; + case kEncodedHRegSize: + return kHRegSizeInBytesLog2; + case kEncodedSRegSize: + return kSRegSizeInBytesLog2; + case kEncodedDRegSize: + return kDRegSizeInBytesLog2; + case kEncodedQRegSize: + return kQRegSizeInBytesLog2; + } + VIXL_UNREACHABLE(); + return kUnknownSize; + } + + static int DecodeSizeInBytes(EncodedSize encoded_size) { + if (encoded_size == kEncodedUnknownSize) { + return kUnknownSize; + } + return 1 << DecodeSizeInBytesLog2(encoded_size); + } + + static int DecodeSizeInBits(EncodedSize encoded_size) { + VIXL_STATIC_ASSERT(kUnknownSize == 0); + return DecodeSizeInBytes(encoded_size) * kBitsPerByte; + } + + static unsigned GetMaxCodeFor(CPURegister::RegisterBank bank); + + enum Qualifiers : uint8_t { + kNoQualifiers = 0, + // Used by P registers. + kMerging, + kZeroing + }; + + // An unchecked constructor, for use by derived classes. + CPURegister(int code, + EncodedSize size, + RegisterBank bank, + EncodedSize lane_size, + Qualifiers qualifiers = kNoQualifiers) + : code_(code), + bank_(bank), + size_(size), + qualifiers_(qualifiers), + lane_size_(lane_size) {} + + // TODO: Check that access to these fields is reasonably efficient. + uint8_t code_; + RegisterBank bank_; + EncodedSize size_; + Qualifiers qualifiers_; + EncodedSize lane_size_; +}; +// Ensure that CPURegisters can fit in a single (64-bit) register. This is a +// proxy for being "cheap to pass by value", which is hard to check directly. +VIXL_STATIC_ASSERT(sizeof(CPURegister) <= sizeof(uint64_t)); + +// TODO: Add constexpr constructors. +#define VIXL_DECLARE_REGISTER_COMMON(NAME, REGISTER_TYPE, PARENT_TYPE) \ + VIXL_CONSTEXPR NAME() : PARENT_TYPE() {} \ + \ + explicit NAME(CPURegister other) : PARENT_TYPE(other) { \ + VIXL_ASSERT(IsValid()); \ + } \ + \ + VIXL_CONSTEXPR static unsigned GetMaxCode() { \ + return kNumberOf##REGISTER_TYPE##s - 1; \ + } + +// Any W or X register, including the zero register and the stack pointer. 
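+//
+// For instance (illustrative only): `Register(3, kWRegSize)` and
+// `Register(3, kXRegSize)` construct w3 and x3. The two alias each other
+// (Aliases() is true) but are not equal (Is() is false), since their sizes
+// differ.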
+class Register : public CPURegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(Register, Register, CPURegister)
+
+  Register(int code, int size_in_bits)
+      : CPURegister(code, size_in_bits, kRegister) {
+    VIXL_ASSERT(IsValidRegister());
+  }
+
+  bool IsValid() const { return IsValidRegister(); }
+};
+
+// Any FP or NEON V register, including vector (V<n>.<lanes><size>) and scalar
+// forms (B, H, S, D, Q).
+class VRegister : public CPURegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(VRegister, VRegister, CPURegister)
+
+  // For historical reasons, VRegister(0) returns v0.1Q (or equivalently, q0).
+  explicit VRegister(int code, int size_in_bits = kQRegSize, int lanes = 1)
+      : CPURegister(code,
+                    EncodeSizeInBits(size_in_bits),
+                    kVRegisterBank,
+                    EncodeLaneSizeInBits(size_in_bits, lanes)) {
+    VIXL_ASSERT(IsValidVRegister());
+  }
+
+  VRegister(int code, VectorFormat format)
+      : CPURegister(code,
+                    EncodeSizeInBits(RegisterSizeInBitsFromFormat(format)),
+                    kVRegisterBank,
+                    EncodeSizeInBits(LaneSizeInBitsFromFormat(format)),
+                    kNoQualifiers) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  VRegister V8B() const;
+  VRegister V16B() const;
+  VRegister V2H() const;
+  VRegister V4H() const;
+  VRegister V8H() const;
+  VRegister V2S() const;
+  VRegister V4S() const;
+  VRegister V1D() const;
+  VRegister V2D() const;
+  VRegister V1Q() const;
+  VRegister S4B() const;
+
+  bool IsValid() const { return IsValidVRegister(); }
+
+ protected:
+  static EncodedSize EncodeLaneSizeInBits(int size_in_bits, int lanes) {
+    VIXL_ASSERT(lanes >= 1);
+    VIXL_ASSERT((size_in_bits % lanes) == 0);
+    return EncodeSizeInBits(size_in_bits / lanes);
+  }
+};
+
+// Any SVE Z register, with or without a lane size specifier.
+class ZRegister : public CPURegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(ZRegister, ZRegister, CPURegister)
+
+  explicit ZRegister(int code, int lane_size_in_bits = kUnknownSize)
+      : CPURegister(code,
+                    kEncodedUnknownSize,
+                    kVRegisterBank,
+                    EncodeSizeInBits(lane_size_in_bits)) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  ZRegister(int code, VectorFormat format)
+      : CPURegister(code,
+                    kEncodedUnknownSize,
+                    kVRegisterBank,
+                    EncodeSizeInBits(LaneSizeInBitsFromFormat(format)),
+                    kNoQualifiers) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  // Return a Z register with a known lane size (like "z0.B").
+  ZRegister VnB() const { return ZRegister(GetCode(), kBRegSize); }
+  ZRegister VnH() const { return ZRegister(GetCode(), kHRegSize); }
+  ZRegister VnS() const { return ZRegister(GetCode(), kSRegSize); }
+  ZRegister VnD() const { return ZRegister(GetCode(), kDRegSize); }
+  ZRegister VnQ() const { return ZRegister(GetCode(), kQRegSize); }
+
+  template <typename T>
+  ZRegister WithLaneSize(T format) const {
+    return ZRegister(GetCode(), format);
+  }
+
+  ZRegister WithSameLaneSizeAs(const CPURegister& other) const {
+    VIXL_ASSERT(other.HasLaneSize());
+    return this->WithLaneSize(other.GetLaneSizeInBits());
+  }
+
+  bool IsValid() const { return IsValidZRegister(); }
+};
+
+// Any SVE P register, with or without a qualifier or lane size specifier.
+class PRegister : public CPURegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(PRegister, PRegister, CPURegister)
+
+  explicit PRegister(int code) : CPURegister(code, kUnknownSize, kPRegister) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool IsValid() const {
+    return IsValidPRegister() && !HasLaneSize() && IsUnqualified();
+  }
+
+  // Return a P register with a known lane size (like "p0.B").
+  PRegisterWithLaneSize VnB() const;
+  PRegisterWithLaneSize VnH() const;
+  PRegisterWithLaneSize VnS() const;
+  PRegisterWithLaneSize VnD() const;
+
+  template <typename T>
+  PRegisterWithLaneSize WithLaneSize(T format) const;
+
+  PRegisterWithLaneSize WithSameLaneSizeAs(const CPURegister& other) const;
+
+  // SVE predicates are specified (in normal assembly) with a "/z" (zeroing) or
+  // "/m" (merging) suffix. These methods are VIXL's equivalents.
+  PRegisterZ Zeroing() const;
+  PRegisterM Merging() const;
+
+ protected:
+  // Unchecked constructors, for use by derived classes.
+  PRegister(int code, EncodedSize encoded_lane_size)
+      : CPURegister(code,
+                    kEncodedUnknownSize,
+                    kPRegisterBank,
+                    encoded_lane_size,
+                    kNoQualifiers) {}
+
+  PRegister(int code, Qualifiers qualifiers)
+      : CPURegister(code,
+                    kEncodedUnknownSize,
+                    kPRegisterBank,
+                    kEncodedUnknownSize,
+                    qualifiers) {}
+};
+
+// Any SVE P register with a known lane size (like "p0.B").
+class PRegisterWithLaneSize : public PRegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(PRegisterWithLaneSize, PRegister, PRegister)
+
+  PRegisterWithLaneSize(int code, int lane_size_in_bits)
+      : PRegister(code, EncodeSizeInBits(lane_size_in_bits)) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  PRegisterWithLaneSize(int code, VectorFormat format)
+      : PRegister(code, EncodeSizeInBits(LaneSizeInBitsFromFormat(format))) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool IsValid() const {
+    return IsValidPRegister() && HasLaneSize() && IsUnqualified();
+  }
+
+  // Overload lane size accessors so we can assert `HasLaneSize()`. This allows
+  // tools such as clang-tidy to prove that the result of GetLaneSize* is
+  // non-zero.
+
+  // TODO: Make these return 'int'.
+  unsigned GetLaneSizeInBits() const {
+    VIXL_ASSERT(HasLaneSize());
+    return PRegister::GetLaneSizeInBits();
+  }
+
+  unsigned GetLaneSizeInBytes() const {
+    VIXL_ASSERT(HasLaneSize());
+    return PRegister::GetLaneSizeInBytes();
+  }
+};
+
+// Any SVE P register with the zeroing qualifier (like "p0/z").
+class PRegisterZ : public PRegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(PRegisterZ, PRegister, PRegister)
+
+  explicit PRegisterZ(int code) : PRegister(code, kZeroing) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool IsValid() const {
+    return IsValidPRegister() && !HasLaneSize() && IsZeroing();
+  }
+};
+
+// Any SVE P register with the merging qualifier (like "p0/m").
+class PRegisterM : public PRegister {
+ public:
+  VIXL_DECLARE_REGISTER_COMMON(PRegisterM, PRegister, PRegister)
+
+  explicit PRegisterM(int code) : PRegister(code, kMerging) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool IsValid() const {
+    return IsValidPRegister() && !HasLaneSize() && IsMerging();
+  }
+};
+
+inline PRegisterWithLaneSize PRegister::VnB() const {
+  return PRegisterWithLaneSize(GetCode(), kBRegSize);
+}
+inline PRegisterWithLaneSize PRegister::VnH() const {
+  return PRegisterWithLaneSize(GetCode(), kHRegSize);
+}
+inline PRegisterWithLaneSize PRegister::VnS() const {
+  return PRegisterWithLaneSize(GetCode(), kSRegSize);
+}
+inline PRegisterWithLaneSize PRegister::VnD() const {
+  return PRegisterWithLaneSize(GetCode(), kDRegSize);
+}
+
+template <typename T>
+inline PRegisterWithLaneSize PRegister::WithLaneSize(T format) const {
+  return PRegisterWithLaneSize(GetCode(), format);
+}
+
+inline PRegisterWithLaneSize PRegister::WithSameLaneSizeAs(
+    const CPURegister& other) const {
+  VIXL_ASSERT(other.HasLaneSize());
+  return this->WithLaneSize(other.GetLaneSizeInBits());
+}
+
+inline PRegisterZ PRegister::Zeroing() const { return PRegisterZ(GetCode()); }
+inline PRegisterM PRegister::Merging() const { return PRegisterM(GetCode()); }
+
+#define VIXL_REGISTER_WITH_SIZE_LIST(V) \
+  V(WRegister, kWRegSize, Register)     \
+  V(XRegister, kXRegSize, Register)     \
+  V(QRegister, kQRegSize, VRegister)    \
+  V(DRegister, kDRegSize, VRegister)    \
+  V(SRegister, kSRegSize, VRegister)    \
+  V(HRegister, kHRegSize, VRegister)    \
+  V(BRegister, kBRegSize, VRegister)
+
+#define VIXL_DEFINE_REGISTER_WITH_SIZE(NAME, SIZE, PARENT)           \
+  class NAME : public PARENT {                                       \
+   public:                                                           \
+    VIXL_CONSTEXPR NAME() : PARENT() {}                              \
+    explicit NAME(int code) : PARENT(code, SIZE) {}                  \
+                                                                     \
+    explicit NAME(PARENT other) : PARENT(other) {                    \
+      VIXL_ASSERT(GetSizeInBits() == SIZE);                          \
+    }                                                                \
+                                                                     \
+    PARENT As##PARENT() const { return *this; }                      \
+                                                                     \
+    VIXL_CONSTEXPR int GetSizeInBits() const { return SIZE; }        \
+                                                                     \
+    bool IsValid() const {                                           \
+      return PARENT::IsValid() && (PARENT::GetSizeInBits() == SIZE); \
+    }                                                                \
+  };
+
+VIXL_REGISTER_WITH_SIZE_LIST(VIXL_DEFINE_REGISTER_WITH_SIZE)
+
+// No*Reg is used to provide default values for unused arguments, error cases
+// and so on. Note that these (and the default constructors) all compare equal
+// (using the Is() method).
+const Register NoReg;
+const VRegister NoVReg;
+const CPURegister NoCPUReg;
+const ZRegister NoZReg;
+
+// TODO: Ideally, these would use specialised register types (like XRegister
+// and so on). However, doing so throws up template overloading problems
+// elsewhere.
+#define VIXL_DEFINE_REGISTERS(N)       \
+  const Register w##N = WRegister(N);  \
+  const Register x##N = XRegister(N);  \
+  const VRegister b##N = BRegister(N); \
+  const VRegister h##N = HRegister(N); \
+  const VRegister s##N = SRegister(N); \
+  const VRegister d##N = DRegister(N); \
+  const VRegister q##N = QRegister(N); \
+  const VRegister v##N(N);             \
+  const ZRegister z##N(N);
+AARCH64_REGISTER_CODE_LIST(VIXL_DEFINE_REGISTERS)
+#undef VIXL_DEFINE_REGISTERS
+
+#define VIXL_DEFINE_P_REGISTERS(N) const PRegister p##N(N);
+AARCH64_P_REGISTER_CODE_LIST(VIXL_DEFINE_P_REGISTERS)
+#undef VIXL_DEFINE_P_REGISTERS
+
+// VIXL represents 'sp' with a unique code, to tell it apart from 'xzr'.
+const Register wsp = WRegister(kSPRegInternalCode);
+const Register sp = XRegister(kSPRegInternalCode);
+
+// Standard aliases.
+const Register ip0 = x16; +const Register ip1 = x17; +const Register lr = x30; +const Register xzr = x31; +const Register wzr = w31; + +// AreAliased returns true if any of the named registers overlap. Arguments +// set to NoReg are ignored. The system stack pointer may be specified. +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + +// AreSameSizeAndType returns true if all of the specified registers have the +// same size, and are of the same type. The system stack pointer may be +// specified. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, + const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, + const CPURegister& reg8 = NoCPUReg); + +// AreEven returns true if all of the specified registers have even register +// indices. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreEven(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + +// AreConsecutive returns true if all of the specified registers are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoCPUReg). +bool AreConsecutive(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + +// AreSameFormat returns true if all of the specified registers have the same +// vector format. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoVReg). +bool AreSameFormat(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + +// AreSameLaneSize returns true if all of the specified registers have the same +// element lane size, B, H, S or D. It doesn't compare the type of registers. +// Arguments set to NoReg are ignored, as are any subsequent arguments. +// At least one argument (reg1) must be valid (not NoVReg). +// TODO: Remove this, and replace its uses with AreSameFormat. 
+bool AreSameLaneSize(const CPURegister& reg1,
+                     const CPURegister& reg2,
+                     const CPURegister& reg3 = NoCPUReg,
+                     const CPURegister& reg4 = NoCPUReg);
+}  // namespace aarch64
+}  // namespace vixl
+
+#endif  // VIXL_AARCH64_REGISTERS_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/simulator-aarch64.h b/dep/vixl/include/vixl/aarch64/simulator-aarch64.h
index 061a7dab3..65c07dfa3 100644
--- a/dep/vixl/include/vixl/aarch64/simulator-aarch64.h
+++ b/dep/vixl/include/vixl/aarch64/simulator-aarch64.h
@@ -27,6 +27,8 @@
 #ifndef VIXL_AARCH64_SIMULATOR_AARCH64_H_
 #define VIXL_AARCH64_SIMULATOR_AARCH64_H_
 
+#include <memory>
+#include <unordered_map>
 #include <vector>
 
 #include "../globals-vixl.h"
@@ -35,9 +37,9 @@
 
 #include "abi-aarch64.h"
 #include "cpu-features-auditor-aarch64.h"
+#include "debugger-aarch64.h"
 #include "disasm-aarch64.h"
 #include "instructions-aarch64.h"
-#include "instrument-aarch64.h"
 #include "simulator-constants-aarch64.h"
 
 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
@@ -52,54 +54,466 @@
 #endif
 #endif
 
+// The host that the Simulator is running on may not have these flags defined.
+#ifndef PROT_BTI
+#define PROT_BTI 0x10
+#endif
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
+
 namespace vixl {
 namespace aarch64 {
 
+class Simulator;
+struct RuntimeCallStructHelper;
+
+enum class MemoryAccessResult { Success = 0, Failure = 1 };
+
+// Try to access a piece of memory at the given address. Accessing that memory
+// might raise a signal which, if handled by a custom signal handler, should
+// set up the native and simulated context in order to continue. Return whether
+// the memory access failed (i.e. raised a signal) or succeeded.
+MemoryAccessResult TryMemoryAccess(uintptr_t address, uintptr_t access_size);
+
+#ifdef VIXL_ENABLE_IMPLICIT_CHECKS
+// Access a byte of memory from the address at the given offset. If the memory
+// could be accessed then return MemoryAccessResult::Success. If the memory
+// could not be accessed, and therefore raised a signal, set up the simulated
+// context and return MemoryAccessResult::Failure.
+//
+// If a signal is raised then it is expected that the signal handler will place
+// MemoryAccessResult::Failure in the native return register and the address of
+// _vixl_internal_AccessMemory_continue into the native instruction pointer.
+extern "C" MemoryAccessResult _vixl_internal_ReadMemory(uintptr_t address,
+                                                        uintptr_t offset);
+extern "C" uintptr_t _vixl_internal_AccessMemory_continue();
+#endif  // VIXL_ENABLE_IMPLICIT_CHECKS
+
+class SimStack {
+ public:
+  SimStack() {}
+  explicit SimStack(size_t size) : usable_size_(size) {}
+
+  // Guard against accesses above the stack base. This could occur, for
+  // example, if the first simulated function tries to read stack arguments
+  // that haven't been properly initialised in the Simulator's stack.
+  void SetBaseGuardSize(size_t size) { base_guard_size_ = size; }
+
+  // Guard against stack overflows. The size should be large enough to detect
+  // the largest stride made (by `MacroAssembler::Claim()` or equivalent)
+  // whilst initialising stack objects.
+  void SetLimitGuardSize(size_t size) { limit_guard_size_ = size; }
+
+  // The minimum usable size of the stack.
+  // Equal to "stack base" - "stack limit", in AAPCS64 terminology.
+  void SetUsableSize(size_t size) { usable_size_ = size; }
+
+  // Set the minimum alignment for the stack parameters.
+  void AlignToBytesLog2(int align_log2) { align_log2_ = align_log2; }
+
+  class Allocated {
+   public:
+    // Using AAPCS64 terminology, highest addresses at the top:
+    //
+    //  data_.get() + alloc_size ->
+    //                              |
+    //                              | Base guard
+    //                 GetBase() -> |                 |
+    //                              |                 |
+    //                              |                 | AAPCS64-legal
+    //                              | Usable stack    | values of 'sp'.
+    //                              |                 |
+    //                              |                 |
+    //                GetLimit() -> |
+    //                              | Limit guard
+    //                data_.get() ->
+    //
+    // The Simulator detects (and forbids) accesses to either guard region.
+
+    char* GetBase() const { return base_; }
+    char* GetLimit() const { return limit_; }
+
+    template <typename T>
+    bool IsAccessInGuardRegion(const T* base, size_t size) const {
+      VIXL_ASSERT(size > 0);
+      // Inclusive bounds.
+      const char* start = reinterpret_cast<const char*>(base);
+      const char* end = start + size - 1;
+      const char* data_start = data_.get();
+      const char* data_end = data_start + alloc_size_ - 1;
+      bool in_base_guard = (start <= data_end) && (end >= base_);
+      bool in_limit_guard = (start <= limit_) && (end >= data_start);
+      return in_base_guard || in_limit_guard;
+    }
+
+   private:
+    std::unique_ptr<char[]> data_;
+    char* limit_;
+    char* base_;
+    size_t alloc_size_;
+
+    friend class SimStack;
+  };
+
+  // Allocate the stack, locking the parameters.
+  Allocated Allocate() {
+    size_t align_to = 1 << align_log2_;
+    size_t l = AlignUp(limit_guard_size_, align_to);
+    size_t u = AlignUp(usable_size_, align_to);
+    size_t b = AlignUp(base_guard_size_, align_to);
+    size_t size = l + u + b;
+
+    Allocated a;
+    size_t alloc_size = (align_to - 1) + size;
+    a.data_ = std::make_unique<char[]>(alloc_size);
+    void* data = a.data_.get();
+    auto data_aligned =
+        reinterpret_cast<char*>(std::align(align_to, size, data, alloc_size));
+    a.limit_ = data_aligned + l - 1;
+    a.base_ = data_aligned + l + u;
+    a.alloc_size_ = alloc_size;
+    return a;
+  }
+
+ private:
+  size_t base_guard_size_ = 256;
+  size_t limit_guard_size_ = 4 * 1024;
+  size_t usable_size_ = 8 * 1024;
+  size_t align_log2_ = 4;
+
+  static const size_t kDefaultBaseGuardSize = 256;
+  static const size_t kDefaultLimitGuardSize = 4 * 1024;
+  static const size_t kDefaultUsableSize = 8 * 1024;
+};
+
+// Armv8.5 MTE helpers.
+inline int GetAllocationTagFromAddress(uint64_t address) {
+  return static_cast<int>(ExtractUnsignedBitfield64(59, 56, address));
+}
+
+template <typename T>
+T AddressUntag(T address) {
+  // Cast the address using a C-style cast. A reinterpret_cast would be
+  // appropriate, but it can't cast one integral type to another.
+  uint64_t bits = (uint64_t)address;
+  return (T)(bits & ~kAddressTagMask);
+}
+
+// A callback function, called when a function has been intercepted if a
+// BranchInterception entry exists in branch_interceptions. The address of
+// the intercepted function is passed to the callback. For usage see
+// BranchInterception.
+using InterceptionCallback = std::function<void(uint64_t)>;
+
+class MetaDataDepot {
+ public:
+  class MetaDataMTE {
+   public:
+    explicit MetaDataMTE(int tag) : tag_(tag) {}
+
+    int GetTag() const { return tag_; }
+    void SetTag(int tag) {
+      VIXL_ASSERT(IsUint4(tag));
+      tag_ = tag;
+    }
+
+    static bool IsActive() { return is_active; }
+    static void SetActive(bool value) { is_active = value; }
+
+   private:
+    static bool is_active;
+    int16_t tag_;
+
+    friend class MetaDataDepot;
+  };
+
+  // Generate a key for metadata recording from an untagged address.
+  template <typename T>
+  uint64_t GenerateMTEkey(T address) const {
+    // Cast the address using a C-style cast. A reinterpret_cast would be
+    // appropriate, but it can't cast one integral type to another.
+    return (uint64_t)(AddressUntag(address)) >> kMTETagGranuleInBytesLog2;
+  }
+
+  template <typename R, typename T>
+  R GetAttribute(T map, uint64_t key) {
+    auto pair = map->find(key);
+    R value = (pair == map->end()) ? nullptr : &pair->second;
+    return value;
+  }
+
+  template <typename T>
+  int GetMTETag(T address, Instruction const* pc = nullptr) {
+    uint64_t key = GenerateMTEkey(address);
+    MetaDataMTE* m = GetAttribute<MetaDataMTE*>(&metadata_mte_, key);
+
+    if (!m) {
+      std::stringstream sstream;
+      sstream << std::hex << "MTE ERROR : instruction at 0x"
+              << reinterpret_cast<uint64_t>(pc)
+              << " touched an unallocated memory location 0x"
+              << (uint64_t)(address) << ".\n";
+      VIXL_ABORT_WITH_MSG(sstream.str().c_str());
+    }
+
+    return m->GetTag();
+  }
+
+  template <typename T>
+  void SetMTETag(T address, int tag, Instruction const* pc = nullptr) {
+    VIXL_ASSERT(IsAligned((uintptr_t)address, kMTETagGranuleInBytes));
+    uint64_t key = GenerateMTEkey(address);
+    MetaDataMTE* m = GetAttribute<MetaDataMTE*>(&metadata_mte_, key);
+
+    if (!m) {
+      metadata_mte_.insert({key, MetaDataMTE(tag)});
+    } else {
+      // Overwrite.
+      if (m->GetTag() == tag) {
+        std::stringstream sstream;
+        sstream << std::hex << "MTE WARNING : instruction at 0x"
+                << reinterpret_cast<uint64_t>(pc)
+                << ", the same tag is assigned to the address 0x"
+                << (uint64_t)(address) << ".\n";
+        VIXL_WARNING(sstream.str().c_str());
+      }
+      m->SetTag(tag);
+    }
+  }
+
+  template <typename T>
+  size_t CleanMTETag(T address) {
+    VIXL_ASSERT(IsAligned(reinterpret_cast<uintptr_t>(address),
+                          kMTETagGranuleInBytes));
+    uint64_t key = GenerateMTEkey(address);
+    return metadata_mte_.erase(key);
+  }
+
+  size_t GetTotalCountMTE() { return metadata_mte_.size(); }
+
+  // A pure virtual struct that allows the templated BranchInterception struct
+  // to be stored. For more information see BranchInterception.
+  struct BranchInterceptionAbstract {
+    virtual ~BranchInterceptionAbstract() {}
+    // Call the callback_ if one exists, otherwise do a RuntimeCall.
+    virtual void operator()(Simulator* simulator) const = 0;
+  };
+
+  // An entry denoting a function to intercept when branched to during
+  // simulator execution. When a function is intercepted the callback will be
+  // called if one exists, otherwise the function will be passed to
+  // RuntimeCall.
+  template <typename R, typename... P>
+  struct BranchInterception : public BranchInterceptionAbstract {
+    BranchInterception(R (*function)(P...),
+                       InterceptionCallback callback = nullptr)
+        : function_(function), callback_(callback) {}
+
+    void operator()(Simulator* simulator) const VIXL_OVERRIDE;
+
+   private:
+    // Pointer to the function that will be intercepted.
+    R (*function_)(P...);
+
+    // Function to be called instead of function_.
+    InterceptionCallback callback_;
+  };
+
+  // Register a new BranchInterception object. If 'function' is branched to in
+  // the future (e.g. "blr function"), then 'callback' will be called if one
+  // was provided; otherwise a runtime call will be performed on 'function'.
+  //
+  // For example: this can be used to always perform runtime calls on
+  // non-AArch64 functions without using the macroassembler.
+  //
+  // Note: only unconditional branches to registers are currently supported to
+  // be intercepted, e.g. "br"/"blr".
+  //
+  // TODO: support intercepting other branch types.
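+  //
+  // A hedged usage sketch (illustrative, not from the VIXL sources):
+  //
+  //   int HostAdd(int a, int b) { return a + b; }
+  //   ...
+  //   MetaDataDepot depot;
+  //   // Runtime-call HostAdd whenever simulated code branches to it:
+  //   depot.RegisterBranchInterception(HostAdd);
+  //   // Or divert to a callback instead, receiving the intercepted address:
+  //   depot.RegisterBranchInterception(HostAdd,
+  //                                    [](uint64_t addr) { Log(addr); });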
+  template <typename R, typename... P>
+  void RegisterBranchInterception(R (*function)(P...),
+                                  InterceptionCallback callback = nullptr) {
+    uintptr_t addr = reinterpret_cast<uintptr_t>(function);
+    std::unique_ptr<BranchInterceptionAbstract> intercept =
+        std::make_unique<BranchInterception<R, P...>>(function, callback);
+    branch_interceptions_.insert(std::make_pair(addr, std::move(intercept)));
+  }
+
+  // Search for branch interceptions to the branch_target address; if one is
+  // found, return it, otherwise return nullptr.
+  BranchInterceptionAbstract* FindBranchInterception(uint64_t branch_target) {
+    // Check for interceptions to the target address, if one is found, call it.
+    auto search = branch_interceptions_.find(branch_target);
+    if (search != branch_interceptions_.end()) {
+      return search->second.get();
+    } else {
+      return nullptr;
+    }
+  }
+
+  void ResetState() { branch_interceptions_.clear(); }
+
+ private:
+  // Tag recording of each allocated memory in the tag-granule.
+  std::unordered_map<uint64_t, MetaDataMTE> metadata_mte_;
+
+  // Store a map of addresses to be intercepted and their corresponding branch
+  // interception object, see 'BranchInterception'.
+  std::unordered_map<uintptr_t, std::unique_ptr<BranchInterceptionAbstract>>
+      branch_interceptions_;
+};
+
 // Representation of memory, with typed getters and setters for access.
 class Memory {
  public:
-  template <typename T>
-  static T AddressUntag(T address) {
-    // Cast the address using a C-style cast. A reinterpret_cast would be
-    // appropriate, but it can't cast one integral type to another.
-    uint64_t bits = (uint64_t)address;
-    return (T)(bits & ~kAddressTagMask);
+  explicit Memory(SimStack::Allocated stack) : stack_(std::move(stack)) {
+    metadata_depot_ = nullptr;
+  }
+
+  const SimStack::Allocated& GetStack() { return stack_; }
+
+  template <typename A>
+  bool IsMTETagsMatched(A address, Instruction const* pc = nullptr) const {
+    if (MetaDataDepot::MetaDataMTE::IsActive()) {
+      // Cast the address using a C-style cast. A reinterpret_cast would be
+      // appropriate, but it can't cast one integral type to another.
+      uint64_t addr = (uint64_t)address;
+      int pointer_tag = GetAllocationTagFromAddress(addr);
+      int memory_tag = metadata_depot_->GetMTETag(AddressUntag(addr), pc);
+      return pointer_tag == memory_tag;
+    }
+    return true;
   }
 
   template <typename T, typename A>
-  static T Read(A address) {
+  std::optional<T> Read(A address, Instruction const* pc = nullptr) const {
     T value;
-    address = AddressUntag(address);
-    VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
-                (sizeof(value) == 4) || (sizeof(value) == 8) ||
-                (sizeof(value) == 16));
-    memcpy(&value, reinterpret_cast<const char*>(address), sizeof(value));
+    VIXL_STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                       (sizeof(value) == 4) || (sizeof(value) == 8) ||
+                       (sizeof(value) == 16));
+    auto base = reinterpret_cast<const char*>(AddressUntag(address));
+    if (stack_.IsAccessInGuardRegion(base, sizeof(value))) {
+      VIXL_ABORT_WITH_MSG("Attempt to read from stack guard region");
+    }
+    if (!IsMTETagsMatched(address, pc)) {
+      VIXL_ABORT_WITH_MSG("Tag mismatch.");
+    }
+    if (TryMemoryAccess(reinterpret_cast<uintptr_t>(base), sizeof(value)) ==
+        MemoryAccessResult::Failure) {
+      return std::nullopt;
+    }
+    memcpy(&value, base, sizeof(value));
     return value;
   }
 
   template <typename T, typename A>
-  static void Write(A address, T value) {
-    address = AddressUntag(address);
-    VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
-                (sizeof(value) == 4) || (sizeof(value) == 8) ||
-                (sizeof(value) == 16));
-    memcpy(reinterpret_cast<char*>(address), &value, sizeof(value));
+  bool Write(A address, T value, Instruction const* pc = nullptr) const {
+    VIXL_STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                       (sizeof(value) == 4) || (sizeof(value) == 8) ||
+                       (sizeof(value) == 16));
+    auto base = reinterpret_cast<char*>(AddressUntag(address));
+    if (stack_.IsAccessInGuardRegion(base, sizeof(value))) {
+      VIXL_ABORT_WITH_MSG("Attempt to write to stack guard region");
+    }
+    if (!IsMTETagsMatched(address, pc)) {
+      VIXL_ABORT_WITH_MSG("Tag mismatch.");
+    }
+    if (TryMemoryAccess(reinterpret_cast<uintptr_t>(base), sizeof(value)) ==
+        MemoryAccessResult::Failure) {
+      return false;
+    }
+    memcpy(base, &value, sizeof(value));
+    return true;
   }
+
+  template <typename A>
+  std::optional<uint64_t> ReadUint(int size_in_bytes, A address) const {
+    switch (size_in_bytes) {
+      case 1:
+        return Read<uint8_t>(address);
+      case 2:
+        return Read<uint16_t>(address);
+      case 4:
+        return Read<uint32_t>(address);
+      case 8:
+        return Read<uint64_t>(address);
+    }
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
+  template <typename A>
+  std::optional<int64_t> ReadInt(int size_in_bytes, A address) const {
+    switch (size_in_bytes) {
+      case 1:
+        return Read<int8_t>(address);
+      case 2:
+        return Read<int16_t>(address);
+      case 4:
+        return Read<int32_t>(address);
+      case 8:
+        return Read<int64_t>(address);
+    }
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
+  template <typename A>
+  bool Write(int size_in_bytes, A address, uint64_t value) const {
+    switch (size_in_bytes) {
+      case 1:
+        return Write(address, static_cast<uint8_t>(value));
+      case 2:
+        return Write(address, static_cast<uint16_t>(value));
+      case 4:
+        return Write(address, static_cast<uint32_t>(value));
+      case 8:
+        return Write(address, value);
+    }
+    VIXL_UNREACHABLE();
+    return false;
+  }
+
+  void AppendMetaData(MetaDataDepot* metadata_depot) {
+    VIXL_ASSERT(metadata_depot != nullptr);
+    VIXL_ASSERT(metadata_depot_ == nullptr);
+    metadata_depot_ = metadata_depot;
+  }
+
+ private:
+  SimStack::Allocated stack_;
+  MetaDataDepot* metadata_depot_;
 };
 
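+// A hedged usage sketch for the Memory interface above (illustrative, not
+// from the VIXL sources). Reads now return std::optional, so callers must
+// check for a failed (signal-raising) access:
+//
+//   Memory mem(SimStack().Allocate());
+//   uint32_t word = 0x12345678;
+//   if (mem.Write<uint32_t>(&word, 0xdeadbeef)) {
+//     std::optional<uint32_t> read = mem.Read<uint32_t>(&word);
+//     if (read.has_value()) VIXL_ASSERT(*read == 0xdeadbeef);
+//   }
+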
-// Represent a register (r0-r31, v0-v31).
-template <int kSizeInBytes>
+// Represent a register (r0-r31, v0-v31, z0-z31, p0-p15).
+template <unsigned kMaxSizeInBits>
 class SimRegisterBase {
  public:
-  SimRegisterBase() : written_since_last_log_(false) {}
+  static const unsigned kMaxSizeInBytes = kMaxSizeInBits / kBitsPerByte;
+  VIXL_STATIC_ASSERT((kMaxSizeInBytes * kBitsPerByte) == kMaxSizeInBits);
+
+  SimRegisterBase() : size_in_bytes_(kMaxSizeInBytes) { Clear(); }
+
+  unsigned GetSizeInBits() const { return size_in_bytes_ * kBitsPerByte; }
+  unsigned GetSizeInBytes() const { return size_in_bytes_; }
+
+  void SetSizeInBytes(unsigned size_in_bytes) {
+    VIXL_ASSERT(size_in_bytes <= kMaxSizeInBytes);
+    size_in_bytes_ = size_in_bytes;
+  }
+  void SetSizeInBits(unsigned size_in_bits) {
+    VIXL_ASSERT(size_in_bits <= kMaxSizeInBits);
+    VIXL_ASSERT((size_in_bits % kBitsPerByte) == 0);
+    SetSizeInBytes(size_in_bits / kBitsPerByte);
+  }
 
   // Write the specified value. The value is zero-extended if necessary.
   template <typename T>
   void Write(T new_value) {
-    if (sizeof(new_value) < kSizeInBytes) {
-      // All AArch64 registers are zero-extending.
-      memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
-    }
+    // All AArch64 registers are zero-extending.
+    if (sizeof(new_value) < GetSizeInBytes()) Clear();
     WriteLane(new_value, 0);
     NotifyRegisterWrite();
   }
@@ -108,6 +522,11 @@ class SimRegisterBase {
     Write(new_value);
   }
 
+  void Clear() {
+    memset(value_, 0, kMaxSizeInBytes);
+    NotifyRegisterWrite();
+  }
+
   // Insert a typed value into a register, leaving the rest of the register
   // unchanged. The lane parameter indicates where in the register the value
   // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
@@ -137,6 +556,17 @@ class SimRegisterBase {
     return GetLane(lane);
   }
 
+  // Get the value of a specific bit, indexed from the least-significant bit of
+  // lane 0.
+  bool GetBit(int bit) const {
+    int bit_in_byte = bit % (sizeof(value_[0]) * kBitsPerByte);
+    int byte = bit / (sizeof(value_[0]) * kBitsPerByte);
+    return ((value_[byte] >> bit_in_byte) & 1) != 0;
+  }
+
+  // Return a pointer to the raw, underlying byte array.
+  const uint8_t* GetBytes() const { return value_; }
+
   // TODO: Make this return a map of updated bytes, so that we can highlight
   // updated lanes for load-and-insert. (That never happens for scalar code,
   // but NEON has some instructions that can update individual lanes.)
@@ -145,7 +575,9 @@ class SimRegisterBase {
   void NotifyRegisterLogged() { written_since_last_log_ = false; }
 
  protected:
-  uint8_t value_[kSizeInBytes];
+  uint8_t value_[kMaxSizeInBytes];
+
+  unsigned size_in_bytes_;
 
   // Helpers to aid with register tracing.
   bool written_since_last_log_;
@@ -156,38 +588,154 @@ class SimRegisterBase {
   template <typename T>
   void ReadLane(T* dst, int lane) const {
     VIXL_ASSERT(lane >= 0);
-    VIXL_ASSERT((sizeof(*dst) + (lane * sizeof(*dst))) <= kSizeInBytes);
+    VIXL_ASSERT((sizeof(*dst) + (lane * sizeof(*dst))) <= GetSizeInBytes());
     memcpy(dst, &value_[lane * sizeof(*dst)], sizeof(*dst));
   }
 
   template <typename T>
   void WriteLane(T src, int lane) {
     VIXL_ASSERT(lane >= 0);
-    VIXL_ASSERT((sizeof(src) + (lane * sizeof(src))) <= kSizeInBytes);
+    VIXL_ASSERT((sizeof(src) + (lane * sizeof(src))) <= GetSizeInBytes());
     memcpy(&value_[lane * sizeof(src)], &src, sizeof(src));
   }
+
+  // The default ReadLane and WriteLane methods assume what we are copying is
+  // "trivially copyable" by using memcpy. We have to provide alternative
+  // implementations for SimFloat16 which cannot be copied this way.
+
+  void ReadLane(vixl::internal::SimFloat16* dst, int lane) const {
+    uint16_t rawbits;
+    ReadLane(&rawbits, lane);
+    *dst = RawbitsToFloat16(rawbits);
+  }
+
+  void WriteLane(vixl::internal::SimFloat16 src, int lane) {
+    WriteLane(Float16ToRawbits(src), lane);
+  }
 };
 
-typedef SimRegisterBase<kXRegSizeInBytes> SimRegister;   // r0-r31
-typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister;  // v0-v31
-// The default ReadLane and WriteLane methods assume what we are copying is
-// "trivially copyable" by using memcpy. We have to provide alternative
-// implementations for SimFloat16 which cannot be copied this way.
+typedef SimRegisterBase<kXRegSizeInBits> SimRegister;      // r0-r31
+typedef SimRegisterBase<kPRegMaxSizeInBits> SimPRegister;  // p0-p15
+// FFR has the same format as a predicate register.
+typedef SimPRegister SimFFRRegister;
 
-template <>
-template <>
-inline void SimVRegister::ReadLane(vixl::internal::SimFloat16* dst,
-                                   int lane) const {
-  uint16_t rawbits;
-  ReadLane(&rawbits, lane);
-  *dst = RawbitsToFloat16(rawbits);
-}
+// v0-v31 and z0-z31
+class SimVRegister : public SimRegisterBase<kZRegMaxSizeInBits> {
+ public:
+  SimVRegister() : SimRegisterBase(), accessed_as_z_(false) {}
 
-template <>
-template <>
-inline void SimVRegister::WriteLane(vixl::internal::SimFloat16 src, int lane) {
-  WriteLane(Float16ToRawbits(src), lane);
-}
+  void NotifyAccessAsZ() { accessed_as_z_ = true; }
+
+  void NotifyRegisterLogged() {
+    SimRegisterBase::NotifyRegisterLogged();
+    accessed_as_z_ = false;
+  }
+
+  bool AccessedAsZSinceLastLog() const { return accessed_as_z_; }
+
+ private:
+  bool accessed_as_z_;
+};
+
+// Representation of an SVE predicate register.
+class LogicPRegister {
+ public:
+  inline LogicPRegister(
+      SimPRegister& other)  // NOLINT(runtime/references)(runtime/explicit)
+      : register_(other) {}
+
+  // Set a conveniently-sized block to 16 bits: the minimum predicate length
+  // is 16 bits, and it may only be increased in multiples of 16 bits.
+  typedef uint16_t ChunkType;
+
+  // Assign a bit into the end position of the specified lane.
+  // The bit is zero-extended if necessary.
+  void SetActive(VectorFormat vform, int lane_index, bool value) {
+    int psize = LaneSizeInBytesFromFormat(vform);
+    int bit_index = lane_index * psize;
+    int byte_index = bit_index / kBitsPerByte;
+    int bit_offset = bit_index % kBitsPerByte;
+    uint8_t byte = register_.GetLane<uint8_t>(byte_index);
+    register_.Insert(byte_index, ZeroExtend(byte, bit_offset, psize, value));
+  }
+
+  bool IsActive(VectorFormat vform, int lane_index) const {
+    int psize = LaneSizeInBytesFromFormat(vform);
+    int bit_index = lane_index * psize;
+    int byte_index = bit_index / kBitsPerByte;
+    int bit_offset = bit_index % kBitsPerByte;
+    uint8_t byte = register_.GetLane<uint8_t>(byte_index);
+    return ExtractBit(byte, bit_offset);
+  }
+
+  // The accessors for bulk processing.
+  int GetChunkCount() const {
+    VIXL_ASSERT((register_.GetSizeInBytes() % sizeof(ChunkType)) == 0);
+    return register_.GetSizeInBytes() / sizeof(ChunkType);
+  }
+
+  ChunkType GetChunk(int lane) const { return GetActiveMask<ChunkType>(lane); }
+
+  void SetChunk(int lane, ChunkType new_value) {
+    SetActiveMask(lane, new_value);
+  }
+
+  void SetAllBits() {
+    int chunk_size = sizeof(ChunkType) * kBitsPerByte;
+    ChunkType bits = GetUintMask(chunk_size);
+    for (int lane = 0;
+         lane < (static_cast<int>(register_.GetSizeInBits() / chunk_size));
+         lane++) {
+      SetChunk(lane, bits);
+    }
+  }
+
+  template <typename T>
+  T GetActiveMask(int lane) const {
+    return register_.GetLane<T>(lane);
+  }
+
+  template <typename T>
+  void SetActiveMask(int lane, T new_value) {
+    register_.Insert(lane, new_value);
+  }
+
+  void Clear() { register_.Clear(); }
+
+  bool Aliases(const LogicPRegister& other) const {
+    return &register_ == &other.register_;
+  }
+
+ private:
+  // The bit assignment is zero-extended to fill the size of the predicate
+  // element.
+  uint8_t ZeroExtend(uint8_t byte, int index, int psize, bool value) {
+    VIXL_ASSERT(index >= 0);
+    VIXL_ASSERT(index + psize <= kBitsPerByte);
+    int bits = value ? 1 : 0;
+    switch (psize) {
+      case 1:
+        AssignBit(byte, index, bits);
+        break;
+      case 2:
+        AssignBits(byte, index, 0x03, bits);
+        break;
+      case 4:
+        AssignBits(byte, index, 0x0f, bits);
+        break;
+      case 8:
+        AssignBits(byte, index, 0xff, bits);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return 0;
+    }
+    return byte;
+  }
+
+  SimPRegister& register_;
+};
+
+using vixl_uint128_t = std::pair<uint64_t, uint64_t>;
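// A standalone sketch (not part of the patch) of the lane-to-bit mapping that
// LogicPRegister::SetActive() and IsActive() implement above: for a lane size
// of `psize` bytes, lane `i` owns predicate bit `i * psize`, so one predicate
// byte governs eight bytes of the corresponding Z register. The helper name
// is this sketch's own.
#include <cstdint>

bool IsLaneActiveSketch(const uint8_t* preg_bytes,
                        int lane_index,
                        int lane_size_in_bytes) {
  const int kBitsPerByteSketch = 8;
  int bit_index = lane_index * lane_size_in_bytes;  // one flag bit per lane
  int byte_index = bit_index / kBitsPerByteSketch;  // byte holding the flag
  int bit_offset = bit_index % kBitsPerByteSketch;  // flag position in byte
  return ((preg_bytes[byte_index] >> bit_offset) & 1) != 0;
}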
 // Representation of a vector register, with typed getters and setters for lanes
 // and additional information to represent lane state.
@@ -205,6 +753,7 @@ class LogicVRegister {
   }
 
   int64_t Int(VectorFormat vform, int index) const {
+    if (IsSVEFormat(vform)) register_.NotifyAccessAsZ();
     int64_t element;
     switch (LaneSizeInBitsFromFormat(vform)) {
       case 8:
@@ -227,6 +776,7 @@
   }
 
   uint64_t Uint(VectorFormat vform, int index) const {
+    if (IsSVEFormat(vform)) register_.NotifyAccessAsZ();
     uint64_t element;
     switch (LaneSizeInBitsFromFormat(vform)) {
       case 8:
@@ -248,6 +798,13 @@
     return element;
   }
 
+  int UintArray(VectorFormat vform, uint64_t* dst) const {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      dst[i] = Uint(vform, i);
+    }
+    return LaneCountFromFormat(vform);
+  }
+
   uint64_t UintLeftJustified(VectorFormat vform, int index) const {
     return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
   }
@@ -260,6 +817,7 @@
   }
 
   void SetInt(VectorFormat vform, int index, int64_t value) const {
+    if (IsSVEFormat(vform)) register_.NotifyAccessAsZ();
     switch (LaneSizeInBitsFromFormat(vform)) {
       case 8:
         register_.Insert(index, static_cast<int8_t>(value));
         break;
@@ -287,6 +845,7 @@
   }
 
   void SetUint(VectorFormat vform, int index, uint64_t value) const {
+    if (IsSVEFormat(vform)) register_.NotifyAccessAsZ();
     switch (LaneSizeInBitsFromFormat(vform)) {
       case 8:
         register_.Insert(index, static_cast<uint8_t>(value));
         break;
@@ -306,6 +865,17 @@
     }
   }
 
+  void SetUint(VectorFormat vform, int index, vixl_uint128_t value) const {
+    if (LaneSizeInBitsFromFormat(vform) <= 64) {
+      SetUint(vform, index, value.second);
+      return;
+    }
+    // TODO: Extend this to SVE.
+    VIXL_ASSERT((vform == kFormat1Q) && (index == 0));
+    SetUint(kFormat2D, 0, value.second);
+    SetUint(kFormat2D, 1, value.first);
+  }
+
   void SetUintArray(VectorFormat vform, const uint64_t* src) const {
     ClearForWrite(vform);
     for (int i = 0; i < LaneCountFromFormat(vform); i++) {
@@ -313,44 +883,6 @@
     }
   }
 
-  void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const {
-    switch (LaneSizeInBitsFromFormat(vform)) {
-      case 8:
-        register_.Insert(index, Memory::Read<uint8_t>(addr));
-        break;
-      case 16:
-        register_.Insert(index, Memory::Read<uint16_t>(addr));
-        break;
-      case 32:
-        register_.Insert(index, Memory::Read<uint32_t>(addr));
-        break;
-      case 64:
-        register_.Insert(index, Memory::Read<uint64_t>(addr));
-        break;
-      default:
-        VIXL_UNREACHABLE();
-        return;
-    }
-  }
-
-  void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const {
-    uint64_t value = Uint(vform, index);
-    switch (LaneSizeInBitsFromFormat(vform)) {
-      case 8:
-        Memory::Write(addr, static_cast<uint8_t>(value));
-        break;
-      case 16:
-        Memory::Write(addr, static_cast<uint16_t>(value));
-        break;
-      case 32:
-        Memory::Write(addr, static_cast<uint32_t>(value));
-        break;
-      case 64:
-        Memory::Write(addr, value);
-        break;
-    }
-  }
-
   template <typename T>
   T Float(int index) const {
     return register_.GetLane<T>(index);
   }
@@ -361,11 +893,22 @@
     register_.Insert(index, value);
   }
 
-  // When setting a result in a register of size less than Q, the top bits of
-  // the Q register must be cleared.
+  template <typename T>
+  void SetFloat(VectorFormat vform, int index, T value) const {
+    if (IsSVEFormat(vform)) register_.NotifyAccessAsZ();
+    register_.Insert(index, value);
+  }
+
+  void Clear() { register_.Clear(); }
+
+  // When setting a result in a register larger than the result itself, the top
+  // bits of the register must be cleared.
   void ClearForWrite(VectorFormat vform) const {
+    // SVE destinations write whole registers, so we have nothing to clear.
+    if (IsSVEFormat(vform)) return;
+
     unsigned size = RegisterSizeInBytesFromFormat(vform);
-    for (unsigned i = size; i < kQRegSizeInBytes; i++) {
+    for (unsigned i = size; i < register_.GetSizeInBytes(); i++) {
       SetUint(kFormat16B, i, 0);
     }
   }
@@ -470,26 +1013,142 @@
     for (int i = 0; i < LaneCountFromFormat(vform); i++) {
       int64_t val = Int(vform, i);
       SetRounding(i, (val & 1) == 1);
-      val >>= 1;
-      if (GetSignedSaturation(i) != kNotSaturated) {
+      val = ExtractSignedBitfield64(63, 1, val);  // >>= 1
+      if (GetSignedSaturation(i) == kNotSaturated) {
+        SetInt(vform, i, val);
+      } else {
         // If the operation causes signed saturation, the sign bit must be
         // inverted.
-        val ^= (MaxUintFromFormat(vform) >> 1) + 1;
+        uint64_t uval = static_cast<uint64_t>(val);
+        SetUint(vform, i, uval ^ ((MaxUintFromFormat(vform) >> 1) + 1));
       }
-      SetInt(vform, i, val);
     }
     return *this;
   }
 
+  int LaneCountFromFormat(VectorFormat vform) const {
+    if (IsSVEFormat(vform)) {
+      return register_.GetSizeInBits() / LaneSizeInBitsFromFormat(vform);
+    } else {
+      return vixl::aarch64::LaneCountFromFormat(vform);
+    }
+  }
+
  private:
   SimVRegister& register_;
 
   // Allocate one saturation state entry per lane; largest register is type Q,
   // and lanes can be a minimum of one byte wide.
-  Saturation saturated_[kQRegSizeInBytes];
+  Saturation saturated_[kZRegMaxSizeInBytes];
 
   // Allocate one rounding state entry per lane.
-  bool round_[kQRegSizeInBytes];
+  bool round_[kZRegMaxSizeInBytes];
+};
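// A standalone sketch (not part of the patch) of the halving step in the
// loop above: ExtractSignedBitfield64(63, 1, val) is an arithmetic shift
// right by one, written without relying on `>>` applied to a signed value,
// and bit 0 is captured first as the rounding bit. The helper name is this
// sketch's own, and two's-complement representation is assumed.
#include <cstdint>

int64_t HalveWithRoundingBitSketch(int64_t val, bool* rounding_bit) {
  *rounding_bit = (val & 1) == 1;  // as SetRounding(i, (val & 1) == 1) above
  uint64_t uval = static_cast<uint64_t>(val);
  // Keep bits <63:1> and replicate the sign into bit 63, i.e. sign-extend the
  // extracted bitfield; this is exactly an arithmetic shift right by one.
  uint64_t result = (uval >> 1) | (uval & (UINT64_C(1) << 63));
  return static_cast<int64_t>(result);
}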
+
+// Represent an SVE addressing mode and abstract per-lane address generation to
+// make iteration easy.
+//
+// Contiguous accesses are described with a simple base address, the memory
+// occupied by each lane (`SetMsizeInBytesLog2()`) and the number of elements in
+// each struct (`SetRegCount()`).
+//
+// Scatter-gather accesses also require a SimVRegister and information about how
+// to extract lanes from it.
+class LogicSVEAddressVector {
+ public:
+  // scalar-plus-scalar
+  // scalar-plus-immediate
+  explicit LogicSVEAddressVector(uint64_t base)
+      : base_(base),
+        msize_in_bytes_log2_(kUnknownMsizeInBytesLog2),
+        reg_count_(1),
+        vector_(NULL),
+        vector_form_(kFormatUndefined),
+        vector_mod_(NO_SVE_OFFSET_MODIFIER),
+        vector_shift_(0) {}
+
+  // scalar-plus-vector
+  // vector-plus-immediate
+  // `base` should be the constant used for each element. That is, the value
+  // of `xn`, or `#<imm>`.
+  // `vector` should be the SimVRegister with offsets for each element. The
+  // vector format must be specified; SVE scatter/gather accesses typically
+  // support both 32-bit and 64-bit addressing.
+  //
+  // `mod` and `shift` correspond to the modifiers applied to each element in
+  // scalar-plus-vector forms, such as those used for unpacking and
+  // sign-extension. They are not used for vector-plus-immediate.
+  LogicSVEAddressVector(uint64_t base,
+                        const SimVRegister* vector,
+                        VectorFormat vform,
+                        SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER,
+                        int shift = 0)
+      : base_(base),
+        msize_in_bytes_log2_(kUnknownMsizeInBytesLog2),
+        reg_count_(1),
+        vector_(vector),
+        vector_form_(vform),
+        vector_mod_(mod),
+        vector_shift_(shift) {}
+
+  // Set `msize` -- the memory occupied by each lane -- for address
+  // calculations.
+  void SetMsizeInBytesLog2(int msize_in_bytes_log2) {
+    VIXL_ASSERT(msize_in_bytes_log2 >= static_cast<int>(kBRegSizeInBytesLog2));
+    VIXL_ASSERT(msize_in_bytes_log2 <= static_cast<int>(kDRegSizeInBytesLog2));
+    msize_in_bytes_log2_ = msize_in_bytes_log2;
+  }
+
+  bool HasMsize() const {
+    return msize_in_bytes_log2_ != kUnknownMsizeInBytesLog2;
+  }
+
+  int GetMsizeInBytesLog2() const {
+    VIXL_ASSERT(HasMsize());
+    return msize_in_bytes_log2_;
+  }
+  int GetMsizeInBitsLog2() const {
+    return GetMsizeInBytesLog2() + kBitsPerByteLog2;
+  }
+
+  int GetMsizeInBytes() const { return 1 << GetMsizeInBytesLog2(); }
+  int GetMsizeInBits() const { return 1 << GetMsizeInBitsLog2(); }
+
+  void SetRegCount(int reg_count) {
+    VIXL_ASSERT(reg_count >= 1);  // E.g. ld1/st1
+    VIXL_ASSERT(reg_count <= 4);  // E.g. ld4/st4
+    reg_count_ = reg_count;
+  }
+
+  int GetRegCount() const { return reg_count_; }
+
+  // Full per-element address calculation for structured accesses.
+  //
+  // Note that the register number argument (`reg`) is zero-based.
+  uint64_t GetElementAddress(int lane, int reg) const {
+    VIXL_ASSERT(reg < GetRegCount());
+    // Individual structures are always contiguous in memory, so this
+    // implementation works for both contiguous and scatter-gather addressing.
+    return GetStructAddress(lane) + (reg * GetMsizeInBytes());
+  }
+
+  // Full per-struct address calculation for structured accesses.
+ uint64_t GetStructAddress(int lane) const; + + bool IsContiguous() const { return vector_ == NULL; } + bool IsScatterGather() const { return !IsContiguous(); } + + private: + uint64_t base_; + int msize_in_bytes_log2_; + int reg_count_; + + const SimVRegister* vector_; + VectorFormat vector_form_; + SVEOffsetModifier vector_mod_; + int vector_shift_; + + static const int kUnknownMsizeInBytesLog2 = -1; }; // The proper way to initialize a simulated system register (such as NZCV) is as @@ -621,9 +1280,14 @@ class SimExclusiveGlobalMonitor { }; +class Debugger; + + class Simulator : public DecoderVisitor { public: - explicit Simulator(Decoder* decoder, FILE* stream = stdout); + explicit Simulator(Decoder* decoder, + FILE* stream = stdout, + SimStack::Allocated stack = SimStack().Allocate()); ~Simulator(); void ResetState(); @@ -693,6 +1357,8 @@ class Simulator : public DecoderVisitor { static const Instruction* kEndOfSimAddress; // Simulation helpers. + bool IsSimulationFinished() const { return pc_ == kEndOfSimAddress; } + const Instruction* ReadPc() const { return pc_; } VIXL_DEPRECATED("ReadPc", const Instruction* pc() const) { return ReadPc(); } @@ -701,7 +1367,7 @@ class Simulator : public DecoderVisitor { void WritePc(const Instruction* new_pc, BranchLogMode log_mode = LogBranches) { if (log_mode == LogBranches) LogTakenBranch(new_pc); - pc_ = Memory::AddressUntag(new_pc); + pc_ = AddressUntag(new_pc); pc_modified_ = true; } VIXL_DEPRECATED("WritePc", void set_pc(const Instruction* new_pc)) { @@ -715,11 +1381,43 @@ class Simulator : public DecoderVisitor { } VIXL_DEPRECATED("IncrementPc", void increment_pc()) { IncrementPc(); } + BType ReadBType() const { return btype_; } + void WriteNextBType(BType btype) { next_btype_ = btype; } + void UpdateBType() { + btype_ = next_btype_; + next_btype_ = DefaultBType; + } + + // Helper function to determine BType for branches. + BType GetBTypeFromInstruction(const Instruction* instr) const; + + bool PcIsInGuardedPage() const { return guard_pages_; } + void SetGuardedPages(bool guard_pages) { guard_pages_ = guard_pages; } + + const Instruction* GetLastExecutedInstruction() const { return last_instr_; } + void ExecuteInstruction() { // The program counter should always be aligned. VIXL_ASSERT(IsWordAligned(pc_)); pc_modified_ = false; + // On guarded pages, if BType is not zero, take an exception on any + // instruction other than BTI, PACI[AB]SP, HLT or BRK. + if (PcIsInGuardedPage() && (ReadBType() != DefaultBType)) { + if (pc_->IsPAuth()) { + Instr i = pc_->Mask(SystemPAuthMask); + if ((i != PACIASP) && (i != PACIBSP)) { + VIXL_ABORT_WITH_MSG( + "Executing non-BTI instruction with wrong BType."); + } + } else if (!pc_->IsBti() && !pc_->IsException()) { + VIXL_ABORT_WITH_MSG("Executing non-BTI instruction with wrong BType."); + } + } + + bool last_instr_was_movprfx = + (form_hash_ == "movprfx_z_z"_h) || (form_hash_ == "movprfx_z_p_z"_h); + // decoder_->Decode(...) triggers at least the following visitors: // 1. The CPUFeaturesAuditor (`cpu_features_auditor_`). // 2. The PrintDisassembler (`print_disasm_`), if enabled. @@ -727,24 +1425,109 @@ class Simulator : public DecoderVisitor { // User can add additional visitors at any point, but the Simulator requires // that the ordering above is preserved. 
decoder_->Decode(pc_); + + if (last_instr_was_movprfx) { + VIXL_ASSERT(last_instr_ != NULL); + VIXL_CHECK(pc_->CanTakeSVEMovprfx(form_hash_, last_instr_)); + } + + last_instr_ = ReadPc(); IncrementPc(); LogAllWrittenRegisters(); + UpdateBType(); VIXL_CHECK(cpu_features_auditor_.InstructionIsAvailable()); } -// Declare all Visitor functions. -#define DECLARE(A) \ - virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + virtual void Visit(Metadata* metadata, + const Instruction* instr) VIXL_OVERRIDE; + +#define DECLARE(A) virtual void Visit##A(const Instruction* instr); VISITOR_LIST_THAT_RETURN(DECLARE) #undef DECLARE - -#define DECLARE(A) \ - VIXL_DEBUG_NO_RETURN virtual void Visit##A(const Instruction* instr) \ - VIXL_OVERRIDE; +#define DECLARE(A) \ + VIXL_NO_RETURN virtual void Visit##A(const Instruction* instr); VISITOR_LIST_THAT_DONT_RETURN(DECLARE) #undef DECLARE + void Simulate_PdT_PgZ_ZnT_ZmT(const Instruction* instr); + void Simulate_PdT_Xn_Xm(const Instruction* instr); + void Simulate_ZdB_Zn1B_Zn2B_imm(const Instruction* instr); + void Simulate_ZdB_ZnB_ZmB(const Instruction* instr); + void Simulate_ZdD_ZnD_ZmD_imm(const Instruction* instr); + void Simulate_ZdH_PgM_ZnS(const Instruction* instr); + void Simulate_ZdH_ZnH_ZmH_imm(const Instruction* instr); + void Simulate_ZdS_PgM_ZnD(const Instruction* instr); + void Simulate_ZdS_PgM_ZnS(const Instruction* instr); + void Simulate_ZdS_ZnS_ZmS_imm(const Instruction* instr); + void Simulate_ZdT_PgM_ZnT(const Instruction* instr); + void Simulate_ZdT_PgZ_ZnT_ZmT(const Instruction* instr); + void Simulate_ZdT_ZnT_ZmT(const Instruction* instr); + void Simulate_ZdT_ZnT_ZmTb(const Instruction* instr); + void Simulate_ZdT_ZnT_const(const Instruction* instr); + void Simulate_ZdaD_ZnS_ZmS_imm(const Instruction* instr); + void Simulate_ZdaH_ZnH_ZmH_imm_const(const Instruction* instr); + void Simulate_ZdaS_ZnH_ZmH(const Instruction* instr); + void Simulate_ZdaS_ZnH_ZmH_imm(const Instruction* instr); + void Simulate_ZdaS_ZnS_ZmS_imm_const(const Instruction* instr); + void Simulate_ZdaT_PgM_ZnTb(const Instruction* instr); + void Simulate_ZdaT_ZnT_ZmT(const Instruction* instr); + void Simulate_ZdaT_ZnT_const(const Instruction* instr); + void Simulate_ZdaT_ZnTb_ZmTb(const Instruction* instr); + void Simulate_ZdnT_PgM_ZdnT_ZmT(const Instruction* instr); + void Simulate_ZdnT_PgM_ZdnT_const(const Instruction* instr); + void Simulate_ZdnT_ZdnT_ZmT_const(const Instruction* instr); + void Simulate_ZtD_PgZ_ZnD_Xm(const Instruction* instr); + void Simulate_ZtD_Pg_ZnD_Xm(const Instruction* instr); + void Simulate_ZtS_PgZ_ZnS_Xm(const Instruction* instr); + void Simulate_ZtS_Pg_ZnS_Xm(const Instruction* instr); + + void SimulateSVEHalvingAddSub(const Instruction* instr); + void SimulateSVESaturatingArithmetic(const Instruction* instr); + void SimulateSVEIntArithPair(const Instruction* instr); + void SimulateSVENarrow(const Instruction* instr); + void SimulateSVEInterleavedArithLong(const Instruction* instr); + void SimulateSVEShiftLeftImm(const Instruction* instr); + void SimulateSVEAddSubCarry(const Instruction* instr); + void SimulateSVEAddSubHigh(const Instruction* instr); + void SimulateSVEIntMulLongVec(const Instruction* instr); + void SimulateSVESaturatingIntMulLongIdx(const Instruction* instr); + void SimulateSVEExclusiveOrRotate(const Instruction* instr); + void SimulateSVEBitwiseTernary(const Instruction* instr); + void SimulateSVEComplexDotProduct(const Instruction* instr); + void SimulateSVEMulIndex(const Instruction* instr); + void 
SimulateSVEMlaMlsIndex(const Instruction* instr); + void SimulateSVEComplexIntMulAdd(const Instruction* instr); + void SimulateSVESaturatingMulAddHigh(const Instruction* instr); + void SimulateSVESaturatingMulHighIndex(const Instruction* instr); + void SimulateSVEFPConvertLong(const Instruction* instr); + void SimulateMatrixMul(const Instruction* instr); + void SimulateSVEFPMatrixMul(const Instruction* instr); + void SimulateNEONMulByElementLong(const Instruction* instr); + void SimulateNEONFPMulByElement(const Instruction* instr); + void SimulateNEONFPMulByElementLong(const Instruction* instr); + void SimulateNEONComplexMulByElement(const Instruction* instr); + void SimulateNEONDotProdByElement(const Instruction* instr); + void SimulateMTEAddSubTag(const Instruction* instr); + void SimulateMTETagMaskInsert(const Instruction* instr); + void SimulateMTESubPointer(const Instruction* instr); + void SimulateMTELoadTag(const Instruction* instr); + void SimulateMTEStoreTag(const Instruction* instr); + void SimulateMTEStoreTagPair(const Instruction* instr); + void Simulate_XdSP_XnSP_Xm(const Instruction* instr); + void SimulateCpy(const Instruction* instr); + void SimulateCpyFP(const Instruction* instr); + void SimulateCpyP(const Instruction* instr); + void SimulateCpyM(const Instruction* instr); + void SimulateCpyE(const Instruction* instr); + void SimulateSetP(const Instruction* instr); + void SimulateSetM(const Instruction* instr); + void SimulateSetE(const Instruction* instr); + void SimulateSetGP(const Instruction* instr); + void SimulateSetGM(const Instruction* instr); + void SimulateSignedMinMax(const Instruction* instr); + void SimulateUnsignedMinMax(const Instruction* instr); + // Integer register accessors. @@ -792,6 +1575,13 @@ class Simulator : public DecoderVisitor { return ReadXRegister(code, r31mode); } + SimPRegister& ReadPRegister(unsigned code) { + VIXL_ASSERT(code < kNumberOfPRegisters); + return pregisters_[code]; + } + + SimFFRRegister& ReadFFR() { return ffr_register_; } + // As above, with parameterized size and return type. The value is // either zero-extended or truncated to fit, as required. template @@ -842,6 +1632,10 @@ class Simulator : public DecoderVisitor { // Write 'value' into an integer register. The value is zero-extended. This // behaviour matches AArch64 register writes. + // + // SP may be specified in one of two ways: + // - (code == kSPRegInternalCode) && (r31mode == Reg31IsZeroRegister) + // - (code == 31) && (r31mode == Reg31IsStackPointer) template void WriteRegister(unsigned code, T value, @@ -861,20 +1655,25 @@ class Simulator : public DecoderVisitor { VIXL_ASSERT((sizeof(T) == kWRegSizeInBytes) || (sizeof(T) == kXRegSizeInBytes)); VIXL_ASSERT( - code < kNumberOfRegisters || + (code < kNumberOfRegisters) || ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode))); - if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { - return; + if (code == 31) { + if (r31mode == Reg31IsZeroRegister) { + // Discard writes to the zero register. + return; + } else { + code = kSPRegInternalCode; + } } - if ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)) { - code = 31; + // registers_[31] is the stack pointer. 
+    VIXL_STATIC_ASSERT((kSPRegInternalCode % kNumberOfRegisters) == 31);
+    registers_[code % kNumberOfRegisters].Write(value);
+
+    if (log_mode == LogRegWrites) {
+      LogRegister(code, GetPrintRegisterFormatForSize(sizeof(T)));
+    }
-
-    registers_[code].Write(value);
-
-    if (log_mode == LogRegWrites) LogRegister(code, r31mode);
   }
   template <typename T>
   VIXL_DEPRECATED("WriteRegister",
@@ -980,6 +1779,11 @@
     uint8_t val[kQRegSizeInBytes];
   };
 
+  // A structure for representing an SVE Z register.
+  struct zreg_t {
+    uint8_t val[kZRegMaxSizeInBytes];
+  };
+
   // Basic accessor: read the register as the specified type.
   template <typename T>
   T ReadVRegister(unsigned code) const {
@@ -1095,7 +1899,8 @@
                 (sizeof(value) == kHRegSizeInBytes) ||
                 (sizeof(value) == kSRegSizeInBytes) ||
                 (sizeof(value) == kDRegSizeInBytes) ||
-                (sizeof(value) == kQRegSizeInBytes));
+                (sizeof(value) == kQRegSizeInBytes) ||
+                (sizeof(value) == kZRegMaxSizeInBytes));
     VIXL_ASSERT(code < kNumberOfVRegisters);
     vregisters_[code].Write(value);
@@ -1202,6 +2007,12 @@
     WriteQRegister(code, value, log_mode);
   }
 
+  void WriteZRegister(unsigned code,
+                      zreg_t value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+
   template <typename T>
   T ReadRegister(Register reg) const {
     return ReadRegister<T>(reg.GetCode(), Reg31IsZeroRegister);
@@ -1246,6 +2057,69 @@
     }
   }
 
+  template <typename T, typename A>
+  std::optional<T> MemRead(A address) const {
+    Instruction const* pc = ReadPc();
+    return memory_.Read<T>(address, pc);
+  }
+
+  template <typename T, typename A>
+  bool MemWrite(A address, T value) const {
+    Instruction const* pc = ReadPc();
+    return memory_.Write(address, value, pc);
+  }
+
+  template <typename A>
+  std::optional<uint64_t> MemReadUint(int size_in_bytes, A address) const {
+    return memory_.ReadUint(size_in_bytes, address);
+  }
+
+  template <typename A>
+  std::optional<int64_t> MemReadInt(int size_in_bytes, A address) const {
+    return memory_.ReadInt(size_in_bytes, address);
+  }
+
+  template <typename A>
+  bool MemWrite(int size_in_bytes, A address, uint64_t value) const {
+    return memory_.Write(size_in_bytes, address, value);
+  }
+
+  bool LoadLane(LogicVRegister dst,
+                VectorFormat vform,
+                int index,
+                uint64_t addr) const {
+    unsigned msize_in_bytes = LaneSizeInBytesFromFormat(vform);
+    return LoadUintToLane(dst, vform, msize_in_bytes, index, addr);
+  }
+
+  bool LoadUintToLane(LogicVRegister dst,
+                      VectorFormat vform,
+                      unsigned msize_in_bytes,
+                      int index,
+                      uint64_t addr) const {
+    VIXL_DEFINE_OR_RETURN_FALSE(value, MemReadUint(msize_in_bytes, addr));
+    dst.SetUint(vform, index, value);
+    return true;
+  }
+
+  bool LoadIntToLane(LogicVRegister dst,
+                     VectorFormat vform,
+                     unsigned msize_in_bytes,
+                     int index,
+                     uint64_t addr) const {
+    VIXL_DEFINE_OR_RETURN_FALSE(value, MemReadInt(msize_in_bytes, addr));
+    dst.SetInt(vform, index, value);
+    return true;
+  }
+
+  bool StoreLane(const LogicVRegister& src,
+                 VectorFormat vform,
+                 int index,
+                 uint64_t addr) const {
+    unsigned msize_in_bytes = LaneSizeInBytesFromFormat(vform);
+    return MemWrite(msize_in_bytes, addr, src.Uint(vform, index));
+  }
+
   uint64_t ComputeMemOperandAddress(const MemOperand& mem_op) const;
 
   template <typename T>
@@ -1254,20 +2128,32 @@
     if (operand.IsCPURegister()) {
       return ReadCPURegister<T>(operand.GetCPURegister());
     } else {
       VIXL_ASSERT(operand.IsMemOperand());
-      return Memory::Read<T>(ComputeMemOperandAddress(operand.GetMemOperand()));
+      auto res = MemRead<T>(ComputeMemOperandAddress(operand.GetMemOperand()));
+      VIXL_ASSERT(res);
+
return *res; } } template - void WriteGenericOperand(GenericOperand operand, + bool WriteGenericOperand(GenericOperand operand, T value, RegLogMode log_mode = LogRegWrites) { if (operand.IsCPURegister()) { - WriteCPURegister(operand.GetCPURegister(), value, log_mode); + // Outside SIMD, registers are 64-bit or a subset of a 64-bit register. If + // the width of the value to write is smaller than 64 bits, the unused + // bits may contain unrelated values that the code following this write + // needs to handle gracefully. + // Here we fill the unused bits with a predefined pattern to catch issues + // early. + VIXL_ASSERT(operand.GetCPURegister().GetSizeInBits() <= 64); + uint64_t raw = 0xdeadda1adeadda1a; + memcpy(&raw, &value, sizeof(value)); + WriteCPURegister(operand.GetCPURegister(), raw, log_mode); } else { VIXL_ASSERT(operand.IsMemOperand()); - Memory::Write(ComputeMemOperandAddress(operand.GetMemOperand()), value); + return MemWrite(ComputeMemOperandAddress(operand.GetMemOperand()), value); } + return true; } bool ReadN() const { return nzcv_.GetN() != 0; } @@ -1313,14 +2199,16 @@ class Simulator : public DecoderVisitor { kPrintRegLaneSizeD = 3 << 0, kPrintRegLaneSizeX = kPrintRegLaneSizeD, kPrintRegLaneSizeQ = 4 << 0, + kPrintRegLaneSizeUnknown = 5 << 0, kPrintRegLaneSizeOffset = 0, kPrintRegLaneSizeMask = 7 << 0, - // The lane count. + // The overall register size. kPrintRegAsScalar = 0, kPrintRegAsDVector = 1 << 3, kPrintRegAsQVector = 2 << 3, + kPrintRegAsSVEVector = 3 << 3, kPrintRegAsVectorMask = 3 << 3, @@ -1328,37 +2216,98 @@ class Simulator : public DecoderVisitor { // S-, H-, and D-sized lanes.) kPrintRegAsFP = 1 << 5, - // Supported combinations. + // With this flag, print helpers won't check that the upper bits are zero. + // This also forces the register name to be printed with the `reg` + // format. + // + // The flag is supported with any PrintRegisterFormat other than those with + // kPrintRegAsSVEVector. + kPrintRegPartial = 1 << 6, - kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar, - kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar, - kPrintHReg = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, - kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, - kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, +// Supported combinations. +// These exist so that they can be referred to by name, but also because C++ +// does not allow enum types to hold values that aren't explicitly +// enumerated, and we want to be able to combine the above flags. 
- kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar, - kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector, - kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector, - kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar, - kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector, - kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector, - kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar, - kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector, - kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector, - kPrintReg1HFP = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, - kPrintReg4HFP = kPrintRegLaneSizeH | kPrintRegAsDVector | kPrintRegAsFP, - kPrintReg8HFP = kPrintRegLaneSizeH | kPrintRegAsQVector | kPrintRegAsFP, - kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, - kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP, - kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP, - kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar, - kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector, - kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, - kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP, - kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar +// Scalar formats. +#define VIXL_DECL_PRINT_REG_SCALAR(size) \ + kPrint##size##Reg = kPrintRegLaneSize##size | kPrintRegAsScalar, \ + kPrint##size##RegPartial = kPrintRegLaneSize##size | kPrintRegPartial +#define VIXL_DECL_PRINT_REG_SCALAR_FP(size) \ + VIXL_DECL_PRINT_REG_SCALAR(size) \ + , kPrint##size##RegFP = kPrint##size##Reg | kPrintRegAsFP, \ + kPrint##size##RegPartialFP = kPrint##size##RegPartial | kPrintRegAsFP + VIXL_DECL_PRINT_REG_SCALAR(W), + VIXL_DECL_PRINT_REG_SCALAR(X), + VIXL_DECL_PRINT_REG_SCALAR_FP(H), + VIXL_DECL_PRINT_REG_SCALAR_FP(S), + VIXL_DECL_PRINT_REG_SCALAR_FP(D), + VIXL_DECL_PRINT_REG_SCALAR(Q), +#undef VIXL_DECL_PRINT_REG_SCALAR +#undef VIXL_DECL_PRINT_REG_SCALAR_FP + +#define VIXL_DECL_PRINT_REG_NEON(count, type, size) \ + kPrintReg##count##type = kPrintRegLaneSize##type | kPrintRegAs##size, \ + kPrintReg##count##type##Partial = kPrintReg##count##type | kPrintRegPartial +#define VIXL_DECL_PRINT_REG_NEON_FP(count, type, size) \ + VIXL_DECL_PRINT_REG_NEON(count, type, size) \ + , kPrintReg##count##type##FP = kPrintReg##count##type | kPrintRegAsFP, \ + kPrintReg##count##type##PartialFP = \ + kPrintReg##count##type##Partial | kPrintRegAsFP + VIXL_DECL_PRINT_REG_NEON(1, B, Scalar), + VIXL_DECL_PRINT_REG_NEON(8, B, DVector), + VIXL_DECL_PRINT_REG_NEON(16, B, QVector), + VIXL_DECL_PRINT_REG_NEON_FP(1, H, Scalar), + VIXL_DECL_PRINT_REG_NEON_FP(4, H, DVector), + VIXL_DECL_PRINT_REG_NEON_FP(8, H, QVector), + VIXL_DECL_PRINT_REG_NEON_FP(1, S, Scalar), + VIXL_DECL_PRINT_REG_NEON_FP(2, S, DVector), + VIXL_DECL_PRINT_REG_NEON_FP(4, S, QVector), + VIXL_DECL_PRINT_REG_NEON_FP(1, D, Scalar), + VIXL_DECL_PRINT_REG_NEON_FP(2, D, QVector), + VIXL_DECL_PRINT_REG_NEON(1, Q, Scalar), +#undef VIXL_DECL_PRINT_REG_NEON +#undef VIXL_DECL_PRINT_REG_NEON_FP + +#define VIXL_DECL_PRINT_REG_SVE(type) \ + kPrintRegVn##type = kPrintRegLaneSize##type | kPrintRegAsSVEVector, \ + kPrintRegVn##type##Partial = kPrintRegVn##type | kPrintRegPartial +#define VIXL_DECL_PRINT_REG_SVE_FP(type) \ + VIXL_DECL_PRINT_REG_SVE(type) \ + , kPrintRegVn##type##FP = kPrintRegVn##type | kPrintRegAsFP, \ + kPrintRegVn##type##PartialFP = kPrintRegVn##type##Partial | kPrintRegAsFP + VIXL_DECL_PRINT_REG_SVE(B), + VIXL_DECL_PRINT_REG_SVE_FP(H), + 
VIXL_DECL_PRINT_REG_SVE_FP(S), + VIXL_DECL_PRINT_REG_SVE_FP(D), + VIXL_DECL_PRINT_REG_SVE(Q) +#undef VIXL_DECL_PRINT_REG_SVE +#undef VIXL_DECL_PRINT_REG_SVE_FP }; + // Return `format` with the kPrintRegPartial flag set. + PrintRegisterFormat GetPrintRegPartial(PrintRegisterFormat format) { + // Every PrintRegisterFormat has a kPrintRegPartial counterpart, so the + // result of this cast will always be well-defined. + return static_cast(format | kPrintRegPartial); + } + + // For SVE formats, return the format of a Q register part of it. + PrintRegisterFormat GetPrintRegAsQChunkOfSVE(PrintRegisterFormat format) { + VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector); + // Keep the FP and lane size fields. + int q_format = format & (kPrintRegLaneSizeMask | kPrintRegAsFP); + // The resulting format must always be partial, because we're not formatting + // the whole Z register. + q_format |= (kPrintRegAsQVector | kPrintRegPartial); + + // This cast is always safe because NEON QVector formats support every + // combination of FP and lane size that SVE formats do. + return static_cast(q_format); + } + unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) { + VIXL_ASSERT((format & kPrintRegLaneSizeMask) != kPrintRegLaneSizeUnknown); return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset; } @@ -1367,17 +2316,51 @@ class Simulator : public DecoderVisitor { } unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) { - if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2; - if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2; - - // Scalar types. - return GetPrintRegLaneSizeInBytesLog2(format); + switch (format & kPrintRegAsVectorMask) { + case kPrintRegAsScalar: + return GetPrintRegLaneSizeInBytesLog2(format); + case kPrintRegAsDVector: + return kDRegSizeInBytesLog2; + case kPrintRegAsQVector: + return kQRegSizeInBytesLog2; + default: + case kPrintRegAsSVEVector: + // We print SVE vectors in Q-sized chunks. These need special handling, + // and it's probably an error to call this function in that case. + VIXL_UNREACHABLE(); + return kQRegSizeInBytesLog2; + } } unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) { return 1 << GetPrintRegSizeInBytesLog2(format); } + unsigned GetPrintRegSizeInBitsLog2(PrintRegisterFormat format) { + return GetPrintRegSizeInBytesLog2(format) + kBitsPerByteLog2; + } + + unsigned GetPrintRegSizeInBits(PrintRegisterFormat format) { + return 1 << GetPrintRegSizeInBitsLog2(format); + } + + const char* GetPartialRegSuffix(PrintRegisterFormat format) { + switch (GetPrintRegSizeInBitsLog2(format)) { + case kBRegSizeLog2: + return "<7:0>"; + case kHRegSizeLog2: + return "<15:0>"; + case kSRegSizeLog2: + return "<31:0>"; + case kDRegSizeLog2: + return "<63:0>"; + case kQRegSizeLog2: + return "<127:0>"; + } + VIXL_UNREACHABLE(); + return ""; + } + unsigned GetPrintRegLaneCount(PrintRegisterFormat format) { unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format); unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format); @@ -1385,6 +2368,21 @@ class Simulator : public DecoderVisitor { return 1 << (reg_size_log2 - lane_size_log2); } + uint16_t GetPrintRegLaneMask(PrintRegisterFormat format) { + int print_as = format & kPrintRegAsVectorMask; + if (print_as == kPrintRegAsScalar) return 1; + + // Vector formats, including SVE formats printed in Q-sized chunks. 
+ static const uint16_t masks[] = {0xffff, 0x5555, 0x1111, 0x0101, 0x0001}; + unsigned size_in_bytes_log2 = GetPrintRegLaneSizeInBytesLog2(format); + VIXL_ASSERT(size_in_bytes_log2 < ArrayLength(masks)); + uint16_t mask = masks[size_in_bytes_log2]; + + // Exclude lanes that aren't visible in D vectors. + if (print_as == kPrintRegAsDVector) mask &= 0x00ff; + return mask; + } + PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size, unsigned lane_size); @@ -1415,6 +2413,10 @@ class Simulator : public DecoderVisitor { return format; } + PrintRegisterFormat GetPrintRegisterFormatForSizeTryFP(unsigned size) { + return GetPrintRegisterFormatTryFP(GetPrintRegisterFormatForSize(size)); + } + template PrintRegisterFormat GetPrintRegisterFormat(T value) { return GetPrintRegisterFormatForSize(sizeof(value)); @@ -1441,99 +2443,317 @@ class Simulator : public DecoderVisitor { // Print all registers of the specified types. void PrintRegisters(); void PrintVRegisters(); + void PrintZRegisters(); void PrintSystemRegisters(); // As above, but only print the registers that have been updated. void PrintWrittenRegisters(); void PrintWrittenVRegisters(); + void PrintWrittenPRegisters(); // As above, but respect LOG_REG and LOG_VREG. void LogWrittenRegisters() { - if (GetTraceParameters() & LOG_REGS) PrintWrittenRegisters(); + if (ShouldTraceRegs()) PrintWrittenRegisters(); } void LogWrittenVRegisters() { - if (GetTraceParameters() & LOG_VREGS) PrintWrittenVRegisters(); + if (ShouldTraceVRegs()) PrintWrittenVRegisters(); + } + void LogWrittenPRegisters() { + if (ShouldTraceVRegs()) PrintWrittenPRegisters(); } void LogAllWrittenRegisters() { LogWrittenRegisters(); LogWrittenVRegisters(); + LogWrittenPRegisters(); } - // Print individual register values (after update). - void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer); - void PrintVRegister(unsigned code, PrintRegisterFormat format); - void PrintSystemRegister(SystemRegister id); - void PrintTakenBranch(const Instruction* target); + // The amount of space to leave for a register name. This is used to keep the + // values vertically aligned. The longest register name has the form + // "z31<2047:1920>". The total overall value indentation must also take into + // account the fixed formatting: "# {name}: 0x{value}". + static const int kPrintRegisterNameFieldWidth = 14; - // Like Print* (above), but respect GetTraceParameters(). - void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) { - if (GetTraceParameters() & LOG_REGS) PrintRegister(code, r31mode); + // Print whole, individual register values. + // - The format can be used to restrict how much of the register is printed, + // but such formats indicate that the unprinted high-order bits are zero and + // these helpers will assert that. + // - If the format includes the kPrintRegAsFP flag then human-friendly FP + // value annotations will be printed. + // - The suffix can be used to add annotations (such as memory access + // details), or to suppress the newline. + void PrintRegister(int code, + PrintRegisterFormat format = kPrintXReg, + const char* suffix = "\n"); + void PrintVRegister(int code, + PrintRegisterFormat format = kPrintReg1Q, + const char* suffix = "\n"); + // PrintZRegister and PrintPRegister print over several lines, so they cannot + // allow the suffix to be overridden. 
+ void PrintZRegister(int code, PrintRegisterFormat format = kPrintRegVnQ); + void PrintPRegister(int code, PrintRegisterFormat format = kPrintRegVnQ); + void PrintFFR(PrintRegisterFormat format = kPrintRegVnQ); + // Print a single Q-sized part of a Z register, or the corresponding two-byte + // part of a P register. These print single lines, and therefore allow the + // suffix to be overridden. The format must include the kPrintRegPartial flag. + void PrintPartialZRegister(int code, + int q_index, + PrintRegisterFormat format = kPrintRegVnQ, + const char* suffix = "\n"); + void PrintPartialPRegister(int code, + int q_index, + PrintRegisterFormat format = kPrintRegVnQ, + const char* suffix = "\n"); + void PrintPartialPRegister(const char* name, + const SimPRegister& reg, + int q_index, + PrintRegisterFormat format = kPrintRegVnQ, + const char* suffix = "\n"); + + // Like Print*Register (above), but respect trace parameters. + void LogRegister(unsigned code, PrintRegisterFormat format) { + if (ShouldTraceRegs()) PrintRegister(code, format); } void LogVRegister(unsigned code, PrintRegisterFormat format) { - if (GetTraceParameters() & LOG_VREGS) PrintVRegister(code, format); + if (ShouldTraceVRegs()) PrintVRegister(code, format); } + void LogZRegister(unsigned code, PrintRegisterFormat format) { + if (ShouldTraceVRegs()) PrintZRegister(code, format); + } + void LogPRegister(unsigned code, PrintRegisterFormat format) { + if (ShouldTraceVRegs()) PrintPRegister(code, format); + } + void LogFFR(PrintRegisterFormat format) { + if (ShouldTraceVRegs()) PrintFFR(format); + } + + // Other state updates, including system registers. + void PrintSystemRegister(SystemRegister id); + void PrintTakenBranch(const Instruction* target); void LogSystemRegister(SystemRegister id) { - if (GetTraceParameters() & LOG_SYSREGS) PrintSystemRegister(id); + if (ShouldTraceSysRegs()) PrintSystemRegister(id); } void LogTakenBranch(const Instruction* target) { - if (GetTraceParameters() & LOG_BRANCH) PrintTakenBranch(target); + if (ShouldTraceBranches()) PrintTakenBranch(target); } - // Print memory accesses. - void PrintRead(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format); - void PrintWrite(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format); - void PrintVRead(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format, - unsigned lane); - void PrintVWrite(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format, - unsigned lane); + // Trace memory accesses. + + // Common, contiguous register accesses (such as for scalars). + // The *Write variants automatically set kPrintRegPartial on the format. + void PrintRead(int rt_code, PrintRegisterFormat format, uintptr_t address); + void PrintExtendingRead(int rt_code, + PrintRegisterFormat format, + int access_size_in_bytes, + uintptr_t address); + void PrintWrite(int rt_code, PrintRegisterFormat format, uintptr_t address); + void PrintVRead(int rt_code, PrintRegisterFormat format, uintptr_t address); + void PrintVWrite(int rt_code, PrintRegisterFormat format, uintptr_t address); + // Simple, unpredicated SVE accesses always access the whole vector, and never + // know the lane type, so there's no need to accept a `format`. 
+ void PrintZRead(int rt_code, uintptr_t address) { + vregisters_[rt_code].NotifyRegisterLogged(); + PrintZAccess(rt_code, "<-", address); + } + void PrintZWrite(int rt_code, uintptr_t address) { + PrintZAccess(rt_code, "->", address); + } + void PrintPRead(int rt_code, uintptr_t address) { + pregisters_[rt_code].NotifyRegisterLogged(); + PrintPAccess(rt_code, "<-", address); + } + void PrintPWrite(int rt_code, uintptr_t address) { + PrintPAccess(rt_code, "->", address); + } // Like Print* (above), but respect GetTraceParameters(). - void LogRead(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format) { - if (GetTraceParameters() & LOG_REGS) PrintRead(address, reg_code, format); + void LogRead(int rt_code, PrintRegisterFormat format, uintptr_t address) { + if (ShouldTraceRegs()) PrintRead(rt_code, format, address); } - void LogWrite(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format) { - if (GetTraceParameters() & LOG_WRITE) PrintWrite(address, reg_code, format); - } - void LogVRead(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format, - unsigned lane = 0) { - if (GetTraceParameters() & LOG_VREGS) { - PrintVRead(address, reg_code, format, lane); + void LogExtendingRead(int rt_code, + PrintRegisterFormat format, + int access_size_in_bytes, + uintptr_t address) { + if (ShouldTraceRegs()) { + PrintExtendingRead(rt_code, format, access_size_in_bytes, address); } } - void LogVWrite(uintptr_t address, - unsigned reg_code, - PrintRegisterFormat format, - unsigned lane = 0) { - if (GetTraceParameters() & LOG_WRITE) { - PrintVWrite(address, reg_code, format, lane); - } + void LogWrite(int rt_code, PrintRegisterFormat format, uintptr_t address) { + if (ShouldTraceWrites()) PrintWrite(rt_code, format, address); + } + void LogVRead(int rt_code, PrintRegisterFormat format, uintptr_t address) { + if (ShouldTraceVRegs()) PrintVRead(rt_code, format, address); + } + void LogVWrite(int rt_code, PrintRegisterFormat format, uintptr_t address) { + if (ShouldTraceWrites()) PrintVWrite(rt_code, format, address); + } + void LogZRead(int rt_code, uintptr_t address) { + if (ShouldTraceVRegs()) PrintZRead(rt_code, address); + } + void LogZWrite(int rt_code, uintptr_t address) { + if (ShouldTraceWrites()) PrintZWrite(rt_code, address); + } + void LogPRead(int rt_code, uintptr_t address) { + if (ShouldTraceVRegs()) PrintPRead(rt_code, address); + } + void LogPWrite(int rt_code, uintptr_t address) { + if (ShouldTraceWrites()) PrintPWrite(rt_code, address); + } + void LogMemTransfer(uintptr_t dst, uintptr_t src, uint8_t value) { + if (ShouldTraceWrites()) PrintMemTransfer(dst, src, value); + } + // Helpers for the above, where the access operation is parameterised. + // - For loads, set op = "<-". + // - For stores, set op = "->". + void PrintAccess(int rt_code, + PrintRegisterFormat format, + const char* op, + uintptr_t address); + void PrintVAccess(int rt_code, + PrintRegisterFormat format, + const char* op, + uintptr_t address); + void PrintMemTransfer(uintptr_t dst, uintptr_t src, uint8_t value); + // Simple, unpredicated SVE accesses always access the whole vector, and never + // know the lane type, so these don't accept a `format`. + void PrintZAccess(int rt_code, const char* op, uintptr_t address); + void PrintPAccess(int rt_code, const char* op, uintptr_t address); + + // Multiple-structure accesses. 
+ void PrintVStructAccess(int rt_code, + int reg_count, + PrintRegisterFormat format, + const char* op, + uintptr_t address); + // Single-structure (single-lane) accesses. + void PrintVSingleStructAccess(int rt_code, + int reg_count, + int lane, + PrintRegisterFormat format, + const char* op, + uintptr_t address); + // Replicating accesses. + void PrintVReplicatingStructAccess(int rt_code, + int reg_count, + PrintRegisterFormat format, + const char* op, + uintptr_t address); + + // Multiple-structure accesses. + void PrintZStructAccess(int rt_code, + int reg_count, + const LogicPRegister& pg, + PrintRegisterFormat format, + int msize_in_bytes, + const char* op, + const LogicSVEAddressVector& addr); + + // Register-printing helper for all structured accessors. + // + // All lanes (according to `format`) are printed, but lanes indicated by + // `focus_mask` are of particular interest. Each bit corresponds to a byte in + // the printed register, in a manner similar to SVE's predicates. Currently, + // this is used to determine when to print human-readable FP annotations. + void PrintVRegistersForStructuredAccess(int rt_code, + int reg_count, + uint16_t focus_mask, + PrintRegisterFormat format); + + // As for the VRegister variant, but print partial Z register names. + void PrintZRegistersForStructuredAccess(int rt_code, + int q_index, + int reg_count, + uint16_t focus_mask, + PrintRegisterFormat format); + + // Print part of a memory access. This should be used for annotating + // non-trivial accesses, such as structured or sign-extending loads. Call + // Print*Register (or Print*RegistersForStructuredAccess), then + // PrintPartialAccess for each contiguous access that makes up the + // instruction. + // + // access_mask: + // The lanes to be printed. Each bit corresponds to a byte in the printed + // register, in a manner similar to SVE's predicates, except that the + // lane size is not respected when interpreting lane_mask: unaligned bits + // must be zeroed. + // + // This function asserts that this mask is non-zero. + // + // future_access_mask: + // The lanes to be printed by a future invocation. This must be specified + // because vertical lines are drawn for partial accesses that haven't yet + // been printed. The format is the same as for accessed_mask. + // + // If a lane is active in both `access_mask` and `future_access_mask`, + // `access_mask` takes precedence. + // + // struct_element_count: + // The number of elements in each structure. For non-structured accesses, + // set this to one. Along with lane_size_in_bytes, this is used determine + // the size of each access, and to format the accessed value. + // + // op: + // For stores, use "->". For loads, use "<-". + // + // address: + // The address of this partial access. (Not the base address of the whole + // instruction.) The traced value is read from this address (according to + // part_count and lane_size_in_bytes) so it must be accessible, and when + // tracing stores, the store must have been executed before this function + // is called. + // + // reg_size_in_bytes: + // The size of the register being accessed. This helper is usually used + // for V registers or Q-sized chunks of Z registers, so that is the + // default, but it is possible to use this to annotate X register + // accesses by specifying kXRegSizeInBytes. + // + // The return value is a future_access_mask suitable for the next iteration, + // so that it is possible to execute this in a loop, until the mask is zero. 
+ // Note that accessed_mask must still be updated by the caller for each call. + uint16_t PrintPartialAccess(uint16_t access_mask, + uint16_t future_access_mask, + int struct_element_count, + int lane_size_in_bytes, + const char* op, + uintptr_t address, + int reg_size_in_bytes = kQRegSizeInBytes); + + // Print an abstract register value. This works for all register types, and + // can print parts of registers. This exists to ensure consistent formatting + // of values. + void PrintRegisterValue(const uint8_t* value, + int value_size, + PrintRegisterFormat format); + template + void PrintRegisterValue(const T& sim_register, PrintRegisterFormat format) { + PrintRegisterValue(sim_register.GetBytes(), + std::min(sim_register.GetSizeInBytes(), + kQRegSizeInBytes), + format); } - // Helper functions for register tracing. - void PrintRegisterRawHelper(unsigned code, - Reg31Mode r31mode, - int size_in_bytes = kXRegSizeInBytes); - void PrintVRegisterRawHelper(unsigned code, - int bytes = kQRegSizeInBytes, - int lsb = 0); - void PrintVRegisterFPHelper(unsigned code, - unsigned lane_size_in_bytes, - int lane_count = 1, - int rightmost_lane = 0); + // As above, but format as an SVE predicate value, using binary notation with + // spaces between each bit so that they align with the Z register bytes that + // they predicate. + void PrintPRegisterValue(uint16_t value); + + void PrintRegisterValueFPAnnotations(const uint8_t* value, + uint16_t lane_mask, + PrintRegisterFormat format); + template + void PrintRegisterValueFPAnnotations(const T& sim_register, + uint16_t lane_mask, + PrintRegisterFormat format) { + PrintRegisterValueFPAnnotations(sim_register.GetBytes(), lane_mask, format); + } + template + void PrintRegisterValueFPAnnotations(const T& sim_register, + PrintRegisterFormat format) { + PrintRegisterValueFPAnnotations(sim_register.GetBytes(), + GetPrintRegLaneMask(format), + format); + } VIXL_NO_RETURN void DoUnreachable(const Instruction* instr); void DoTrace(const Instruction* instr); @@ -1543,10 +2763,13 @@ class Simulator : public DecoderVisitor { Reg31Mode mode = Reg31IsZeroRegister); static const char* XRegNameForCode(unsigned code, Reg31Mode mode = Reg31IsZeroRegister); + static const char* BRegNameForCode(unsigned code); static const char* HRegNameForCode(unsigned code); static const char* SRegNameForCode(unsigned code); static const char* DRegNameForCode(unsigned code); static const char* VRegNameForCode(unsigned code); + static const char* ZRegNameForCode(unsigned code); + static const char* PRegNameForCode(unsigned code); bool IsColouredTrace() const { return coloured_trace_; } VIXL_DEPRECATED("IsColouredTrace", bool coloured_trace() const) { @@ -1565,18 +2788,28 @@ class Simulator : public DecoderVisitor { return GetTraceParameters(); } + bool ShouldTraceWrites() const { + return (GetTraceParameters() & LOG_WRITE) != 0; + } + bool ShouldTraceRegs() const { + return (GetTraceParameters() & LOG_REGS) != 0; + } + bool ShouldTraceVRegs() const { + return (GetTraceParameters() & LOG_VREGS) != 0; + } + bool ShouldTraceSysRegs() const { + return (GetTraceParameters() & LOG_SYSREGS) != 0; + } + bool ShouldTraceBranches() const { + return (GetTraceParameters() & LOG_BRANCH) != 0; + } + void SetTraceParameters(int parameters); VIXL_DEPRECATED("SetTraceParameters", void set_trace_parameters(int parameters)) { SetTraceParameters(parameters); } - void SetInstructionStats(bool value); - VIXL_DEPRECATED("SetInstructionStats", - void set_instruction_stats(bool value)) { - SetInstructionStats(value); 
-  }
-
   // Clear the simulated local monitor to force the next store-exclusive
   // instruction to fail.
   void ClearLocalMonitor() { local_monitor_.Clear(); }
@@ -1585,6 +2818,31 @@
     print_exclusive_access_warning_ = false;
   }
 
+  void CheckIsValidUnalignedAtomicAccess(int rn,
+                                         uint64_t address,
+                                         unsigned access_size) {
+    // Verify that the address is available to the host.
+    VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+    if (GetCPUFeatures()->Has(CPUFeatures::kUSCAT)) {
+      // Check that the access falls entirely within one atomic access granule.
+      if (AlignDown(address, kAtomicAccessGranule) !=
+          AlignDown(address + access_size - 1, kAtomicAccessGranule)) {
+        VIXL_ALIGNMENT_EXCEPTION();
+      }
+    } else {
+      // Check that the access is aligned.
+      if (AlignDown(address, access_size) != address) {
+        VIXL_ALIGNMENT_EXCEPTION();
+      }
+    }
+
+    // The sp must be aligned to 16 bytes when it is accessed.
+    if ((rn == kSpRegCode) && (AlignDown(address, 16) != address)) {
+      VIXL_ALIGNMENT_EXCEPTION();
+    }
+  }
+
   enum PointerType { kDataPointer, kInstructionPointer };
 
   struct PACKey {
@@ -1622,6 +2880,48 @@
                    PointerType type);
   uint64_t AddPAC(uint64_t ptr, uint64_t context, PACKey key, PointerType type);
   uint64_t StripPAC(uint64_t ptr, PointerType type);
+  void PACHelper(int dst,
+                 int src,
+                 PACKey key,
+                 decltype(&Simulator::AddPAC) pac_fn);
+
+  // Armv8.5 MTE helpers.
+  uint64_t ChooseNonExcludedTag(uint64_t tag,
+                                uint64_t offset,
+                                uint64_t exclude = 0) {
+    VIXL_ASSERT(IsUint4(tag) && IsUint4(offset) && IsUint16(exclude));
+
+    if (exclude == 0xffff) {
+      return 0;
+    }
+
+    if (offset == 0) {
+      while ((exclude & (1 << tag)) != 0) {
+        tag = (tag + 1) % 16;
+      }
+    }
+
+    while (offset > 0) {
+      offset--;
+      tag = (tag + 1) % 16;
+      while ((exclude & (1 << tag)) != 0) {
+        tag = (tag + 1) % 16;
+      }
+    }
+    return tag;
+  }
+
+  uint64_t GetAddressWithAllocationTag(uint64_t addr, uint64_t tag) {
+    VIXL_ASSERT(IsUint4(tag));
+    return (addr & ~(UINT64_C(0xf) << 56)) | (tag << 56);
+  }
+
+  // Create or remove a mapping with memory protection. Memory attributes such
+  // as MTE and BTI are represented by metadata in Simulator.
+  void* Mmap(
+      void* address, size_t length, int prot, int flags, int fd, off_t offset);
+
+  int Munmap(void* address, size_t length, int prot);
 
   // The common CPUFeatures interface with the set of available features.
@@ -1690,6 +2990,7 @@
   R DoRuntimeCall(R (*function)(P...),
                   std::tuple<P...> arguments,
                   local_index_sequence<I...>) {
+    USE(arguments);
     return function(std::get<I>(arguments)...);
   }
 
@@ -1701,7 +3002,10 @@
     R return_value = DoRuntimeCall(function,
                                    argument_operands,
                                    __local_index_sequence_for<P...>{});
-    WriteGenericOperand(abi.GetReturnGenericOperand(), return_value);
+    bool succeeded =
+        WriteGenericOperand(abi.GetReturnGenericOperand(), return_value);
+    USE(succeeded);
+    VIXL_ASSERT(succeeded);
   }
 
   template <typename R, typename... P>
@@ -1734,6 +3038,192 @@
   };
 #endif
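// Worked example (not from the patch) for ChooseNonExcludedTag() above, which
// models MTE tag selection as used by IRG-style tag generation: with tag = 2,
// offset = 3 and exclude = 0x0030 (tags 4 and 5 excluded), the walk visits 3,
// then 6 (skipping the excluded 4 and 5), then 7, so the result is 7. A
// direct, self-contained transcription:
#include <cstdint>

uint64_t ChooseNonExcludedTagSketch(uint64_t tag,
                                    uint64_t offset,
                                    uint64_t exclude) {
  if (exclude == 0xffff) return 0;  // every tag excluded: result defined as 0
  if (offset == 0) {
    // Step off an excluded starting tag.
    while ((exclude & (UINT64_C(1) << tag)) != 0) tag = (tag + 1) % 16;
  }
  while (offset > 0) {
    offset--;
    tag = (tag + 1) % 16;  // advance to the next candidate tag
    while ((exclude & (UINT64_C(1) << tag)) != 0) tag = (tag + 1) % 16;
  }
  return tag;
}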
+  // Configure the simulated value of 'VL', which is the size of a Z register.
+  // Because this cannot occur during a program's lifetime, this function also
+  // resets the SVE registers.
+  void SetVectorLengthInBits(unsigned vector_length);
+
+  unsigned GetVectorLengthInBits() const { return vector_length_; }
+  unsigned GetVectorLengthInBytes() const {
+    VIXL_ASSERT((vector_length_ % kBitsPerByte) == 0);
+    return vector_length_ / kBitsPerByte;
+  }
+  unsigned GetPredicateLengthInBits() const {
+    VIXL_ASSERT((GetVectorLengthInBits() % kZRegBitsPerPRegBit) == 0);
+    return GetVectorLengthInBits() / kZRegBitsPerPRegBit;
+  }
+  unsigned GetPredicateLengthInBytes() const {
+    VIXL_ASSERT((GetVectorLengthInBytes() % kZRegBitsPerPRegBit) == 0);
+    return GetVectorLengthInBytes() / kZRegBitsPerPRegBit;
+  }
+
+  unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) const {
+    if (IsSVEFormat(vform)) {
+      return GetVectorLengthInBits();
+    } else {
+      return vixl::aarch64::RegisterSizeInBitsFromFormat(vform);
+    }
+  }
+
+  unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) const {
+    unsigned size_in_bits = RegisterSizeInBitsFromFormat(vform);
+    VIXL_ASSERT((size_in_bits % kBitsPerByte) == 0);
+    return size_in_bits / kBitsPerByte;
+  }
+
+  int LaneCountFromFormat(VectorFormat vform) const {
+    if (IsSVEFormat(vform)) {
+      return GetVectorLengthInBits() / LaneSizeInBitsFromFormat(vform);
+    } else {
+      return vixl::aarch64::LaneCountFromFormat(vform);
+    }
+  }
+
+  bool IsFirstActive(VectorFormat vform,
+                     const LogicPRegister& mask,
+                     const LogicPRegister& bits) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      if (mask.IsActive(vform, i)) {
+        return bits.IsActive(vform, i);
+      }
+    }
+    return false;
+  }
+
+  bool AreNoneActive(VectorFormat vform,
+                     const LogicPRegister& mask,
+                     const LogicPRegister& bits) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      if (mask.IsActive(vform, i) && bits.IsActive(vform, i)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool IsLastActive(VectorFormat vform,
+                    const LogicPRegister& mask,
+                    const LogicPRegister& bits) {
+    for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+      if (mask.IsActive(vform, i)) {
+        return bits.IsActive(vform, i);
+      }
+    }
+    return false;
+  }
+
+  void PredTest(VectorFormat vform,
+                const LogicPRegister& mask,
+                const LogicPRegister& bits) {
+    ReadNzcv().SetN(IsFirstActive(vform, mask, bits));
+    ReadNzcv().SetZ(AreNoneActive(vform, mask, bits));
+    ReadNzcv().SetC(!IsLastActive(vform, mask, bits));
+    ReadNzcv().SetV(0);
+    LogSystemRegister(NZCV);
+  }
+
+  SimPRegister& GetPTrue() { return pregister_all_true_; }
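// A standalone sketch (not from the patch) of the flag semantics that
// PredTest() above gives to SVE predicate tests, using plain bit masks in
// place of LogicPRegister (lane i of `mask`/`bits` is bit i; all names are
// the sketch's own): N is the first governed lane, Z is "none active", C is
// the inverted last governed lane, and V is always zero.
#include <cstdint>

struct PredTestFlagsSketch {
  bool n, z, c, v;
};

PredTestFlagsSketch PredTestSketch(uint64_t mask, uint64_t bits, int lanes) {
  bool seen_governed = false;
  bool first = false;  // `bits` at the first lane governed by `mask`
  bool last = false;   // `bits` at the last governed lane
  bool none = true;    // true while no governed lane of `bits` is set
  for (int i = 0; i < lanes; i++) {
    if (((mask >> i) & 1) == 0) continue;  // ungoverned lanes are ignored
    bool b = ((bits >> i) & 1) != 0;
    if (!seen_governed) first = b;
    seen_governed = true;
    last = b;
    if (b) none = false;
  }
  return {first, none, !last, false};  // N, Z, C, V, as set by PredTest()
}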
+ if (count != expected) { + std::stringstream sstream; + sstream << std::hex + << "MTE WARNING : the memory region being unmapped " + "starting at address 0x" + << reinterpret_cast<uint64_t>(address) + << " is not fully MTE protected.\n"; + VIXL_WARNING(sstream.str().c_str()); + } + return count; + } + + template <typename T> + void SetGranuleTag(T address, + int tag, + size_t length = kMTETagGranuleInBytes) { + for (size_t offset = 0; offset < length; offset += kMTETagGranuleInBytes) { + meta_data_.SetMTETag((uintptr_t)(address) + offset, tag); + } + } + + template <typename T> + int GetGranuleTag(T address) { + return meta_data_.GetMTETag(address); + } + + // Generate a random address tag; any tags specified in the input are + // excluded from the selection. + uint64_t GenerateRandomTag(uint16_t exclude = 0); + + // Register a new BranchInterception object. If 'function' is branched to + // (e.g. "bl function") in the future, then 'callback' will be called, if + // provided; otherwise a runtime call will be performed on 'function'. + // + // For example: this can be used to always perform runtime calls on + // non-AArch64 functions without using the macroassembler. + template <typename R, typename... P> + void RegisterBranchInterception(R (*function)(P...), + InterceptionCallback callback = nullptr) { + meta_data_.RegisterBranchInterception(*function, callback); + } + + // Return the current output stream in use by the simulator. + FILE* GetOutputStream() const { return stream_; } + + bool IsDebuggerEnabled() const { return debugger_enabled_; } + + void SetDebuggerEnabled(bool enabled) { debugger_enabled_ = enabled; } + + Debugger* GetDebugger() const { return debugger_.get(); } + +#ifdef VIXL_ENABLE_IMPLICIT_CHECKS + // Returns true if the faulting instruction address (usually the program + // counter or instruction pointer) comes from an internal VIXL memory access. + // This can be used by signal handlers to check if a signal was raised from + // the simulator (via TryMemoryAccess) before the actual + // access occurs. + bool IsSimulatedMemoryAccess(uintptr_t fault_pc) const { + return (fault_pc == + reinterpret_cast<uintptr_t>(&_vixl_internal_ReadMemory)); + } + + // Get the instruction address of the internal VIXL memory access continuation + // label. Signal handlers can resume execution at this address to return to + // TryMemoryAccess, which will continue simulation. + uintptr_t GetSignalReturnAddress() const { + return reinterpret_cast<uintptr_t>(&_vixl_internal_AccessMemory_continue); + } + + // Replace the fault address reported by the kernel with the actual faulting + // address. + // + // This is required because TryMemoryAccess reads a section of + // memory 1 byte at a time, meaning the fault address reported may not be the + // base address of memory being accessed. + void ReplaceFaultAddress(siginfo_t* siginfo, void* context) { +#ifdef __x86_64__ + // The base address being accessed is passed in as the first argument to + // _vixl_internal_ReadMemory.
+ ucontext_t* uc = reinterpret_cast<ucontext_t*>(context); + siginfo->si_addr = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RDI]); +#else + USE(siginfo); + USE(context); +#endif // __x86_64__ + } +#endif // VIXL_ENABLE_IMPLICIT_CHECKS + protected: const char* clr_normal; const char* clr_flag_name; @@ -1742,6 +3232,8 @@ class Simulator : public DecoderVisitor { const char* clr_reg_value; const char* clr_vreg_name; const char* clr_vreg_value; + const char* clr_preg_name; + const char* clr_preg_value; const char* clr_memory_address; const char* clr_warning; const char* clr_warning_message; @@ -1749,6 +3241,13 @@ const char* clr_branch_marker; // Simulation helpers ------------------------------------ + + void ResetSystemRegisters(); + void ResetRegisters(); + void ResetVRegisters(); + void ResetPRegisters(); + void ResetFFR(); + bool ConditionPassed(Condition cond) { switch (cond) { case eq: @@ -1801,6 +3300,15 @@ uint64_t left, uint64_t right, int carry_in = 0); + std::pair<uint64_t, uint8_t> AddWithCarry(unsigned reg_size, + uint64_t left, + uint64_t right, + int carry_in); + vixl_uint128_t Add128(vixl_uint128_t x, vixl_uint128_t y); + vixl_uint128_t Lsl128(vixl_uint128_t x, unsigned shift) const; + vixl_uint128_t Eor128(vixl_uint128_t x, vixl_uint128_t y) const; + vixl_uint128_t Mul64(uint64_t x, uint64_t y); + vixl_uint128_t Neg128(vixl_uint128_t x); void LogicalHelper(const Instruction* instr, int64_t op2); void ConditionalCompareHelper(const Instruction* instr, int64_t op2); void LoadStoreHelper(const Instruction* instr, @@ -1817,6 +3325,10 @@ void AtomicMemorySwapHelper(const Instruction* instr); template <typename T> void LoadAcquireRCpcHelper(const Instruction* instr); + template <typename T1, typename T2> + void LoadAcquireRCpcUnscaledOffsetHelper(const Instruction* instr); + template <typename T> + void StoreReleaseUnscaledOffsetHelper(const Instruction* instr); uintptr_t AddressModeHelper(unsigned addr_reg, int64_t offset, AddrMode addrmode); @@ -1824,105 +3336,147 @@ AddrMode addr_mode); void NEONLoadStoreSingleStructHelper(const Instruction* instr, AddrMode addr_mode); + template <uint32_t mops_type> + void MOPSPHelper(const Instruction* instr) { + VIXL_ASSERT(instr->IsConsistentMOPSTriplet()); - uint64_t AddressUntag(uint64_t address) { return address & ~kAddressTagMask; } + int d = instr->GetRd(); + int n = instr->GetRn(); + int s = instr->GetRs(); - template <typename T> - T* AddressUntag(T* address) { - uintptr_t address_raw = reinterpret_cast<uintptr_t>(address); - return reinterpret_cast<T*>(AddressUntag(address_raw)); + // Aliased registers and xzr are disallowed for Xd and Xn. + if ((d == n) || (d == s) || (n == s) || (d == 31) || (n == 31)) { + VisitUnallocated(instr); } + + // Additionally, Xs may not be xzr for cpy. + if ((mops_type == "cpy"_h) && (s == 31)) { + VisitUnallocated(instr); + } + + // Bits 31 and 30 must be zero. + if (instr->ExtractBits(31, 30) != 0) { + VisitUnallocated(instr); + } + + // Saturate copy count. + uint64_t xn = ReadXRegister(n); + int saturation_bits = (mops_type == "cpy"_h) ? 55 : 63; + if ((xn >> saturation_bits) != 0) { + xn = (UINT64_C(1) << saturation_bits) - 1; + if (mops_type == "setg"_h) { + // Align saturated value to granule. + xn &= ~UINT64_C(kMTETagGranuleInBytes - 1); + } + WriteXRegister(n, xn); + } + + ReadNzcv().SetN(0); + ReadNzcv().SetZ(0); + ReadNzcv().SetC(1); // Indicates "option B" implementation.
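+ // (After the prologue, software can read the C flag to tell which of the
+ // two architecturally permitted algorithms, "option A" (C clear) or
+ // "option B" (C set), the implementation follows.)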
+ ReadNzcv().SetV(0); } int64_t ShiftOperand(unsigned reg_size, - int64_t value, + uint64_t value, Shift shift_type, unsigned amount) const; int64_t ExtendValue(unsigned reg_width, int64_t value, Extend extend_type, unsigned left_shift = 0) const; - uint16_t PolynomialMult(uint8_t op1, uint8_t op2) const; + uint64_t PolynomialMult(uint64_t op1, + uint64_t op2, + int lane_size_in_bits) const; + vixl_uint128_t PolynomialMult128(uint64_t op1, + uint64_t op2, + int lane_size_in_bits) const; - void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr); - void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr); - void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr); - void ld2(VectorFormat vform, + bool ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr); + bool ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr); + bool ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr); + bool ld1r(VectorFormat vform, + VectorFormat unpack_vform, + LogicVRegister dst, + uint64_t addr, + bool is_signed = false); + bool ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, uint64_t addr); - void ld2(VectorFormat vform, + bool ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, int index, uint64_t addr); - void ld2r(VectorFormat vform, + bool ld2r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, uint64_t addr); - void ld3(VectorFormat vform, + bool ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, uint64_t addr); - void ld3(VectorFormat vform, + bool ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, int index, uint64_t addr); - void ld3r(VectorFormat vform, + bool ld3r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, uint64_t addr); - void ld4(VectorFormat vform, + bool ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, LogicVRegister dst4, uint64_t addr); - void ld4(VectorFormat vform, + bool ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, LogicVRegister dst4, int index, uint64_t addr); - void ld4r(VectorFormat vform, + bool ld4r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, LogicVRegister dst3, LogicVRegister dst4, uint64_t addr); - void st1(VectorFormat vform, LogicVRegister src, uint64_t addr); - void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr); - void st2(VectorFormat vform, + bool st1(VectorFormat vform, LogicVRegister src, uint64_t addr); + bool st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr); + bool st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2, uint64_t addr); - void st2(VectorFormat vform, + bool st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2, int index, uint64_t addr); - void st3(VectorFormat vform, + bool st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2, LogicVRegister src3, uint64_t addr); - void st3(VectorFormat vform, + bool st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2, LogicVRegister src3, int index, uint64_t addr); - void st4(VectorFormat vform, + bool st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2, LogicVRegister src3, LogicVRegister src4, uint64_t addr); - void st4(VectorFormat vform, + bool st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2, LogicVRegister src3, @@ -1947,16 +3501,43 @@ class Simulator : public DecoderVisitor { LogicVRegister dst, 
const LogicVRegister& src1, const LogicVRegister& src2); + // Add `value` to each lane of `src1`, treating `value` as unsigned for the + // purposes of setting the saturation flags. + LogicVRegister add_uint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + uint64_t value); LogicVRegister addp(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + LogicPRegister brka(LogicPRegister pd, + const LogicPRegister& pg, + const LogicPRegister& pn); + LogicPRegister brkb(LogicPRegister pd, + const LogicPRegister& pg, + const LogicPRegister& pn); + LogicPRegister brkn(LogicPRegister pdm, + const LogicPRegister& pg, + const LogicPRegister& pn); + LogicPRegister brkpa(LogicPRegister pd, + const LogicPRegister& pg, + const LogicPRegister& pn, + const LogicPRegister& pm); + LogicPRegister brkpb(LogicPRegister pd, + const LogicPRegister& pg, + const LogicPRegister& pn, + const LogicPRegister& pm); + // dst = srca + src1 * src2 LogicVRegister mla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); + // dst = srca - src1 * src2 LogicVRegister mls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); LogicVRegister mul(VectorFormat vform, @@ -1982,6 +3563,14 @@ class Simulator : public DecoderVisitor { LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister sdiv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister udiv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform, LogicVRegister dst, @@ -1998,106 +3587,59 @@ class Simulator : public DecoderVisitor { const LogicVRegister& src1, const LogicVRegister& src2, int index); + LogicVRegister fmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); + LogicVRegister fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); LogicVRegister fmulx(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister smull(VectorFormat vform, + LogicVRegister smulh(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister smull2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umull(VectorFormat vform, + const LogicVRegister& src2); + LogicVRegister umulh(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umull2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister smlal(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - 
const LogicVRegister& src2, - int index); - LogicVRegister smlal2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umlal(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umlal2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister smlsl(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister smlsl2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umlsl(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); - LogicVRegister umlsl2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); + const LogicVRegister& src2); LogicVRegister sqdmull(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister sqdmull2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); LogicVRegister sqdmlal(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister sqdmlal2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); LogicVRegister sqdmlsl(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister sqdmlsl2(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); LogicVRegister sqdmulh(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2108,21 +3650,11 @@ class Simulator : public DecoderVisitor { const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister sdot(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); LogicVRegister sqrdmlah(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); - LogicVRegister udot(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int index); LogicVRegister sqrdmlsh(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2132,6 +3664,12 @@ class Simulator : public DecoderVisitor { LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + // Subtract `value` from each lane of `src1`, treating `value` as unsigned for + // the purposes of setting the saturation flags. 
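+ // For example, sub_uint(vform, zd, zn, 16) computes zd[i] = zn[i] - 16 in
+ // every lane, with the 16 treated as unsigned when deciding whether a lane
+ // saturated.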
+ LogicVRegister sub_uint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + uint64_t value); LogicVRegister and_(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2166,6 +3704,7 @@ const LogicVRegister& src2); LogicVRegister bsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src_mask, const LogicVRegister& src1, const LogicVRegister& src2); LogicVRegister cls(VectorFormat vform, @@ -2174,6 +3713,9 @@ LogicVRegister clz(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); + LogicVRegister cnot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); LogicVRegister cnt(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); @@ -2185,8 +3727,11 @@ const LogicVRegister& src); LogicVRegister rev(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src, - int revSize); + const LogicVRegister& src); + LogicVRegister rev_byte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int rev_size); LogicVRegister rev16(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); @@ -2213,11 +3758,19 @@ LogicVRegister uadalp(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); + LogicVRegister ror(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int rotation); LogicVRegister ext(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int index); + LogicVRegister rotate_elements_right(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int index); template <typename T> LogicVRegister fcadd(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, int rot); LogicVRegister fcadd(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, @@ -2234,6 +3787,7 @@ LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, + const LogicVRegister& acc, int index, int rot); LogicVRegister fcmla(VectorFormat vform, @@ -2242,17 +3796,59 @@ const LogicVRegister& src2, int index, int rot); + LogicVRegister fcmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + const LogicVRegister& acc, + int rot); template <typename T> - LogicVRegister fcmla(VectorFormat vform, + LogicVRegister fadda(VectorFormat vform, + LogicVRegister acc, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister fadda(VectorFormat vform, + LogicVRegister acc, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister cadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot, + bool saturate = false); + LogicVRegister cmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& srca, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister cmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& srca, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index, + int rot); + LogicVRegister bgrp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool do_bext = false); + LogicVRegister bdep(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister histogram(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src1, + 
const LogicVRegister& src2, + bool do_segmented = false); + LogicVRegister index(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int rot); - LogicVRegister fcmla(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src1, - const LogicVRegister& src2, - int rot); + uint64_t start, + uint64_t step); LogicVRegister ins_element(VectorFormat vform, LogicVRegister dst, int dst_index, @@ -2262,13 +3858,44 @@ LogicVRegister dst, int dst_index, uint64_t imm); + LogicVRegister insr(VectorFormat vform, LogicVRegister dst, uint64_t imm); LogicVRegister dup_element(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src, int src_index); + LogicVRegister dup_elements_to_segments(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int src_index); + LogicVRegister dup_elements_to_segments( + VectorFormat vform, + LogicVRegister dst, + const std::pair<int, int>& src_and_index); LogicVRegister dup_immediate(VectorFormat vform, LogicVRegister dst, uint64_t imm); + LogicVRegister mov(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicPRegister mov(LogicPRegister dst, const LogicPRegister& src); + LogicVRegister mov_merging(VectorFormat vform, + LogicVRegister dst, + const SimPRegister& pg, + const LogicVRegister& src); + LogicVRegister mov_zeroing(VectorFormat vform, + LogicVRegister dst, + const SimPRegister& pg, + const LogicVRegister& src); + LogicVRegister mov_alternating(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int start_at); + LogicPRegister mov_merging(LogicPRegister dst, + const LogicPRegister& pg, + const LogicPRegister& src); + LogicPRegister mov_zeroing(LogicPRegister dst, + const LogicPRegister& pg, + const LogicPRegister& src); LogicVRegister movi(VectorFormat vform, LogicVRegister dst, uint64_t imm); LogicVRegister mvni(VectorFormat vform, LogicVRegister dst, uint64_t imm); LogicVRegister orr(VectorFormat vform, @@ -2278,11 +3905,52 @@ LogicVRegister sshl(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, - const LogicVRegister& src2); + const LogicVRegister& src2, + bool shift_is_8bit = true); LogicVRegister ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool shift_is_8bit = true); + LogicVRegister sshr(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + // Perform a "conditional last" operation. The first part of the pair is true + // if any predicate lane is active, false otherwise. The second part is the + // value of the last-active-plus-offset lane, or of the last-plus-offset lane + // if no lane is active.
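+ // For example, assuming an eight-lane format with lanes 1 and 3 of pg
+ // active and offset_from_last_active == 1, the result is {true, src2[4]}:
+ // lane 4 is the lane after the last active one.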
+ std::pair<bool, uint64_t> clast(VectorFormat vform, + const LogicPRegister& pg, + const LogicVRegister& src2, + int offset_from_last_active); + LogicPRegister match(VectorFormat vform, + LogicPRegister dst, + const LogicVRegister& haystack, + const LogicVRegister& needles, + bool negate_match); + LogicVRegister compact(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister splice(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sel(VectorFormat vform, + LogicVRegister dst, + const SimPRegister& pg, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicPRegister sel(LogicPRegister dst, + const LogicPRegister& pg, + const LogicPRegister& src1, + const LogicPRegister& src2); LogicVRegister sminmax(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2323,6 +3991,7 @@ const LogicVRegister& src); LogicVRegister sminmaxv(VectorFormat vform, LogicVRegister dst, + const LogicPRegister& pg, const LogicVRegister& src, bool max); LogicVRegister smaxv(VectorFormat vform, @@ -2333,16 +4002,26 @@ const LogicVRegister& src); LogicVRegister uxtl(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src); + const LogicVRegister& src, + bool is_2 = false); LogicVRegister uxtl2(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); LogicVRegister sxtl(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src); + const LogicVRegister& src, + bool is_2 = false); LogicVRegister sxtl2(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); + LogicVRegister uxt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + unsigned from_size_in_bits); + LogicVRegister sxt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + unsigned from_size_in_bits); LogicVRegister tbl(VectorFormat vform, LogicVRegister dst, const LogicVRegister& tab, @@ -2487,6 +4166,7 @@ const LogicVRegister& src2); LogicVRegister uminmaxv(VectorFormat vform, LogicVRegister dst, + const LogicPRegister& pg, const LogicVRegister& src, bool max); LogicVRegister umaxv(VectorFormat vform, @@ -2523,11 +4203,27 @@ LogicVRegister dst, const LogicVRegister& src, int shift); + LogicVRegister scvtf(VectorFormat vform, + unsigned dst_data_size_in_bits, + unsigned src_data_size_in_bits, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src, + FPRounding round, + int fbits = 0); LogicVRegister scvtf(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src, int fbits, FPRounding rounding_mode); + LogicVRegister ucvtf(VectorFormat vform, + unsigned dst_data_size, + unsigned src_data_size, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src, + FPRounding round, + int fbits = 0); LogicVRegister ucvtf(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src, @@ -2589,10 +4285,12 @@ int shift); LogicVRegister suqadd(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src); + const LogicVRegister& src1, + const LogicVRegister& src2); LogicVRegister usqadd(VectorFormat vform, LogicVRegister dst, - const LogicVRegister& src); + const LogicVRegister& src1, + const LogicVRegister& src2); LogicVRegister sqshl(VectorFormat vform, 
LogicVRegister dst, const LogicVRegister& src, @@ -2613,9 +4311,9 @@ const LogicVRegister& src); LogicVRegister extractnarrow(VectorFormat vform, LogicVRegister dst, - bool dstIsSigned, + bool dst_is_signed, const LogicVRegister& src, - bool srcIsSigned); + bool src_is_signed); LogicVRegister xtn(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); @@ -2632,7 +4330,7 @@ LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, - bool issigned); + bool is_signed); LogicVRegister saba(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2714,7 +4412,8 @@ LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, - bool is_signed); + bool is_src1_signed, + bool is_src2_signed); LogicVRegister sdot(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2723,12 +4422,41 @@ LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister usdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister cdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& acc, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister sqrdcmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& srca, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister sqrdcmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& srca, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index, + int rot); LogicVRegister sqrdmlash(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2, bool round = true, bool sub_op = false); + LogicVRegister sqrdmlash_d(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true, + bool sub_op = false); LogicVRegister sqrdmlah(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2743,6 +4471,21 @@ LogicVRegister dst, const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister matmul(VectorFormat vform_dst, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool src1_signed, + bool src2_signed); + template <typename T> + LogicVRegister fmatmul(VectorFormat vform, + LogicVRegister srcdst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmatmul(VectorFormat vform, + LogicVRegister srcdst, + const LogicVRegister& src1, + const LogicVRegister& src2); #define NEON_3VREG_LOGIC_LIST(V) \ V(addhn) \ V(addhn2) \ @@ -2762,23 +4505,14 @@ V(sabdl2) \ V(uabdl) \ V(uabdl2) \ - V(smull) \ V(smull2) \ - V(umull) \ V(umull2) \ - V(smlal) \ V(smlal2) \ - V(umlal) \ V(umlal2) \ - V(smlsl) \ V(smlsl2) \ - V(umlsl) \ V(umlsl2) \ - V(sqdmlal) \ V(sqdmlal2) \ - V(sqdmlsl) \ V(sqdmlsl2) \ - V(sqdmull) \ V(sqdmull2) #define DEFINE_LOGIC_FUNC(FXN) \ @@ -2789,6 +4523,26 @@ NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC) #undef DEFINE_LOGIC_FUNC +#define NEON_MULL_LIST(V) \ + V(smull) \ + V(umull) \ + V(smlal) \ + V(umlal) \ + V(smlsl) \ + V(umlsl) \ + V(sqdmlal) \ + V(sqdmlsl) \ + V(sqdmull) + +#define DECLARE_NEON_MULL_OP(FN) \ + LogicVRegister FN(VectorFormat 
vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2, \ + bool is_2 = false); + NEON_MULL_LIST(DECLARE_NEON_MULL_OP) +#undef DECLARE_NEON_MULL_OP + #define NEON_FP3SAME_LIST(V) \ V(fadd, FPAdd, false) \ V(fsub, FPSub, true) \ @@ -2831,6 +4585,12 @@ NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP) #undef DECLARE_NEON_FP_PAIR_OP + enum FrintMode { + kFrintToInteger = 0, + kFrintToInt32 = 32, + kFrintToInt64 = 64 + }; + template <typename T> LogicVRegister frecps(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src1, @@ -2852,19 +4612,23 @@ template <typename T> LogicVRegister fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); LogicVRegister fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); template <typename T> LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& srca, const LogicVRegister& src1, const LogicVRegister& src2); LogicVRegister fnmul(VectorFormat vform, @@ -2872,6 +4636,23 @@ const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister fmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> LogicVRegister fcmp(VectorFormat vform, LogicVRegister dst, @@ -2907,6 +4688,34 @@ LogicVRegister frecpx(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); + LogicVRegister ftsmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ftssel(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ftmad(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + unsigned index); + LogicVRegister fexpa(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister flogb(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister fscale(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fscale(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); template <typename T> LogicVRegister fabs_(VectorFormat vform, LogicVRegister dst, @@ -2922,12 +4731,34 @@ LogicVRegister dst, const LogicVRegister& src, FPRounding rounding_mode, - bool inexact_exception = false); + bool inexact_exception = false, + FrintMode frint_mode = kFrintToInteger); + LogicVRegister fcvt(VectorFormat dst_vform, + VectorFormat src_vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister fcvts(VectorFormat vform, + unsigned dst_data_size_in_bits, + unsigned 
src_data_size_in_bits, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src, + FPRounding round, + int fbits = 0); LogicVRegister fcvts(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src, FPRounding rounding_mode, int fbits = 0); + LogicVRegister fcvtu(VectorFormat vform, + unsigned dst_data_size_in_bits, + unsigned src_data_size_in_bits, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src, + FPRounding round, + int fbits = 0); LogicVRegister fcvtu(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src, @@ -2968,16 +4799,82 @@ LogicVRegister dst, const LogicVRegister& src); + LogicPRegister pfalse(LogicPRegister dst); + LogicPRegister pfirst(LogicPRegister dst, + const LogicPRegister& pg, + const LogicPRegister& src); + LogicPRegister ptrue(VectorFormat vform, LogicPRegister dst, int pattern); + LogicPRegister pnext(VectorFormat vform, + LogicPRegister dst, + const LogicPRegister& pg, + const LogicPRegister& src); + + LogicVRegister asrd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int shift); + + LogicVRegister andv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister eorv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister orv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister saddv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister sminv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister uaddv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister uminv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + LogicVRegister umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicPRegister& pg, + const LogicVRegister& src); + + LogicVRegister interleave_top_bottom(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> - struct TFPMinMaxOp { + struct TFPPairOp { typedef T (Simulator::*type)(T a, T b); }; template <typename T> - LogicVRegister fminmaxv(VectorFormat vform, - LogicVRegister dst, - const LogicVRegister& src, - typename TFPMinMaxOp<T>::type Op); + LogicVRegister FPPairedAcrossHelper(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPPairOp<T>::type fn, + uint64_t inactive_value); + + LogicVRegister FPPairedAcrossHelper( + VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPPairOp<SimFloat16>::type fn16, + typename TFPPairOp<float>::type fn32, + typename TFPPairOp<double>::type fn64, + uint64_t inactive_value); LogicVRegister fminv(VectorFormat vform, LogicVRegister dst, @@ -2991,6 +4888,9 @@ LogicVRegister fmaxnmv(VectorFormat vform, LogicVRegister dst, const LogicVRegister& src); + LogicVRegister faddv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); static const uint32_t CRC32_POLY = 0x04C11DB7; static const uint32_t CRC32C_POLY = 0x1EDC6F41; @@ -3010,6 +4910,8 @@ void FPCompare(double val0, double val1, FPTrapFlags trap); double FPRoundInt(double value, FPRounding round_mode);
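+ // As the plain overload above, except that kFrintToInt32 and kFrintToInt64
+ // additionally bound the rounded result to a value representable in a 32- or
+ // 64-bit signed integer, as FRINT32* and FRINT64* require.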
+ double FPRoundInt(double value, FPRounding round_mode, FrintMode frint_mode); + double FPRoundIntCommon(double value, FPRounding round_mode); double recip_sqrt_estimate(double a); double recip_estimate(double a); double FPRecipSqrtEstimate(double a); @@ -3053,6 +4955,9 @@ template <typename T> T FPMinNM(T a, T b); + template <typename T> + T FPMulNaNs(T op1, T op2); + template <typename T> T FPMul(T op1, T op2); @@ -3089,10 +4994,152 @@ void DoSaveCPUFeatures(const Instruction* instr); void DoRestoreCPUFeatures(const Instruction* instr); -// Simulate a runtime call. -#ifndef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT - VIXL_NO_RETURN_IN_DEBUG_MODE -#endif + // General arithmetic helpers ---------------------------- + + // Add `delta` to the accumulator (`acc`), optionally saturate, then zero- or + // sign-extend. Initial `acc` bits outside `n` are ignored, but the delta must + // be a valid int<n>_t. + uint64_t IncDecN(uint64_t acc, + int64_t delta, + unsigned n, + bool is_saturating = false, + bool is_signed = false); + + // SVE helpers ------------------------------------------- + LogicVRegister SVEBitwiseLogicalUnpredicatedHelper(LogicalOp op, + VectorFormat vform, + LogicVRegister zd, + const LogicVRegister& zn, + const LogicVRegister& zm); + + LogicPRegister SVEPredicateLogicalHelper(SVEPredicateLogicalOp op, + LogicPRegister Pd, + const LogicPRegister& pn, + const LogicPRegister& pm); + + LogicVRegister SVEBitwiseImmHelper(SVEBitwiseLogicalWithImm_UnpredicatedOp op, + VectorFormat vform, + LogicVRegister zd, + uint64_t imm); + enum UnpackType { kHiHalf, kLoHalf }; + enum ExtendType { kSignedExtend, kUnsignedExtend }; + LogicVRegister unpk(VectorFormat vform, + LogicVRegister zd, + const LogicVRegister& zn, + UnpackType unpack_type, + ExtendType extend_type); + + LogicPRegister SVEIntCompareVectorsHelper(Condition cc, + VectorFormat vform, + LogicPRegister dst, + const LogicPRegister& mask, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_wide_elements = false, + FlagsUpdate flags = SetFlags); + + void SVEGatherLoadScalarPlusVectorHelper(const Instruction* instr, + VectorFormat vform, + SVEOffsetModifier mod); + + // Store each active zt[lane] to `addr.GetElementAddress(lane, ...)`. + // + // `zt_code` specifies the code of the first register (zt). Each additional + // register (up to `reg_count`) is `(zt_code + i) % 32`. + // + // This helper calls LogZWrite in the proper way, according to `addr`. + void SVEStructuredStoreHelper(VectorFormat vform, + const LogicPRegister& pg, + unsigned zt_code, + const LogicSVEAddressVector& addr); + // Load each active zt[lane] from `addr.GetElementAddress(lane, ...)`. + // Returns false if a load failed. + bool SVEStructuredLoadHelper(VectorFormat vform, + const LogicPRegister& pg, + unsigned zt_code, + const LogicSVEAddressVector& addr, + bool is_signed = false); + + enum SVEFaultTolerantLoadType { + // - Elements active in both FFR and pg are accessed as usual. If the access + // fails, the corresponding lane and all subsequent lanes are filled with + // an unpredictable value, and made inactive in FFR. + // + // - Elements active in FFR but not pg are set to zero. + // + // - Elements that are not active in FFR are filled with an unpredictable + // value, regardless of pg.
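+ // For example, if all lanes are active and the third element of a gather
+ // is inaccessible, lanes 0 and 1 receive their loaded values, lanes 2 and
+ // up become unpredictable and are cleared in FFR, and no signal is raised.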
+ kSVENonFaultLoad, + + // If type == kSVEFirstFaultLoad, the behaviour is the same, except that the + // first active element is always accessed, regardless of FFR, and will + // generate a real fault if it is inaccessible. If the lane is not active in + // FFR, the actual value loaded into the result is still unpredictable. + kSVEFirstFaultLoad + }; + + // Load with first-faulting or non-faulting load semantics, respecting and + // updating FFR. + void SVEFaultTolerantLoadHelper(VectorFormat vform, + const LogicPRegister& pg, + unsigned zt_code, + const LogicSVEAddressVector& addr, + SVEFaultTolerantLoadType type, + bool is_signed); + + LogicVRegister SVEBitwiseShiftHelper(Shift shift_op, + VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_wide_elements); + + // Pack the even- or odd-numbered elements of the source vector side by side + // in the lower half of the destination vector, and leave the upper half all + // zero. + // [...| H | G | F | E | D | C | B | A ] + // => [...................| G | E | C | A ] + LogicVRegister pack_even_elements(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + // [...| H | G | F | E | D | C | B | A ] + // => [...................| H | F | D | B ] + LogicVRegister pack_odd_elements(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + LogicVRegister adcl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool top); + + template <typename T> + LogicVRegister FTMaddHelper(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + uint64_t coeff_pos, + uint64_t coeff_neg); + + // Return the first or last active lane, or -1 if none are active. + int GetFirstActive(VectorFormat vform, const LogicPRegister& pg) const; + int GetLastActive(VectorFormat vform, const LogicPRegister& pg) const; + + int CountActiveLanes(VectorFormat vform, const LogicPRegister& pg) const; + + // Count active and true lanes in `pn`. + int CountActiveAndTrueLanes(VectorFormat vform, + const LogicPRegister& pg, + const LogicPRegister& pn) const; + + // Count the number of lanes referred to by `pattern`, given the vector + // length. If `pattern` is not a recognised SVEPredicateConstraint, this + // returns zero. + int GetPredicateConstraintLaneCount(VectorFormat vform, int pattern) const; + + // Simulate a runtime call. void DoRuntimeCall(const Instruction* instr); // Processor state --------------------------------------- @@ -3105,15 +5152,21 @@ FILE* stream_; PrintDisassembler* print_disasm_; - // Instruction statistics instrumentation. - Instrument* instrumentation_; - // General purpose registers. Register 31 is the stack pointer. SimRegister registers_[kNumberOfRegisters]; // Vector registers SimVRegister vregisters_[kNumberOfVRegisters]; + // SVE predicate registers. + SimPRegister pregisters_[kNumberOfPRegisters]; + + // SVE first-fault register. + SimFFRRegister ffr_register_; + + // A pseudo SVE predicate register with all bits set to true. + SimPRegister pregister_all_true_; + // Program Status Register. // bits[31, 27]: Condition flags N, Z, C, and V. // (Negative, Zero, Carry, Overflow) @@ -3134,9 +5187,8 @@ VIXL_ASSERT(ReadFpcr().GetFZ() == 0); // Ties-to-even rounding only. 
VIXL_ASSERT(ReadFpcr().GetRMode() == FPTieEven); - - // The simulator does not support half-precision operations so - // GetFpcr().AHP() is irrelevant, and is not checked here. + // No alternative half-precision support. + VIXL_ASSERT(ReadFpcr().GetAHP() == 0); } static int CalcNFlag(uint64_t result, unsigned reg_size) { @@ -3147,12 +5199,11 @@ static const uint32_t kConditionFlagsMask = 0xf0000000; - // Stack - byte* stack_; - static const int stack_protection_size_ = 256; - // 2 KB stack. - static const int stack_size_ = 2 * 1024 + 2 * stack_protection_size_; - byte* stack_limit_; + Memory memory_; + + static const size_t kDefaultStackGuardStartSize = 0; + static const size_t kDefaultStackGuardEndSize = 4 * 1024; + static const size_t kDefaultStackUsableSize = 8 * 1024; Decoder* decoder_; // Indicates if the pc has been modified by the instruction and should not be @@ -3160,20 +5211,52 @@ bool pc_modified_; const Instruction* pc_; + // Pointer to the last simulated instruction, used to check the validity of + // the current instruction against the previous instruction, for example for + // movprfx sequences. + Instruction const* last_instr_; + + // Branch type register, used for branch target identification. + BType btype_; + + // Next value of branch type register after the current instruction has been + // decoded. + BType next_btype_; + + // Global flag for enabling guarded pages. + // TODO: implement guarding at page granularity, rather than globally. + bool guard_pages_; + static const char* xreg_names[]; static const char* wreg_names[]; + static const char* breg_names[]; static const char* hreg_names[]; static const char* sreg_names[]; static const char* dreg_names[]; static const char* vreg_names[]; + static const char* zreg_names[]; + static const char* preg_names[]; private: + using FormToVisitorFnMap = + std::unordered_map<uint32_t, + std::function<void(Simulator*, const Instruction*)>>; + static const FormToVisitorFnMap* GetFormToVisitorFnMap(); + + uint32_t form_hash_; + static const PACKey kPACKeyIA; static const PACKey kPACKeyIB; static const PACKey kPACKeyDA; static const PACKey kPACKeyDB; static const PACKey kPACKeyGA; + bool CanReadMemory(uintptr_t address, size_t size); + + // CanReadMemory needs placeholder file descriptors, so we use a pipe. We can + // save some system call overhead by opening them on construction, rather than + // on every call to CanReadMemory. + int placeholder_pipe_fd_[2]; + template <typename T> static T FPDefaultNaN(); @@ -3226,20 +5309,49 @@ } } + // Construct a SimVRegister from a SimPRegister, where each byte-sized lane of + // the destination is set to all true (0xff) when the corresponding + // predicate flag is set, and false (0x00) otherwise. + SimVRegister ExpandToSimVRegister(const SimPRegister& preg); + + // Set each predicate flag in pd where the corresponding assigned-sized lane + // in vreg is non-zero. Clear the flag otherwise. This is almost the opposite + // operation to ExpandToSimVRegister(), except that any non-zero lane is + // interpreted as true. + void ExtractFromSimVRegister(VectorFormat vform, + SimPRegister& pd, // NOLINT(runtime/references) + SimVRegister vreg); + bool coloured_trace_; // A set of TraceParameters flags. int trace_parameters_; - // Indicates whether the instruction instrumentation is active. - bool instruction_stats_; - // Indicates whether the exclusive-access warning has been printed. 
bool print_exclusive_access_warning_; void PrintExclusiveAccessWarning(); CPUFeaturesAuditor cpu_features_auditor_; std::vector<CPUFeatures> saved_cpu_features_; + + // State for *rand48 functions, used to simulate randomness with repeatable + // behaviour (so that tests are deterministic). This is used to simulate RNDR + // and RNDRRS, as well as to simulate a source of entropy for architecturally + // undefined behaviour. + uint16_t rand_state_[3]; + + // The configurable size of the SVE vector registers. + unsigned vector_length_; + + // Representation of memory attributes such as MTE tagging and BTI page + // protection in addition to branch interceptions. + MetaDataDepot meta_data_; + + // True if the debugger is enabled and might get entered. + bool debugger_enabled_; + + // Debugger for the simulator. + std::unique_ptr<Debugger> debugger_; }; #if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) && __cplusplus < 201402L @@ -3250,6 +5362,17 @@ struct Simulator::emulated_make_index_sequence_helper<0, I...> : Simulator::emulated_index_sequence<I...> {}; #endif +template <typename R, typename... P> +void MetaDataDepot::BranchInterception<R, P...>::operator()( + Simulator* simulator) const { + if (callback_ == nullptr) { + Simulator::RuntimeCallStructHelper<R, P...>:: + Wrapper(simulator, reinterpret_cast<uint64_t>(function_)); + } else { + callback_(reinterpret_cast<uint64_t>(function_)); + } +} + } // namespace aarch64 } // namespace vixl diff --git a/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h b/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h index 6631043d5..1aa4f851f 100644 --- a/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h +++ b/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h @@ -56,6 +56,8 @@ enum DebugHltOpcode { kDisableCPUFeaturesOpcode, kSaveCPUFeaturesOpcode, kRestoreCPUFeaturesOpcode, + kMTEActive, + kMTEInactive, // Aliases. kDebugHltFirstOpcode = kUnreachableOpcode, kDebugHltLastOpcode = kLogOpcode @@ -88,7 +90,7 @@ VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes); // call): // x0: The format string // x1-x7: Optional arguments, if type == CPURegister::kRegister -// d0-d7: Optional arguments, if type == CPURegister::kFPRegister +// d0-d7: Optional arguments, if type == CPURegister::kVRegister const unsigned kPrintfArgCountOffset = 1 * kInstructionSize; const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize; const unsigned kPrintfLength = 3 * kInstructionSize; @@ -121,7 +123,7 @@ const unsigned kTraceLength = 3 * kInstructionSize; enum TraceParameters { LOG_DISASM = 1 << 0, // Log disassembly. LOG_REGS = 1 << 1, // Log general purpose registers. - LOG_VREGS = 1 << 2, // Log NEON and floating-point registers. + LOG_VREGS = 1 << 2, // Log SVE, NEON and floating-point registers. LOG_SYSREGS = 1 << 3, // Log the flags and system registers. LOG_WRITE = 1 << 4, // Log writes to memory. LOG_BRANCH = 1 << 5, // Log taken branches. diff --git a/dep/vixl/include/vixl/assembler-base-vixl.h b/dep/vixl/include/vixl/assembler-base-vixl.h index ee54dcbc2..e38619601 100644 --- a/dep/vixl/include/vixl/assembler-base-vixl.h +++ b/dep/vixl/include/vixl/assembler-base-vixl.h @@ -29,6 +29,12 @@ #include "code-buffer-vixl.h" +// Microsoft Visual C++ defines a `mvn` macro that conflicts with our own +// definition.
+#if defined(_MSC_VER) && defined(mvn) +#undef mvn +#endif + namespace vixl { class CodeBufferCheckScope; @@ -37,9 +43,8 @@ namespace internal { class AssemblerBase { public: - AssemblerBase() : allow_assembler_(false) {} - explicit AssemblerBase(size_t capacity) - : buffer_(capacity), allow_assembler_(false) {} + AssemblerBase() + : allow_assembler_(false) {} AssemblerBase(byte* buffer, size_t capacity) : buffer_(buffer, capacity), allow_assembler_(false) {} diff --git a/dep/vixl/include/vixl/code-buffer-vixl.h b/dep/vixl/include/vixl/code-buffer-vixl.h index ed01eebb1..35a953111 100644 --- a/dep/vixl/include/vixl/code-buffer-vixl.h +++ b/dep/vixl/include/vixl/code-buffer-vixl.h @@ -36,24 +36,12 @@ namespace vixl { class CodeBuffer { public: - static const size_t kDefaultCapacity = 4 * KBytes; - - explicit CodeBuffer(size_t capacity = kDefaultCapacity); + CodeBuffer(); CodeBuffer(byte* buffer, size_t capacity); - ~CodeBuffer(); + ~CodeBuffer() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION; void Reset(); - void Reset(byte* buffer, size_t capacity, bool managed = false); - -#ifdef VIXL_CODE_BUFFER_MMAP - void SetExecutable(); - void SetWritable(); -#else - // These require page-aligned memory blocks, which we can only guarantee with - // mmap. - VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); } - VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); } -#endif + void Reset(byte* buffer, size_t capacity); ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const { ptrdiff_t cursor_offset = cursor_ - buffer_; @@ -128,8 +116,9 @@ class CodeBuffer { void Emit(T value) { VIXL_ASSERT(HasSpaceFor(sizeof(value))); dirty_ = true; - memcpy(cursor_, &value, sizeof(value)); - cursor_ += sizeof(value); + byte* c = cursor_; + memcpy(c, &value, sizeof(value)); + cursor_ = c + sizeof(value); } void UpdateData(size_t offset, const void* data, size_t size); @@ -149,10 +138,6 @@ class CodeBuffer { return GetCapacity(); } - bool IsManaged() const { return managed_; } - - void Grow(size_t new_capacity); - bool IsDirty() const { return dirty_; } void SetClean() { dirty_ = false; } @@ -161,24 +146,9 @@ class CodeBuffer { return GetRemainingBytes() >= amount; } - void EnsureSpaceFor(size_t amount, bool* has_grown) { - bool is_full = !HasSpaceFor(amount); - if (is_full) Grow(capacity_ * 2 + amount); - VIXL_ASSERT(has_grown != NULL); - *has_grown = is_full; - } - void EnsureSpaceFor(size_t amount) { - bool dummy; - EnsureSpaceFor(amount, &dummy); - } - private: // Backing store of the buffer. byte* buffer_; - // If true the backing store is allocated and deallocated by the buffer. The - // backing store can then grow on demand. If false the backing store is - // provided by the user and cannot be resized internally. - bool managed_; // Pointer to the next location to be written. byte* cursor_; // True if there has been any write since the buffer was created or cleaned. diff --git a/dep/vixl/include/vixl/code-generation-scopes-vixl.h b/dep/vixl/include/vixl/code-generation-scopes-vixl.h index b7ea2d92b..4dd2698b5 100644 --- a/dep/vixl/include/vixl/code-generation-scopes-vixl.h +++ b/dep/vixl/include/vixl/code-generation-scopes-vixl.h @@ -68,14 +68,19 @@ class CodeBufferCheckScope { size_t size, BufferSpacePolicy check_policy = kReserveBufferSpace, SizePolicy size_policy = kMaximumSize) - : assembler_(NULL), initialised_(false) { + : CodeBufferCheckScope() { Open(assembler, size, check_policy, size_policy); } // This constructor does not implicitly initialise the scope. 
Instead, the // user is required to explicitly call the `Open` function before using the // scope. - CodeBufferCheckScope() : assembler_(NULL), initialised_(false) { + CodeBufferCheckScope() + : assembler_(NULL), + assert_policy_(kMaximumSize), + limit_(0), + previous_allow_assembler_(false), + initialised_(false) { // Nothing to do. } @@ -90,7 +95,7 @@ class CodeBufferCheckScope { VIXL_ASSERT(assembler != NULL); assembler_ = assembler; if (check_policy == kReserveBufferSpace) { - assembler->GetBuffer()->EnsureSpaceFor(size); + VIXL_ASSERT(assembler->GetBuffer()->HasSpaceFor(size)); } #ifdef VIXL_DEBUG limit_ = assembler_->GetSizeOfCodeGenerated() + size; @@ -152,14 +157,15 @@ class EmissionCheckScope : public CodeBufferCheckScope { // constructed. EmissionCheckScope(MacroAssemblerInterface* masm, size_t size, - SizePolicy size_policy = kMaximumSize) { + SizePolicy size_policy = kMaximumSize) + : EmissionCheckScope() { Open(masm, size, size_policy); } // This constructor does not implicitly initialise the scope. Instead, the // user is required to explicitly call the `Open` function before using the // scope. - EmissionCheckScope() {} + EmissionCheckScope() : masm_(nullptr), pool_policy_(kBlockPools) {} virtual ~EmissionCheckScope() { Close(); } @@ -250,14 +256,15 @@ class ExactAssemblyScope : public EmissionCheckScope { // constructed. ExactAssemblyScope(MacroAssemblerInterface* masm, size_t size, - SizePolicy size_policy = kExactSize) { + SizePolicy size_policy = kExactSize) + : ExactAssemblyScope() { Open(masm, size, size_policy); } // This constructor does not implicitly initialise the scope. Instead, the // user is required to explicitly call the `Open` function before using the // scope. - ExactAssemblyScope() {} + ExactAssemblyScope() : previous_allow_macro_assembler_(false) {} virtual ~ExactAssemblyScope() { Close(); } diff --git a/dep/vixl/include/vixl/compiler-intrinsics-vixl.h b/dep/vixl/include/vixl/compiler-intrinsics-vixl.h index b27f94ebf..8d0849a81 100644 --- a/dep/vixl/include/vixl/compiler-intrinsics-vixl.h +++ b/dep/vixl/include/vixl/compiler-intrinsics-vixl.h @@ -28,6 +28,8 @@ #ifndef VIXL_COMPILER_INTRINSICS_H #define VIXL_COMPILER_INTRINSICS_H +#include <limits.h> + #include "globals-vixl.h" namespace vixl { @@ -104,16 +106,23 @@ int CountTrailingZerosFallBack(uint64_t value, int width); // TODO: The implementations could be improved for sizes different from 32bit // and 64bit: we could mask the values and call the appropriate builtin. +// Return the number of leading bits that match the topmost (sign) bit, +// excluding the topmost bit itself. template <typename V> inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) { + VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); #if COMPILER_HAS_BUILTIN_CLRSB - if (width == 32) { - return __builtin_clrsb(value); - } else if (width == 64) { - return __builtin_clrsbll(value); - } -#endif + VIXL_ASSERT((LLONG_MIN <= value) && (value <= LLONG_MAX)); + int ll_width = + sizeof(long long) * kBitsPerByte; // NOLINT(google-runtime-int) + int result = __builtin_clrsbll(value) - (ll_width - width); + // Check that the value fits in the specified width.
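+ // For example, for value == -32 and width == 8, __builtin_clrsbll returns
+ // 58 and the result is 58 - (64 - 8) == 2: 0xe0 has two redundant sign
+ // bits. A negative result would mean the value did not fit in width bits.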
+ VIXL_ASSERT(result >= 0); + return result; +#else + VIXL_ASSERT((INT64_MIN <= value) && (value <= INT64_MAX)); return CountLeadingSignBitsFallBack(value, width); +#endif } diff --git a/dep/vixl/include/vixl/cpu-features.h b/dep/vixl/include/vixl/cpu-features.h index f94b955fa..97eb661a2 100644 --- a/dep/vixl/include/vixl/cpu-features.h +++ b/dep/vixl/include/vixl/cpu-features.h @@ -27,6 +27,7 @@ #ifndef VIXL_CPU_FEATURES_H #define VIXL_CPU_FEATURES_H +#include <bitset> #include <ostream> #include "globals-vixl.h" @@ -34,16 +35,65 @@ namespace vixl { +// VIXL aims to handle and detect all architectural features that are likely to +// influence code-generation decisions at EL0 (user-space). +// +// - There may be multiple VIXL feature flags for a given architectural +// extension. This occurs where the extension allows components to be +// implemented independently, or where kernel support is needed, and is likely +// to be fragmented. +// +// For example, Pointer Authentication (kPAuth*) has a separate feature flag +// for access to PACGA, and to indicate that the QARMA algorithm is +// implemented. +// +// - Conversely, some extensions have configuration options that do not affect +// EL0, so these are presented as a single VIXL feature. +// +// For example, the RAS extension (kRAS) has several variants, but the only +// feature relevant to VIXL is the addition of the ESB instruction, so we only +// need a single flag. +// +// - VIXL offers separate flags for separate features even if they're +// architecturally linked. +// +// For example, the architecture requires kFPHalf and kNEONHalf to be equal, +// but they have separate hardware ID register fields so VIXL presents them as +// separate features. +// +// - VIXL can detect every feature for which it can generate code. +// +// - VIXL can detect some features for which it cannot generate code. +// +// The CPUFeatures::Feature enum, derived from the macro list below, is +// frequently extended. New features may be added to the list at any point, and +// no assumptions should be made about the numerical values assigned to each +// enum constant. The symbolic names can be considered to be stable. +// +// The debug descriptions are used only for debug output. The 'cpuinfo' strings +// are informative; VIXL does not use /proc/cpuinfo for feature detection. + // clang-format off #define VIXL_CPU_FEATURE_LIST(V) \ /* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \ /* registers, so that the detailed feature registers can be read */ \ /* directly. */ \ + \ + /* Constant name Debug description Linux 'cpuinfo' string. */ \ V(kIDRegisterEmulation, "ID register emulation", "cpuid") \ \ V(kFP, "FP", "fp") \ V(kNEON, "NEON", "asimd") \ V(kCRC32, "CRC32", "crc32") \ + V(kDGH, "DGH", "dgh") \ /* Speculation control features. */ \ + V(kCSV2, "CSV2", NULL) \ + V(kSCXTNUM, "SCXTNUM", NULL) \ + V(kCSV3, "CSV3", NULL) \ + V(kSB, "SB", "sb") \ + V(kSPECRES, "SPECRES", NULL) \ + V(kSSBS, "SSBS", NULL) \ + V(kSSBSControl, "SSBS (PSTATE control)", "ssbs") \ /* Cryptographic support instructions. */ \ V(kAES, "AES", "aes") \ V(kSHA1, "SHA1", "sha1") \ @@ -56,34 +106,102 @@ namespace vixl { V(kLORegions, "LORegions", NULL) \ /* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \ V(kRDM, "RDM", "asimdrdm") \ + /* Scalable Vector Extension.
*/ \ + V(kSVE, "SVE", "sve") \ + V(kSVEF64MM, "SVE F64MM", "svef64mm") \ + V(kSVEF32MM, "SVE F32MM", "svef32mm") \ + V(kSVEI8MM, "SVE I8MM", "svei8imm") \ + V(kSVEBF16, "SVE BFloat16", "svebf16") \ /* SDOT and UDOT support (in NEON). */ \ V(kDotProduct, "DotProduct", "asimddp") \ + /* Int8 matrix multiplication (in NEON). */ \ + V(kI8MM, "NEON I8MM", "i8mm") \ /* Half-precision (FP16) support for FP and NEON, respectively. */ \ V(kFPHalf, "FPHalf", "fphp") \ V(kNEONHalf, "NEONHalf", "asimdhp") \ + /* BFloat16 support (in both FP and NEON.) */ \ + V(kBF16, "FP/NEON BFloat 16", "bf16") \ /* The RAS extension, including the ESB instruction. */ \ V(kRAS, "RAS", NULL) \ /* Data cache clean to the point of persistence: DC CVAP. */ \ V(kDCPoP, "DCPoP", "dcpop") \ + /* Data cache clean to the point of deep persistence: DC CVADP. */ \ + V(kDCCVADP, "DCCVADP", "dcpodp") \ /* Cryptographic support instructions. */ \ V(kSHA3, "SHA3", "sha3") \ V(kSHA512, "SHA512", "sha512") \ V(kSM3, "SM3", "sm3") \ V(kSM4, "SM4", "sm4") \ /* Pointer authentication for addresses. */ \ - V(kPAuth, "PAuth", NULL) \ + V(kPAuth, "PAuth", "paca") \ /* Pointer authentication for addresses uses QARMA. */ \ V(kPAuthQARMA, "PAuthQARMA", NULL) \ /* Generic authentication (using the PACGA instruction). */ \ - V(kPAuthGeneric, "PAuthGeneric", NULL) \ + V(kPAuthGeneric, "PAuthGeneric", "pacg") \ /* Generic authentication uses QARMA. */ \ V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \ - /* JavaScript-style FP <-> integer conversion instruction: FJCVTZS. */ \ + /* JavaScript-style FP -> integer conversion instruction: FJCVTZS. */ \ V(kJSCVT, "JSCVT", "jscvt") \ + /* Complex number support for NEON: FCMLA and FCADD. */ \ + V(kFcma, "Fcma", "fcma") \ /* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \ V(kRCpc, "RCpc", "lrcpc") \ - /* Complex number support for NEON: FCMLA and FCADD. */ \ - V(kFcma, "Fcma", "fcma") + V(kRCpcImm, "RCpc (imm)", "ilrcpc") \ + /* Flag manipulation instructions: SETF{8,16}, CFINV, RMIF. */ \ + V(kFlagM, "FlagM", "flagm") \ + /* Unaligned single-copy atomicity. */ \ + V(kUSCAT, "USCAT", "uscat") \ + /* FP16 fused multiply-add or -subtract long: FMLAL{2}, FMLSL{2}. */ \ + V(kFHM, "FHM", "asimdfhm") \ + /* Data-independent timing (for selected instructions). */ \ + V(kDIT, "DIT", "dit") \ + /* Branch target identification. */ \ + V(kBTI, "BTI", "bti") \ + /* Flag manipulation instructions: {AX,XA}FLAG */ \ + V(kAXFlag, "AXFlag", "flagm2") \ + /* Random number generation extension, */ \ + V(kRNG, "RNG", "rng") \ + /* Floating-point round to {32,64}-bit integer. */ \ + V(kFrintToFixedSizedInt,"Frint (bounded)", "frint") \ + /* Memory Tagging Extension. */ \ + V(kMTEInstructions, "MTE (EL0 instructions)", NULL) \ + V(kMTE, "MTE", NULL) \ + V(kMTE3, "MTE (asymmetric)", "mte3") \ + /* PAuth extensions. */ \ + V(kPAuthEnhancedPAC, "PAuth EnhancedPAC", NULL) \ + V(kPAuthEnhancedPAC2, "PAuth EnhancedPAC2", NULL) \ + V(kPAuthFPAC, "PAuth FPAC", NULL) \ + V(kPAuthFPACCombined, "PAuth FPACCombined", NULL) \ + /* Scalable Vector Extension 2. 
*/ \ + V(kSVE2, "SVE2", "sve2") \ + V(kSVESM4, "SVE SM4", "svesm4") \ + V(kSVESHA3, "SVE SHA3", "svesha3") \ + V(kSVEBitPerm, "SVE BitPerm", "svebitperm") \ + V(kSVEAES, "SVE AES", "sveaes") \ + V(kSVEPmull128, "SVE Pmull128", "svepmull") \ + /* Alternate floating-point behavior */ \ + V(kAFP, "AFP", "afp") \ + /* Enhanced Counter Virtualization */ \ + V(kECV, "ECV", "ecv") \ + /* Increased precision of Reciprocal Estimate and Square Root Estimate */ \ + V(kRPRES, "RPRES", "rpres") \ + /* Memory operation instructions, for memcpy, memset */ \ + V(kMOPS, "Memory ops", NULL) \ + /* Scalable Matrix Extension (SME) */ \ + V(kSME, "SME", "sme") \ + V(kSMEi16i64, "SME (i16i64)", "smei16i64") \ + V(kSMEf64f64, "SME (f64f64)", "smef64f64") \ + V(kSMEi8i32, "SME (i8i32)", "smei8i32") \ + V(kSMEf16f32, "SME (f16f32)", "smef16f32") \ + V(kSMEb16f32, "SME (b16f32)", "smeb16f32") \ + V(kSMEf32f32, "SME (f32f32)", "smef32f32") \ + V(kSMEfa64, "SME (fa64)", "smefa64") \ + /* WFET and WFIT instruction support */ \ + V(kWFXT, "WFXT", "wfxt") \ + /* Extended BFloat16 instructions */ \ + V(kEBF16, "EBF16", "ebf16") \ + V(kSVE_EBF16, "EBF16 (SVE)", "sveebf16") \ + V(kCSSC, "CSSC", "cssc") // clang-format on @@ -176,13 +294,13 @@ class CPUFeatures { // clang-format on // By default, construct with no features enabled. - CPUFeatures() : features_(0) {} + CPUFeatures() : features_{} {} // Construct with some features already enabled. - CPUFeatures(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone); + template + CPUFeatures(T first, U... others) : features_{} { + Combine(first, others...); + } // Construct with all features enabled. This can be used to disable feature // checking: `Has(...)` returns true regardless of the argument. @@ -198,51 +316,80 @@ class CPUFeatures { return CPUFeatures(kFP, kNEON, kCRC32); } + // Construct a new CPUFeatures object using ID registers. This assumes that + // kIDRegisterEmulation is present. + static CPUFeatures InferFromIDRegisters(); + + enum QueryIDRegistersOption { + kDontQueryIDRegisters, + kQueryIDRegistersIfAvailable + }; + // Construct a new CPUFeatures object based on what the OS reports. - static CPUFeatures InferFromOS(); + static CPUFeatures InferFromOS( + QueryIDRegistersOption option = kQueryIDRegistersIfAvailable); // Combine another CPUFeatures object into this one. Features that already // exist in this set are left unchanged. void Combine(const CPUFeatures& other); - // Combine specific features into this set. Features that already exist in - // this set are left unchanged. - void Combine(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone); + // Combine a specific feature into this set. If it already exists in the set, + // the set is left unchanged. + void Combine(Feature feature); + + // Combine multiple features (or feature sets) into this set. + template + void Combine(T first, U... others) { + Combine(first); + Combine(others...); + } // Remove features in another CPUFeatures object from this one. void Remove(const CPUFeatures& other); - // Remove specific features from this set. - void Remove(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone); + // Remove a specific feature from this set. This has no effect if the feature + // doesn't exist in the set. + void Remove(Feature feature0); - // Chaining helpers for convenient construction. 
- CPUFeatures With(const CPUFeatures& other) const; - CPUFeatures With(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone) const; - CPUFeatures Without(const CPUFeatures& other) const; - CPUFeatures Without(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone) const; + // Remove multiple features (or feature sets) from this set. + template + void Remove(T first, U... others) { + Remove(first); + Remove(others...); + } - // Query features. - // Note that an empty query (like `Has(kNone)`) always returns true. + // Chaining helpers for convenient construction by combining other CPUFeatures + // or individual Features. + template + CPUFeatures With(T... others) const { + CPUFeatures f(*this); + f.Combine(others...); + return f; + } + + template + CPUFeatures Without(T... others) const { + CPUFeatures f(*this); + f.Remove(others...); + return f; + } + + // Test whether the `other` feature set is equal to or a subset of this one. bool Has(const CPUFeatures& other) const; - bool Has(Feature feature0, - Feature feature1 = kNone, - Feature feature2 = kNone, - Feature feature3 = kNone) const; + + // Test whether a single feature exists in this set. + // Note that `Has(kNone)` always returns true. + bool Has(Feature feature) const; + + // Test whether all of the specified features exist in this set. + template + bool Has(T first, U... others) const { + return Has(first) && Has(others...); + } // Return the number of enabled features. size_t Count() const; + bool HasNoFeatures() const { return Count() == 0; } // Check for equivalence. bool operator==(const CPUFeatures& other) const { @@ -256,9 +403,8 @@ class CPUFeatures { const_iterator end() const; private: - // Each bit represents a feature. This field will be replaced as needed if - // features are added. - uint64_t features_; + // Each bit represents a feature. This set will be extended as needed. + std::bitset features_; friend std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features); @@ -281,8 +427,8 @@ class CPUFeaturesConstIterator { bool operator!=(const CPUFeaturesConstIterator& other) const { return !(*this == other); } - CPUFeatures::Feature operator++(); - CPUFeatures::Feature operator++(int); + CPUFeaturesConstIterator& operator++(); + CPUFeaturesConstIterator operator++(int); CPUFeatures::Feature operator*() const { VIXL_ASSERT(IsValid()); @@ -301,8 +447,10 @@ class CPUFeaturesConstIterator { CPUFeatures::Feature feature_; bool IsValid() const { - return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) || - cpu_features_->Has(feature_); + if (cpu_features_ == NULL) { + return feature_ == CPUFeatures::kNone; + } + return cpu_features_->Has(feature_); } }; @@ -325,21 +473,17 @@ class CPUFeaturesScope { // Start a CPUFeaturesScope on any object that implements // `CPUFeatures* GetCPUFeatures()`. 
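// For example, a minimal usage sketch for the variadic API above (illustrative
// only, not from the vixl sources; `masm` stands for any object exposing
// GetCPUFeatures()):
//
//   CPUFeatures features(CPUFeatures::kFP, CPUFeatures::kNEON);
//   features.Combine(CPUFeatures::kCRC32);
//   if (features.Has(CPUFeatures::kFP, CPUFeatures::kCRC32)) {
//     CPUFeaturesScope scope(&masm, CPUFeatures::kCRC32);
//     // kCRC32 is enabled within the scope; the previous feature set is
//     // restored when the scope is destroyed.
//   }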
template - explicit CPUFeaturesScope(T* cpu_features_wrapper, - CPUFeatures::Feature feature0 = CPUFeatures::kNone, - CPUFeatures::Feature feature1 = CPUFeatures::kNone, - CPUFeatures::Feature feature2 = CPUFeatures::kNone, - CPUFeatures::Feature feature3 = CPUFeatures::kNone) + explicit CPUFeaturesScope(T* cpu_features_wrapper) : cpu_features_(cpu_features_wrapper->GetCPUFeatures()), - old_features_(*cpu_features_) { - cpu_features_->Combine(feature0, feature1, feature2, feature3); - } + old_features_(*cpu_features_) {} - template - CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other) + // Start a CPUFeaturesScope on any object that implements + // `CPUFeatures* GetCPUFeatures()`, with the specified features enabled. + template + CPUFeaturesScope(T* cpu_features_wrapper, U first, V... features) : cpu_features_(cpu_features_wrapper->GetCPUFeatures()), old_features_(*cpu_features_) { - cpu_features_->Combine(other); + cpu_features_->Combine(first, features...); } ~CPUFeaturesScope() { *cpu_features_ = old_features_; } diff --git a/dep/vixl/include/vixl/globals-vixl.h b/dep/vixl/include/vixl/globals-vixl.h index 727d4947f..b096c7f37 100644 --- a/dep/vixl/include/vixl/globals-vixl.h +++ b/dep/vixl/include/vixl/globals-vixl.h @@ -27,6 +27,10 @@ #ifndef VIXL_GLOBALS_H #define VIXL_GLOBALS_H +#if __cplusplus < 201703L +#error VIXL requires C++17 +#endif + // Get standard C99 macros for integer types. #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS @@ -66,7 +70,8 @@ typedef uint8_t byte; const int KBytes = 1024; const int MBytes = 1024 * KBytes; -const int kBitsPerByte = 8; +const int kBitsPerByteLog2 = 3; +const int kBitsPerByte = 1 << kBitsPerByteLog2; template struct Unsigned; @@ -153,7 +158,7 @@ struct Unsigned<64> { #endif // This is not as powerful as template based assertions, but it is simple. // It assumes that the descriptions are unique. If this starts being a problem, -// we can switch to a different implemention. +// we can switch to a different implementation. #define VIXL_CONCAT(a, b) a##b #if __cplusplus >= 201103L #define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \ @@ -185,10 +190,9 @@ inline void USE(const T1&, const T2&, const T3&) {} template inline void USE(const T1&, const T2&, const T3&, const T4&) {} -#define VIXL_ALIGNMENT_EXCEPTION() \ - do { \ - fprintf(stderr, "ALIGNMENT EXCEPTION\t"); \ - VIXL_ABORT(); \ +#define VIXL_ALIGNMENT_EXCEPTION() \ + do { \ + VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \ } while (0) // The clang::fallthrough attribute is used along with the Wimplicit-fallthrough @@ -203,7 +207,7 @@ inline void USE(const T1&, const T2&, const T3&, const T4&) {} #if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L #define VIXL_FALLTHROUGH() [[clang::fallthrough]] // Fallthrough annotation for GCC >= 7. -#elif __GNUC__ >= 7 +#elif defined(__GNUC__) && __GNUC__ >= 7 #define VIXL_FALLTHROUGH() __attribute__((fallthrough)) #else #define VIXL_FALLTHROUGH() \ @@ -211,6 +215,18 @@ inline void USE(const T1&, const T2&, const T3&, const T4&) {} } while (0) #endif +// Evaluate 'init' to an std::optional and return if it's empty. If 'init' is +// not empty then define a variable 'name' with the value inside the +// std::optional. 
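+// For example (an illustrative sketch; MaybeGetEncoding() is an assumed
+// helper returning std::optional<int>):
+//
+//   void Visit() {
+//     VIXL_DEFINE_OR_RETURN(encoding, MaybeGetEncoding());
+//     Emit(encoding);  // Only reached when the optional held a value.
+//   }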
+#define VIXL_DEFINE_OR_RETURN(name, init) \ + auto opt##name = init; \ + if (!opt##name) return; \ + auto name = *opt##name; +#define VIXL_DEFINE_OR_RETURN_FALSE(name, init) \ + auto opt##name = init; \ + if (!opt##name) return false; \ + auto name = *opt##name; + #if __cplusplus >= 201103L #define VIXL_NO_RETURN [[noreturn]] #else @@ -224,17 +240,19 @@ inline void USE(const T1&, const T2&, const T3&, const T4&) {} #if __cplusplus >= 201103L #define VIXL_OVERRIDE override +#define VIXL_CONSTEXPR constexpr +#define VIXL_HAS_CONSTEXPR 1 #else #define VIXL_OVERRIDE +#define VIXL_CONSTEXPR #endif -// Some functions might only be marked as "noreturn" for the DEBUG build. This -// macro should be used for such cases (for more details see what -// VIXL_UNREACHABLE expands to). -#ifdef VIXL_DEBUG -#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN +// With VIXL_NEGATIVE_TESTING on, VIXL_ASSERT and VIXL_CHECK will throw +// exceptions but C++11 marks destructors as noexcept(true) by default. +#if defined(VIXL_NEGATIVE_TESTING) && __cplusplus >= 201103L +#define VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION noexcept(false) #else -#define VIXL_DEBUG_NO_RETURN +#define VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION #endif #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 @@ -269,16 +287,24 @@ inline void USE(const T1&, const T2&, const T3&, const T4&) {} // Target Architecture/ISA #ifdef VIXL_INCLUDE_TARGET_A64 +#ifndef VIXL_INCLUDE_TARGET_AARCH64 #define VIXL_INCLUDE_TARGET_AARCH64 #endif +#endif #if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32) +#ifndef VIXL_INCLUDE_TARGET_AARCH32 #define VIXL_INCLUDE_TARGET_AARCH32 +#endif #elif defined(VIXL_INCLUDE_TARGET_A32) +#ifndef VIXL_INCLUDE_TARGET_A32_ONLY #define VIXL_INCLUDE_TARGET_A32_ONLY +#endif #else +#ifndef VIXL_INCLUDE_TARGET_T32_ONLY #define VIXL_INCLUDE_TARGET_T32_ONLY #endif +#endif #endif // VIXL_GLOBALS_H diff --git a/dep/vixl/include/vixl/invalset-vixl.h b/dep/vixl/include/vixl/invalset-vixl.h index 94cc1aac2..cf8c982fc 100644 --- a/dep/vixl/include/vixl/invalset-vixl.h +++ b/dep/vixl/include/vixl/invalset-vixl.h @@ -27,9 +27,8 @@ #ifndef VIXL_INVALSET_H_ #define VIXL_INVALSET_H_ -#include - #include +#include #include #include "globals-vixl.h" @@ -91,7 +90,7 @@ template class InvalSet { public: InvalSet(); - ~InvalSet(); + ~InvalSet() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION; static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS; static const KeyType kInvalidKey = INVALID_KEY; @@ -112,7 +111,7 @@ class InvalSet { size_t size() const; // Returns true if no elements are stored in the set. - // Note that this does not mean the the backing storage is empty: it can still + // Note that this does not mean the backing storage is empty: it can still // contain invalid elements. bool empty() const; @@ -244,8 +243,13 @@ class InvalSet { template -class InvalSetIterator/* : public std::iterator */{ +class InvalSetIterator { + using iterator_category = std::forward_iterator_tag; + using value_type = typename S::_ElementType; + using difference_type = std::ptrdiff_t; + using pointer = S*; + using reference = S&; + private: // Redefine types to mirror the associated set types. 
typedef typename S::_ElementType ElementType; @@ -323,7 +327,8 @@ InvalSet::InvalSet() template -InvalSet::~InvalSet() { +InvalSet::~InvalSet() + VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION { VIXL_ASSERT(monitor_ == 0); delete vector_; } @@ -841,9 +846,7 @@ InvalSetIterator::InvalSetIterator(const InvalSetIterator& other) #if __cplusplus >= 201103L template InvalSetIterator::InvalSetIterator(InvalSetIterator&& other) noexcept - : using_vector_(false), - index_(0), - inval_set_(NULL) { + : using_vector_(false), index_(0), inval_set_(NULL) { swap(*this, other); } #endif diff --git a/dep/vixl/include/vixl/macro-assembler-interface.h b/dep/vixl/include/vixl/macro-assembler-interface.h index a3194e308..3c0421f2c 100644 --- a/dep/vixl/include/vixl/macro-assembler-interface.h +++ b/dep/vixl/include/vixl/macro-assembler-interface.h @@ -35,7 +35,7 @@ class MacroAssemblerInterface { public: virtual internal::AssemblerBase* AsAssemblerBase() = 0; - virtual ~MacroAssemblerInterface() {} + virtual ~MacroAssemblerInterface() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {} virtual bool AllowMacroInstructions() const = 0; virtual bool ArePoolsBlocked() const = 0; diff --git a/dep/vixl/include/vixl/pool-manager-impl.h b/dep/vixl/include/vixl/pool-manager-impl.h index c49b643fc..3cee3b77f 100644 --- a/dep/vixl/include/vixl/pool-manager-impl.h +++ b/dep/vixl/include/vixl/pool-manager-impl.h @@ -27,10 +27,10 @@ #ifndef VIXL_POOL_MANAGER_IMPL_H_ #define VIXL_POOL_MANAGER_IMPL_H_ -#include "pool-manager.h" - #include + #include "assembler-base-vixl.h" +#include "pool-manager.h" namespace vixl { @@ -264,14 +264,14 @@ bool PoolManager::MustEmit(T pc, if (checkpoint < temp.min_location_) return true; } - bool tempNotPlacedYet = true; + bool temp_not_placed_yet = true; for (int i = static_cast(objects_.size()) - 1; i >= 0; --i) { const PoolObject& current = objects_[i]; - if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) { + if (temp_not_placed_yet && PoolObjectLessThan(current, temp)) { checkpoint = UpdateCheckpointForObject(checkpoint, &temp); if (checkpoint < temp.min_location_) return true; if (CheckFuturePC(pc, checkpoint)) return true; - tempNotPlacedYet = false; + temp_not_placed_yet = false; } if (current.label_base_ == label_base) continue; checkpoint = UpdateCheckpointForObject(checkpoint, ¤t); @@ -279,7 +279,7 @@ bool PoolManager::MustEmit(T pc, if (CheckFuturePC(pc, checkpoint)) return true; } // temp is the object with the smallest max_location_. - if (tempNotPlacedYet) { + if (temp_not_placed_yet) { checkpoint = UpdateCheckpointForObject(checkpoint, &temp); if (checkpoint < temp.min_location_) return true; } @@ -487,7 +487,7 @@ void PoolManager::Release(T pc) { } template -PoolManager::~PoolManager() { +PoolManager::~PoolManager() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION { #ifdef VIXL_DEBUG // Check for unbound objects. for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) { @@ -497,7 +497,7 @@ PoolManager::~PoolManager() { } #endif // Delete objects the pool manager owns. 
- for (typename std::vector<LocationBase<T> *>::iterator + for (typename std::vector<LocationBase<T>*>::iterator iter = delete_on_destruction_.begin(), end = delete_on_destruction_.end(); iter != end; @@ -517,6 +517,6 @@ int PoolManager<T>::GetPoolSizeForTest() const { } return size; } -} +} // namespace vixl #endif // VIXL_POOL_MANAGER_IMPL_H_ diff --git a/dep/vixl/include/vixl/pool-manager.h b/dep/vixl/include/vixl/pool-manager.h index b5cb867be..f5101cc77 100644 --- a/dep/vixl/include/vixl/pool-manager.h +++ b/dep/vixl/include/vixl/pool-manager.h @@ -27,11 +27,10 @@ #ifndef VIXL_POOL_MANAGER_H_ #define VIXL_POOL_MANAGER_H_ -#include - #include #include #include +#include #include #include "globals-vixl.h" @@ -142,7 +141,7 @@ class LocationBase { is_bound_(true), location_(location) {} - virtual ~LocationBase() {} + virtual ~LocationBase() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {} // The PoolManager should assume ownership of some objects, and delete them // after they have been placed. This can happen for example for literals that @@ -369,8 +368,8 @@ class ForwardReference { // Specify the possible locations where the object could be stored. AArch32's // PC offset, and T32's PC alignment calculations should be applied by the - // Assembler, not here. The PoolManager deals only with simple locationes. - // Including min_object_adddress_ is necessary to handle AArch32 some + // Assembler, not here. The PoolManager deals only with simple locations. + // Including min_object_address_ is necessary to handle some AArch32 // instructions which have a minimum offset of 0, but also have the implicit // PC offset. // Note that this structure cannot handle sparse ranges, such as A32's ADR, @@ -397,7 +396,7 @@ class PoolManager { max_pool_size_(0), monitor_(0) {} - ~PoolManager(); + ~PoolManager() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION; // Check if we will need to emit the pool at location 'pc', when planning to // generate a certain number of bytes. This optionally takes a diff --git a/dep/vixl/include/vixl/utils-vixl.h b/dep/vixl/include/vixl/utils-vixl.h index fe56b92f0..a6632b2fc 100644 --- a/dep/vixl/include/vixl/utils-vixl.h +++ b/dep/vixl/include/vixl/utils-vixl.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include "compiler-intrinsics-vixl.h" @@ -67,29 +68,40 @@ namespace vixl { #endif template <typename T, size_t n> -size_t ArrayLength(const T (&)[n]) { +constexpr size_t ArrayLength(const T (&)[n]) { return n; } +inline uint64_t GetUintMask(unsigned bits) { + VIXL_ASSERT(bits <= 64); + uint64_t base = (bits >= 64) ? 0 : (UINT64_C(1) << bits); + return base - 1; +} + +inline uint64_t GetSignMask(unsigned bits) { + VIXL_ASSERT(bits <= 64); + return UINT64_C(1) << (bits - 1); +} + // Check number width. // TODO: Refactor these using templates.
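// As a worked illustration of the widened bounds below (a sketch, not part
// of the patch): n may now equal the full width of the argument type.
//
//   IsIntN(32, INT32_MIN);     // true: n == 32 is now accepted.
//   IsIntN(8, INT64_C(127));   // true: 127 fits in a signed 8-bit field.
//   IsIntN(8, INT64_C(128));   // false: out of range for 8 bits.
//   IsUintN(64, UINT64_MAX);   // true: n == 64 is now accepted.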
inline bool IsIntN(unsigned n, uint32_t x) { - VIXL_ASSERT((0 < n) && (n < 32)); - uint32_t limit = UINT32_C(1) << (n - 1); - return x < limit; + VIXL_ASSERT((0 < n) && (n <= 32)); + return x <= static_cast(INT32_MAX >> (32 - n)); } inline bool IsIntN(unsigned n, int32_t x) { - VIXL_ASSERT((0 < n) && (n < 32)); + VIXL_ASSERT((0 < n) && (n <= 32)); + if (n == 32) return true; int32_t limit = INT32_C(1) << (n - 1); return (-limit <= x) && (x < limit); } inline bool IsIntN(unsigned n, uint64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); - uint64_t limit = UINT64_C(1) << (n - 1); - return x < limit; + VIXL_ASSERT((0 < n) && (n <= 64)); + return x <= static_cast(INT64_MAX >> (64 - n)); } inline bool IsIntN(unsigned n, int64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); + VIXL_ASSERT((0 < n) && (n <= 64)); + if (n == 64) return true; int64_t limit = INT64_C(1) << (n - 1); return (-limit <= x) && (x < limit); } @@ -98,7 +110,8 @@ VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) { } inline bool IsUintN(unsigned n, uint32_t x) { - VIXL_ASSERT((0 < n) && (n < 32)); + VIXL_ASSERT((0 < n) && (n <= 32)); + if (n >= 32) return true; return !(x >> n); } inline bool IsUintN(unsigned n, int32_t x) { @@ -107,7 +120,8 @@ inline bool IsUintN(unsigned n, int32_t x) { return !(static_cast(x) >> n); } inline bool IsUintN(unsigned n, uint64_t x) { - VIXL_ASSERT((0 < n) && (n < 64)); + VIXL_ASSERT((0 < n) && (n <= 64)); + if (n >= 64) return true; return !(x >> n); } inline bool IsUintN(unsigned n, int64_t x) { @@ -183,14 +197,14 @@ inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) { } -inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) { +inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint64_t x) { VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && (msb >= lsb)); return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x)); } -inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) { +inline int64_t ExtractSignedBitfield64(int msb, int lsb, uint64_t x) { VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && (msb >= lsb)); uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x); @@ -203,8 +217,7 @@ inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) { return result; } - -inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) { +inline int32_t ExtractSignedBitfield32(int msb, int lsb, uint64_t x) { VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && (msb >= lsb)); uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x)); @@ -213,7 +226,6 @@ inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) { return result; } - inline uint64_t RotateRight(uint64_t value, unsigned int rotate, unsigned int width) { @@ -271,6 +283,39 @@ VIXL_DEPRECATED("RawbitsToDouble", return RawbitsToDouble(bits); } +// Some compilers dislike negating unsigned integers, +// so we provide an equivalent. +template +T UnsignedNegate(T value) { + VIXL_STATIC_ASSERT(std::is_unsigned::value); + return ~value + 1; +} + +// An absolute operation for signed integers that is defined for results outside +// the representable range. Specifically, Abs(MIN_INT) is MIN_INT. +template +T Abs(T val) { + // TODO: this static assertion is for signed integer inputs, as that's the + // only type tested. However, the code should work for all numeric inputs. + // Remove the assertion and this comment when more tests are available. 
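+  // Illustration (not from the patch): Abs(-5) == 5 as usual, while
+  // Abs(std::numeric_limits<int32_t>::min()) yields INT32_MIN itself,
+  // since -2^31 has no positive two's-complement counterpart.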
+ VIXL_STATIC_ASSERT(std::is_signed::value && std::is_integral::value); + return ((val >= -std::numeric_limits::max()) && (val < 0)) ? -val : val; +} + +// Convert unsigned to signed numbers in a well-defined way (using two's +// complement representations). +inline int64_t RawbitsToInt64(uint64_t bits) { + return (bits >= UINT64_C(0x8000000000000000)) + ? (-static_cast(UnsignedNegate(bits) - 1) - 1) + : static_cast(bits); +} + +inline int32_t RawbitsToInt32(uint32_t bits) { + return (bits >= UINT64_C(0x80000000)) + ? (-static_cast(UnsignedNegate(bits) - 1) - 1) + : static_cast(bits); +} + namespace internal { // Internal simulation class used solely by the simulator to @@ -294,7 +339,7 @@ class SimFloat16 : public Float16 { bool operator>(SimFloat16 rhs) const; bool operator==(SimFloat16 rhs) const; bool operator!=(SimFloat16 rhs) const; - // This is necessary for conversions peformed in (macro asm) Fmov. + // This is necessary for conversions performed in (macro asm) Fmov. bool operator==(double rhs) const; operator double() const; }; @@ -365,6 +410,10 @@ VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) { bool IsZero(Float16 value); +inline bool IsPositiveZero(double value) { + return (value == 0.0) && (copysign(1.0, value) > 0.0); +} + inline bool IsNaN(float value) { return std::isnan(value); } inline bool IsNaN(double value) { return std::isnan(value); } @@ -447,7 +496,9 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) { } -inline uint64_t LowestSetBit(uint64_t value) { return value & static_cast(-static_cast(value)); } +inline uint64_t LowestSetBit(uint64_t value) { + return value & UnsignedNegate(value); +} template @@ -484,11 +535,11 @@ T ReverseBits(T value) { template -inline T SignExtend(T val, int bitSize) { - VIXL_ASSERT(bitSize > 0); - T mask = (T(2) << (bitSize - 1)) - T(1); +inline T SignExtend(T val, int size_in_bits) { + VIXL_ASSERT(size_in_bits > 0); + T mask = (T(2) << (size_in_bits - 1)) - T(1); val &= mask; - T sign_bits = -((val >> (bitSize - 1)) << bitSize); + T sign_bits = -((val >> (size_in_bits - 1)) << size_in_bits); val |= sign_bits; return val; } @@ -570,7 +621,7 @@ T AlignUp(T pointer, // reinterpret_cast behaviour for other types. typename Unsigned::type pointer_raw = - (typename Unsigned::type)pointer; + (typename Unsigned::type) pointer; VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); size_t mask = alignment - 1; @@ -590,7 +641,7 @@ T AlignDown(T pointer, // reinterpret_cast behaviour for other types. 
typename Unsigned::type pointer_raw = - (typename Unsigned::type)pointer; + (typename Unsigned::type) pointer; VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); size_t mask = alignment - 1; @@ -801,7 +852,7 @@ class Uint32 { } int32_t GetSigned() const { return data_; } Uint32 operator~() const { return Uint32(~data_); } - Uint32 operator-() const { return Uint32(static_cast(-static_cast(data_))); } + Uint32 operator-() const { return Uint32(UnsignedNegate(data_)); } bool operator==(Uint32 value) const { return data_ == value.data_; } bool operator!=(Uint32 value) const { return data_ != value.data_; } bool operator>(Uint32 value) const { return data_ > value.data_; } @@ -869,7 +920,7 @@ class Uint64 { Uint32 GetHigh32() const { return Uint32(data_ >> 32); } Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); } Uint64 operator~() const { return Uint64(~data_); } - Uint64 operator-() const { return Uint64(static_cast(-static_cast(data_))); } + Uint64 operator-() const { return Uint64(UnsignedNegate(data_)); } bool operator==(Uint64 value) const { return data_ == value.data_; } bool operator!=(Uint64 value) const { return data_ != value.data_; } Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); } @@ -974,6 +1025,42 @@ Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {} Int64 BitCount(Uint32 value); +// The algorithm used is adapted from the one described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +template +int64_t MultiplyHigh(T u, T v) { + uint64_t u0, v0, w0, u1, v1, w1, w2, t; + VIXL_STATIC_ASSERT((N == 8) || (N == 16) || (N == 32) || (N == 64)); + uint64_t sign_mask = UINT64_C(1) << (N - 1); + uint64_t sign_ext = 0; + unsigned half_bits = N / 2; + uint64_t half_mask = GetUintMask(half_bits); + if (std::numeric_limits::is_signed) { + sign_ext = UINT64_C(0xffffffffffffffff) << half_bits; + } + + VIXL_ASSERT(sizeof(u) == sizeof(uint64_t)); + VIXL_ASSERT(sizeof(u) == sizeof(u0)); + + u0 = u & half_mask; + u1 = u >> half_bits | (((u & sign_mask) != 0) ? sign_ext : 0); + v0 = v & half_mask; + v1 = v >> half_bits | (((v & sign_mask) != 0) ? sign_ext : 0); + + w0 = u0 * v0; + t = u1 * v0 + (w0 >> half_bits); + + w1 = t & half_mask; + w2 = t >> half_bits | (((t & sign_mask) != 0) ? sign_ext : 0); + w1 = u0 * v1 + w1; + w1 = w1 >> half_bits | (((w1 & sign_mask) != 0) ? sign_ext : 0); + + uint64_t value = u1 * v1 + w2 + w1; + int64_t result; + memcpy(&result, &value, sizeof(result)); + return result; +} + } // namespace internal // The default NaN values (for FPCR.DN=1). @@ -1139,7 +1226,7 @@ T FPRound(int64_t sign, // For subnormal outputs, the shift must be adjusted by the exponent. The +1 // is necessary because the exponent of a subnormal value (encoded as 0) is // the same as the exponent of the smallest normal value (encoded as 1). - shift += -exponent + 1; + shift += static_cast(-exponent + 1); // Handle inputs that would produce a zero output. // @@ -1238,9 +1325,8 @@ inline Float16 FPRoundToFloat16(int64_t sign, uint64_t mantissa, FPRounding round_mode) { return RawbitsToFloat16( - FPRound(sign, exponent, mantissa, round_mode)); + FPRound( + sign, exponent, mantissa, round_mode)); } @@ -1276,6 +1362,81 @@ Float16 FPToFloat16(double value, FPRounding round_mode, UseDefaultNaN DN, bool* exception = NULL); + +// Like static_cast(value), but with specialisations for the Float16 type. 
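+// For instance (an illustrative sketch using only the helpers defined in
+// this hunk; the raw-bits value is an IEEE-754 fact):
+//
+//   Float16 h = StaticCastFPTo<Float16>(0.5);      // double -> FP16.
+//   float f = StaticCastFPTo<float>(h);            // FP16 -> float (0.5f).
+//   uint64_t raw = FPToRawbitsWithSize(32, 1.0f);  // 0x3f800000.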
+template +T StaticCastFPTo(F value) { + return static_cast(value); +} + +template <> +inline float StaticCastFPTo(Float16 value) { + return FPToFloat(value, kIgnoreDefaultNaN); +} + +template <> +inline double StaticCastFPTo(Float16 value) { + return FPToDouble(value, kIgnoreDefaultNaN); +} + +template <> +inline Float16 StaticCastFPTo(float value) { + return FPToFloat16(value, FPTieEven, kIgnoreDefaultNaN); +} + +template <> +inline Float16 StaticCastFPTo(double value) { + return FPToFloat16(value, FPTieEven, kIgnoreDefaultNaN); +} + +template +uint64_t FPToRawbitsWithSize(unsigned size_in_bits, T value) { + switch (size_in_bits) { + case 16: + return Float16ToRawbits(StaticCastFPTo(value)); + case 32: + return FloatToRawbits(StaticCastFPTo(value)); + case 64: + return DoubleToRawbits(StaticCastFPTo(value)); + } + VIXL_UNREACHABLE(); + return 0; +} + +template +T RawbitsWithSizeToFP(unsigned size_in_bits, uint64_t value) { + VIXL_ASSERT(IsUintN(size_in_bits, value)); + switch (size_in_bits) { + case 16: + return StaticCastFPTo(RawbitsToFloat16(static_cast(value))); + case 32: + return StaticCastFPTo(RawbitsToFloat(static_cast(value))); + case 64: + return StaticCastFPTo(RawbitsToDouble(value)); + } + VIXL_UNREACHABLE(); + return 0; +} + +// Jenkins one-at-a-time hash, based on +// https://en.wikipedia.org/wiki/Jenkins_hash_function citing +// https://www.drdobbs.com/database/algorithm-alley/184410284. +constexpr uint32_t Hash(const char* str, uint32_t hash = 0) { + if (*str == '\0') { + hash += hash << 3; + hash ^= hash >> 11; + hash += hash << 15; + return hash; + } else { + hash += *str; + hash += hash << 10; + hash ^= hash >> 6; + return Hash(str + 1, hash); + } +} + +constexpr uint32_t operator"" _h(const char* x, size_t) { return Hash(x); } + } // namespace vixl #endif // VIXL_UTILS_H diff --git a/dep/vixl/src/aarch32/assembler-aarch32.cc b/dep/vixl/src/aarch32/assembler-aarch32.cc index 5f636981d..641266648 100644 --- a/dep/vixl/src/aarch32/assembler-aarch32.cc +++ b/dep/vixl/src/aarch32/assembler-aarch32.cc @@ -2557,13 +2557,13 @@ void Assembler::adr(Condition cond, public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= 0) && (offset <= 1020) && - ((offset & 0x3) == 0)); - const int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= 0) && (off <= 1020) && ((off & 0x3) == 0)); + const int32_t target = off >> 2; return instr | (target & 0xff); } } immop; @@ -2588,15 +2588,16 @@ void Assembler::adr(Condition cond, public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); int32_t target; - if ((offset >= 0) && (offset <= 4095)) { - target = offset; + if ((off >= 0) && (off <= 4095)) { + target = off; } else { - target = -offset; + target = -off; VIXL_ASSERT((target >= 0) && (target <= 
4095)); // Emit the T2 encoding. instr |= 0x00a00000; @@ -2622,19 +2623,20 @@ void Assembler::adr(Condition cond, public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); int32_t target; - ImmediateA32 positive_immediate_a32(offset); - if (positive_immediate_a32.IsValid()) { - target = positive_immediate_a32.GetEncodingValue(); + ImmediateA32 pos_imm_a32(off); + if (pos_imm_a32.IsValid()) { + target = pos_imm_a32.GetEncodingValue(); } else { - ImmediateA32 negative_immediate_a32(-offset); - VIXL_ASSERT(negative_immediate_a32.IsValid()); + ImmediateA32 neg_imm_a32(-off); + VIXL_ASSERT(neg_imm_a32.IsValid()); // Emit the A2 encoding. - target = negative_immediate_a32.GetEncodingValue(); + target = neg_imm_a32.GetEncodingValue(); instr = (instr & ~0x00f00000) | 0x00400000; } return instr | (target & 0xfff); @@ -3024,13 +3026,12 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -256) && (offset <= 254) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -256) && (off <= 254) && ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | (target & 0xff); } } immop; @@ -3051,13 +3052,12 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -2048) && (offset <= 2046) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -2048) && (off <= 2046) && ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | (target & 0x7ff); } } immop; @@ -3075,13 +3075,13 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -1048576) && (offset <= 1048574) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -1048576) && (off <= 1048574) && + ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | 
(target & 0x7ff) | ((target & 0x1f800) << 5) | ((target & 0x20000) >> 4) | ((target & 0x40000) >> 7) | ((target & 0x80000) << 7); @@ -3104,13 +3104,13 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -16777216) && (offset <= 16777214) && - ((offset & 0x1) == 0)); - int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -16777216) && (off <= 16777214) && + ((off & 0x1) == 0)); + int32_t target = off >> 1; uint32_t S = target & (1 << 23); target ^= ((S >> 1) | (S >> 2)) ^ (3 << 21); return instr | (target & 0x7ff) | ((target & 0x1ff800) << 5) | @@ -3132,13 +3132,13 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -33554432) && (offset <= 33554428) && - ((offset & 0x3) == 0)); - const int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -33554432) && (off <= 33554428) && + ((off & 0x3) == 0)); + const int32_t target = off >> 2; return instr | (target & 0xffffff); } } immop; @@ -3462,13 +3462,13 @@ void Assembler::bl(Condition cond, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -16777216) && (offset <= 16777214) && - ((offset & 0x1) == 0)); - int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -16777216) && (off <= 16777214) && + ((off & 0x1) == 0)); + int32_t target = off >> 1; uint32_t S = target & (1 << 23); target ^= ((S >> 1) | (S >> 2)) ^ (3 << 21); return instr | (target & 0x7ff) | ((target & 0x1ff800) << 5) | @@ -3490,13 +3490,13 @@ void Assembler::bl(Condition cond, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= -33554432) && (offset <= 33554428) && - ((offset & 0x3) == 0)); - const int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= -33554432) && (off <= 33554428) && + ((off & 0x3) == 0)); + const int32_t target = off >> 2; return instr | (target & 0xffffff); } } immop; @@ -3549,13 +3549,14 @@ void Assembler::blx(Condition cond, 
Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -16777216) && (offset <= 16777212) && - ((offset & 0x3) == 0)); - int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -16777216) && (off <= 16777212) && + ((off & 0x3) == 0)); + int32_t target = off >> 2; uint32_t S = target & (1 << 22); target ^= ((S >> 1) | (S >> 2)) ^ (3 << 20); return instr | ((target & 0x3ff) << 1) | ((target & 0xffc00) << 6) | @@ -3577,15 +3578,14 @@ void Assembler::blx(Condition cond, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const - VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = - location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -33554432) && (offset <= 33554430) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -33554432) && (off <= 33554430) && + ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | ((target & 0x1) << 24) | ((target & 0x1fffffe) >> 1); } } immop; @@ -3698,13 +3698,12 @@ void Assembler::cbnz(Register rn, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= 0) && (offset <= 126) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= 0) && (off <= 126) && ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | ((target & 0x1f) << 3) | ((target & 0x20) << 4); } } immop; @@ -3748,13 +3747,12 @@ void Assembler::cbz(Register rn, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - pc; - VIXL_ASSERT((offset >= 0) && (offset <= 126) && - ((offset & 0x1) == 0)); - const int32_t target = offset >> 1; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = loc->GetLocation() - program_counter; + VIXL_ASSERT((off >= 0) && (off <= 126) && ((off & 0x1) == 0)); + const int32_t target = off >> 1; return instr | ((target & 0x1f) << 3) | ((target & 0x20) << 4); } } immop; @@ -4790,7 +4788,7 @@ void Assembler::ldm(Condition cond, } // LDM{}{} SP!, ; T1 if (!size.IsWide() && rn.Is(sp) && write_back.DoesWriteBack() && - ((registers.GetList() & ~0x80ff) == 0)) { + registers.IsR0toR7orPC()) { EmitT32_16(0xbc00 | 
(GetRegisterListEncoding(registers, 15, 1) << 8) | GetRegisterListEncoding(registers, 0, 8)); AdvanceIT(); @@ -5208,13 +5206,13 @@ void Assembler::ldr(Condition cond, public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= 0) && (offset <= 1020) && - ((offset & 0x3) == 0)); - const int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= 0) && (off <= 1020) && ((off & 0x3) == 0)); + const int32_t target = off >> 2; return instr | (target & 0xff); } } immop; @@ -5233,13 +5231,14 @@ void Assembler::ldr(Condition cond, public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -5259,13 +5258,14 @@ void Assembler::ldr(Condition cond, public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -5505,13 +5505,14 @@ void Assembler::ldrb(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -5531,13 +5532,14 @@ void Assembler::ldrb(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t 
Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -5747,13 +5749,13 @@ void Assembler::ldrd(Condition cond, public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -1020) && (offset <= 1020) && - ((offset & 0x3) == 0)); - int32_t target = offset >> 2; + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0)); + int32_t target = off >> 2; uint32_t U = (target >= 0); target = abs(target) | (U << 8); return instr | (target & 0xff) | ((target & 0x100) << 15); @@ -5777,13 +5779,14 @@ void Assembler::ldrd(Condition cond, public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -255) && (offset <= 255)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 8); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -255) && (off <= 255)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 8); return instr | (target & 0xf) | ((target & 0xf0) << 4) | ((target & 0x100) << 15); } @@ -6129,13 +6132,14 @@ void Assembler::ldrh(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -6155,13 +6159,14 @@ void Assembler::ldrh(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = 
location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -255) && (offset <= 255)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 8); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -255) && (off <= 255)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 8); return instr | (target & 0xf) | ((target & 0xf0) << 4) | ((target & 0x100) << 15); } @@ -6382,13 +6387,14 @@ void Assembler::ldrsb(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -6408,13 +6414,14 @@ void Assembler::ldrsb(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -255) && (offset <= 255)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 8); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kA32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -255) && (off <= 255)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 8); return instr | (target & 0xf) | ((target & 0xf0) << 4) | ((target & 0x100) << 15); } @@ -6635,13 +6642,14 @@ void Assembler::ldrsh(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(T32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kT32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -4095) && (offset <= 4095)); - uint32_t U = (offset >= 0); - int32_t target = abs(offset) | (U << 12); + Location::Offset program_counter, + const Location* loc) const VIXL_OVERRIDE { + program_counter += kT32PcDelta; + Location::Offset off = + loc->GetLocation() - AlignDown(program_counter, 4); + VIXL_ASSERT((off >= -4095) && (off <= 4095)); + uint32_t U = (off >= 0); + int32_t target = abs(off) | (U << 12); return instr | (target & 0xfff) | ((target & 0x1000) << 11); } } immop; @@ -6661,13 +6669,14 @@ void Assembler::ldrsh(Condition cond, Register rt, Location* location) { public: EmitOp() : Location::EmitOperator(A32) {} virtual uint32_t Encode(uint32_t instr, - Location::Offset pc, - const Location* location) const VIXL_OVERRIDE { - pc += kA32PcDelta; - Location::Offset offset = location->GetLocation() - AlignDown(pc, 4); - VIXL_ASSERT((offset >= -255) && (offset <= 
@@ -6661,13 +6669,14 @@ void Assembler::ldrsh(Condition cond, Register rt, Location* location) {
      public:
       EmitOp() : Location::EmitOperator(A32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kA32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -255) && (offset <= 255));
-        uint32_t U = (offset >= 0);
-        int32_t target = abs(offset) | (U << 8);
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kA32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -255) && (off <= 255));
+        uint32_t U = (off >= 0);
+        int32_t target = abs(off) | (U << 8);
         return instr | (target & 0xf) | ((target & 0xf0) << 4) |
                ((target & 0x100) << 15);
       }
@@ -8039,13 +8048,14 @@ void Assembler::pld(Condition cond, Location* location) {
      public:
       EmitOp() : Location::EmitOperator(T32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kT32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
-        uint32_t U = (offset >= 0);
-        int32_t target = abs(offset) | (U << 12);
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kT32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -4095) && (off <= 4095));
+        uint32_t U = (off >= 0);
+        int32_t target = abs(off) | (U << 12);
         return instr | (target & 0xfff) | ((target & 0x1000) << 11);
       }
     } immop;
@@ -8062,15 +8072,14 @@ void Assembler::pld(Condition cond, Location* location) {
      public:
       EmitOp() : Location::EmitOperator(A32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const
-                              VIXL_OVERRIDE {
-        pc += kA32PcDelta;
-        Location::Offset offset =
-            location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
-        uint32_t U = (offset >= 0);
-        int32_t target = abs(offset) | (U << 12);
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kA32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -4095) && (off <= 4095));
+        uint32_t U = (off >= 0);
+        int32_t target = abs(off) | (U << 12);
         return instr | (target & 0xfff) | ((target & 0x1000) << 11);
       }
     } immop;
@@ -8403,13 +8412,14 @@ void Assembler::pli(Condition cond, Location* location) {
      public:
       EmitOp() : Location::EmitOperator(T32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kT32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
-        uint32_t U = (offset >= 0);
-        int32_t target = abs(offset) | (U << 12);
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kT32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -4095) && (off <= 4095));
+        uint32_t U = (off >= 0);
+        int32_t target = abs(off) | (U << 12);
         return instr | (target & 0xfff) | ((target & 0x1000) << 11);
       }
     } immop;
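Every one of these operators starts by adding `kA32PcDelta` or `kT32PcDelta` before aligning down. That reflects the architectural rule that an Arm instruction reading PC observes an address ahead of its own: +8 in A32 state, +4 in T32 state, measured from the word-aligned PC for literal addressing. A small illustration, assuming nothing from vixl beyond those two architectural deltas (the helper names below are ours):

```cpp
#include <cstdint>
#include <cstdio>

constexpr int32_t kA32PcDelta = 8;  // A32: PC reads as instruction + 8.
constexpr int32_t kT32PcDelta = 4;  // T32: PC reads as instruction + 4.

constexpr int32_t AlignDown(int32_t value, int32_t align) {
  return value & ~(align - 1);
}

int32_t PcRelativeOffset(int32_t instr_addr, int32_t label_addr, bool t32) {
  int32_t pc = instr_addr + (t32 ? kT32PcDelta : kA32PcDelta);
  // Literal addressing measures from the word-aligned PC, which is what
  // AlignDown(program_counter, 4) does in every Encode() above.
  return label_addr - AlignDown(pc, 4);
}

int main() {
  printf("A32: %d\n", PcRelativeOffset(0x8000, 0x8100, false));  // 248
  printf("T32: %d\n", PcRelativeOffset(0x8002, 0x8100, true));   // 252
  return 0;
}
```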
@@ -8426,15 +8436,14 @@ void Assembler::pli(Condition cond, Location* location) {
      public:
       EmitOp() : Location::EmitOperator(A32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const
-                              VIXL_OVERRIDE {
-        pc += kA32PcDelta;
-        Location::Offset offset =
-            location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
-        uint32_t U = (offset >= 0);
-        int32_t target = abs(offset) | (U << 12);
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kA32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -4095) && (off <= 4095));
+        uint32_t U = (off >= 0);
+        int32_t target = abs(off) | (U << 12);
         return instr | (target & 0xfff) | ((target & 0x1000) << 11);
       }
     } immop;
@@ -8471,29 +8480,39 @@ bool Assembler::pli_info(Condition cond,
 void Assembler::pop(Condition cond, EncodingSize size, RegisterList registers) {
   VIXL_ASSERT(AllowAssembler());
   CheckIT(cond);
-  if (IsUsingT32()) {
-    // POP{<c>}{<q>} <registers> ; T1
-    if (!size.IsWide() && ((registers.GetList() & ~0x80ff) == 0)) {
-      EmitT32_16(0xbc00 | (GetRegisterListEncoding(registers, 15, 1) << 8) |
-                 GetRegisterListEncoding(registers, 0, 8));
-      AdvanceIT();
-      return;
-    }
-    // POP{<c>}{<q>} <registers> ; T2
-    if (!size.IsNarrow() && ((registers.GetList() & ~0xdfff) == 0)) {
-      EmitT32_32(0xe8bd0000U |
-                 (GetRegisterListEncoding(registers, 15, 1) << 15) |
-                 (GetRegisterListEncoding(registers, 14, 1) << 14) |
-                 GetRegisterListEncoding(registers, 0, 13));
-      AdvanceIT();
-      return;
-    }
-  } else {
-    // POP{<c>}{<q>} <registers> ; A1
-    if (cond.IsNotNever()) {
-      EmitA32(0x08bd0000U | (cond.GetCondition() << 28) |
-              GetRegisterListEncoding(registers, 0, 16));
-      return;
+  if (!registers.IsEmpty() || AllowUnpredictable()) {
+    if (IsUsingT32()) {
+      // A branch out of an IT block should be the last instruction in the
+      // block.
+      if (!registers.Includes(pc) || OutsideITBlockAndAlOrLast(cond) ||
+          AllowUnpredictable()) {
+        // POP{<c>}{<q>} <registers> ; T1
+        if (!size.IsWide() && registers.IsR0toR7orPC()) {
+          EmitT32_16(0xbc00 | (GetRegisterListEncoding(registers, 15, 1) << 8) |
+                     GetRegisterListEncoding(registers, 0, 8));
+          AdvanceIT();
+          return;
+        }
+        // POP{<c>}{<q>} <registers> ; T2
+        // Alias of: LDM{<c>}{<q>} SP!, <registers> ; T2
+        if (!size.IsNarrow() &&
+            ((!registers.Includes(sp) && (registers.GetCount() > 1) &&
+              !(registers.Includes(pc) && registers.Includes(lr))) ||
+             AllowUnpredictable())) {
+          EmitT32_32(0xe8bd0000U | GetRegisterListEncoding(registers, 0, 16));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // POP{<c>}{<q>} <registers> ; A1
+      // Alias of: LDM{<c>}{<q>} SP!, <registers> ; A1
+      if (cond.IsNotNever() &&
+          (!registers.Includes(sp) || AllowUnpredictable())) {
+        EmitA32(0x08bd0000U | (cond.GetCondition() << 28) |
+                GetRegisterListEncoding(registers, 0, 16));
+        return;
+      }
     }
   }
   Delegate(kPop, &Assembler::pop, cond, size, registers);
@@ -8502,19 +8521,24 @@ void Assembler::pop(Condition cond, EncodingSize size, Register rt) {
   VIXL_ASSERT(AllowAssembler());
   CheckIT(cond);
-  if (IsUsingT32()) {
-    // POP{<c>}{<q>} <single_register_list> ; T4
-    if (!size.IsNarrow() && ((!rt.IsPC() || OutsideITBlockAndAlOrLast(cond)) ||
-                             AllowUnpredictable())) {
-      EmitT32_32(0xf85d0b04U | (rt.GetCode() << 12));
-      AdvanceIT();
-      return;
-    }
-  } else {
-    // POP{<c>}{<q>} <single_register_list> ; A1
-    if (cond.IsNotNever()) {
-      EmitA32(0x049d0004U | (cond.GetCondition() << 28) | (rt.GetCode() << 12));
-      return;
+  if (!rt.IsSP() || AllowUnpredictable()) {
+    if (IsUsingT32()) {
+      // POP{<c>}{<q>} <single_register_list> ; T4
+      // Alias of: LDR{<c>}{<q>} <Rt>, [SP], #4 ; T4
+      if (!size.IsNarrow() && (!rt.IsPC() || OutsideITBlockAndAlOrLast(cond) ||
+                               AllowUnpredictable())) {
+        EmitT32_32(0xf85d0b04U | (rt.GetCode() << 12));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // POP{<c>}{<q>} <single_register_list> ; A1
+      // Alias of: LDR{<c>}{<q>} <Rt>, [SP], #4 ; T1
+      if (cond.IsNotNever()) {
+        EmitA32(0x049d0004U | (cond.GetCondition() << 28) |
                (rt.GetCode() << 12));
+        return;
+      }
     }
   }
   Delegate(kPop, &Assembler::pop, cond, size, rt);
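The new POP guards reject UNPREDICTABLE register lists unless `AllowUnpredictable()` is set: an empty list, SP in the list, a single register (which should use the T4/LDR alias), or PC and LR together. A bitmask model of the T2 check is sketched below; the helper names are ours, standing in for `RegisterList`'s `Includes()`/`GetCount()` queries rather than reproducing vixl's API.

```cpp
#include <cassert>
#include <cstdint>

// One bit per register, r0..r15.
constexpr uint32_t kSP = 1u << 13;
constexpr uint32_t kLR = 1u << 14;
constexpr uint32_t kPC = 1u << 15;

int CountSetBits(uint32_t list) {
  int n = 0;
  for (; list != 0; list &= list - 1) ++n;  // clear the lowest set bit
  return n;
}

// Mirrors the new T2 condition: non-empty, no SP, at least two registers,
// and never PC and LR together.
bool PopT2IsPredictable(uint32_t registers) {
  if (registers == 0) return false;
  if ((registers & kSP) != 0) return false;
  if (CountSetBits(registers) < 2) return false;
  if (((registers & kPC) != 0) && ((registers & kLR) != 0)) return false;
  return true;
}

int main() {
  assert(PopT2IsPredictable((1u << 4) | (1u << 5) | kPC));  // {r4, r5, pc}
  assert(!PopT2IsPredictable(kLR | kPC));  // pc and lr together
  assert(!PopT2IsPredictable(1u << 0));    // single register: use T4/LDR
  return 0;
}
```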
@@ -8525,28 +8549,37 @@ void Assembler::push(Condition cond, RegisterList registers) {
   VIXL_ASSERT(AllowAssembler());
   CheckIT(cond);
-  if (IsUsingT32()) {
-    // PUSH{<c>}{<q>} <registers> ; T1
-    if (!size.IsWide() && ((registers.GetList() & ~0x40ff) == 0)) {
-      EmitT32_16(0xb400 | (GetRegisterListEncoding(registers, 14, 1) << 8) |
-                 GetRegisterListEncoding(registers, 0, 8));
-      AdvanceIT();
-      return;
-    }
-    // PUSH{<c>}{<q>} <registers> ; T1
-    if (!size.IsNarrow() && ((registers.GetList() & ~0x5fff) == 0)) {
-      EmitT32_32(0xe92d0000U |
-                 (GetRegisterListEncoding(registers, 14, 1) << 14) |
-                 GetRegisterListEncoding(registers, 0, 13));
-      AdvanceIT();
-      return;
-    }
-  } else {
-    // PUSH{<c>}{<q>} <registers> ; A1
-    if (cond.IsNotNever()) {
-      EmitA32(0x092d0000U | (cond.GetCondition() << 28) |
-              GetRegisterListEncoding(registers, 0, 16));
-      return;
+  if (!registers.IsEmpty() || AllowUnpredictable()) {
+    if (IsUsingT32()) {
+      // PUSH{<c>}{<q>} <registers> ; T1
+      if (!size.IsWide() && registers.IsR0toR7orLR()) {
+        EmitT32_16(0xb400 | (GetRegisterListEncoding(registers, 14, 1) << 8) |
+                   GetRegisterListEncoding(registers, 0, 8));
+        AdvanceIT();
+        return;
+      }
+      // PUSH{<c>}{<q>} <registers> ; T1
+      // Alias of: STMDB SP!, <registers> ; T1
+      if (!size.IsNarrow() && !registers.Includes(pc) &&
+          ((!registers.Includes(sp) && (registers.GetCount() > 1)) ||
+           AllowUnpredictable())) {
+        EmitT32_32(0xe92d0000U | GetRegisterListEncoding(registers, 0, 15));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // PUSH{<c>}{<q>} <registers> ; A1
+      // Alias of: STMDB SP!, <registers> ; A1
+      if (cond.IsNotNever() &&
+          // For A32, sp can appear in the list, but stores an UNKNOWN value if
+          // it is not the lowest-valued register.
+          (!registers.Includes(sp) ||
+           registers.GetFirstAvailableRegister().IsSP() ||
+           AllowUnpredictable())) {
+        EmitA32(0x092d0000U | (cond.GetCondition() << 28) |
+                GetRegisterListEncoding(registers, 0, 16));
+        return;
+      }
     }
   }
   Delegate(kPush, &Assembler::push, cond, size, registers);
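The in-diff comment above captures the A32-specific quirk: SP is allowed to appear in a PUSH list, but the value stored for it is UNKNOWN unless SP is the lowest-numbered register, which is what the `GetFirstAvailableRegister().IsSP()` test enforces. A bitmask sketch of the same rule, under the same hypothetical helpers as the POP example:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t kSP = 1u << 13;
constexpr uint32_t kLR = 1u << 14;

bool PushA32IsPredictable(uint32_t registers) {
  if (registers == 0) return false;         // empty list is UNPREDICTABLE
  if ((registers & kSP) == 0) return true;  // no SP involved: always fine
  // 0u - registers isolates the lowest set bit without negating a signed
  // value (the same concern UnsignedNegate addresses elsewhere in vixl).
  uint32_t lowest = registers & (0u - registers);
  return lowest == kSP;  // SP must be the lowest-numbered register
}

int main() {
  assert(PushA32IsPredictable(kSP | kLR));         // {sp, lr}: sp is lowest
  assert(!PushA32IsPredictable((1u << 0) | kSP));  // {r0, sp}: sp UNKNOWN
  return 0;
}
```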
@@ -8557,14 +8590,17 @@ void Assembler::push(Condition cond, EncodingSize size, Register rt) {
   CheckIT(cond);
   if (IsUsingT32()) {
     // PUSH{<c>}{<q>} <single_register_list> ; T4
-    if (!size.IsNarrow() && (!rt.IsPC() || AllowUnpredictable())) {
+    // Alias of: STR{<c>}{<q>} <Rt>, [SP, #4]! ; T4
+    if (!size.IsNarrow() &&
+        ((!rt.IsPC() && !rt.IsSP()) || AllowUnpredictable())) {
       EmitT32_32(0xf84d0d04U | (rt.GetCode() << 12));
       AdvanceIT();
       return;
     }
   } else {
     // PUSH{<c>}{<q>} <single_register_list> ; A1
-    if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) {
+    // Alias of: STR{<c>}{<q>} <Rt>, [SP, #4]! ; A1
+    if (cond.IsNotNever() && (!rt.IsSP() || AllowUnpredictable())) {
       EmitA32(0x052d0004U | (cond.GetCondition() << 28) | (rt.GetCode() << 12));
       return;
     }
@@ -11177,7 +11213,7 @@ void Assembler::stmdb(Condition cond,
   if (IsUsingT32()) {
     // STMDB{<c>}{<q>} SP!, <registers> ; T1
     if (!size.IsWide() && rn.Is(sp) && write_back.DoesWriteBack() &&
-        ((registers.GetList() & ~0x40ff) == 0)) {
+        registers.IsR0toR7orLR()) {
       EmitT32_16(0xb400 | (GetRegisterListEncoding(registers, 14, 1) << 8) |
                  GetRegisterListEncoding(registers, 0, 8));
       AdvanceIT();
@@ -19589,13 +19625,13 @@ void Assembler::vldr(Condition cond,
      public:
       EmitOp() : Location::EmitOperator(T32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kT32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
-                    ((offset & 0x3) == 0));
-        int32_t target = offset >> 2;
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kT32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0));
+        int32_t target = off >> 2;
         uint32_t U = (target >= 0);
         target = abs(target) | (U << 8);
         return instr | (target & 0xff) | ((target & 0x100) << 15);
@@ -19619,13 +19655,13 @@ void Assembler::vldr(Condition cond,
      public:
       EmitOp() : Location::EmitOperator(A32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kA32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
-                    ((offset & 0x3) == 0));
-        int32_t target = offset >> 2;
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kA32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0));
+        int32_t target = off >> 2;
         uint32_t U = (target >= 0);
         target = abs(target) | (U << 8);
         return instr | (target & 0xff) | ((target & 0x100) << 15);
@@ -19743,13 +19779,13 @@ void Assembler::vldr(Condition cond,
      public:
       EmitOp() : Location::EmitOperator(T32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kT32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
-                    ((offset & 0x3) == 0));
-        int32_t target = offset >> 2;
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kT32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0));
+        int32_t target = off >> 2;
         uint32_t U = (target >= 0);
         target = abs(target) | (U << 8);
         return instr | (target & 0xff) | ((target & 0x100) << 15);
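The VLDR literal operators differ from the earlier integer-load ones in one step: the byte offset must be word-aligned and within ±1020, and is stored as an 8-bit word count rather than a byte count. A standalone sketch of that encoding follows; the function name is ours and the base opcode in `main` is a placeholder, not a claim about vixl's tables.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Word-scaled sign-and-magnitude encoding, as in the hunks above: imm8 in
// bits [7:0], with the U (add/subtract) bit moved up to bit 23 by << 15.
uint32_t EncodeVldrLiteralOffset(uint32_t instr, int32_t off) {
  assert((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0));
  int32_t target = off >> 2;        // scale bytes down to words
  uint32_t U = (target >= 0);
  target = abs(target) | (U << 8);  // sign-and-magnitude, U in bit 8
  return instr | (target & 0xff) | ((target & 0x100) << 15);
}

int main() {
  // -256 bytes -> U = 0, imm8 = 64. 0xed1f0b00 is a placeholder opcode.
  uint32_t encoded = EncodeVldrLiteralOffset(0xed1f0b00u, -256);
  assert((encoded & 0xff) == 64);
  assert((encoded & (1u << 23)) == 0);  // U bit clear for a negative offset
  return 0;
}
```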
@@ -19773,13 +19809,13 @@ void Assembler::vldr(Condition cond,
      public:
       EmitOp() : Location::EmitOperator(A32) {}
       virtual uint32_t Encode(uint32_t instr,
-                              Location::Offset pc,
-                              const Location* location) const VIXL_OVERRIDE {
-        pc += kA32PcDelta;
-        Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
-        VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
-                    ((offset & 0x3) == 0));
-        int32_t target = offset >> 2;
+                              Location::Offset program_counter,
+                              const Location* loc) const VIXL_OVERRIDE {
+        program_counter += kA32PcDelta;
+        Location::Offset off =
+            loc->GetLocation() - AlignDown(program_counter, 4);
+        VIXL_ASSERT((off >= -1020) && (off <= 1020) && ((off & 0x3) == 0));
+        int32_t target = off >> 2;
         uint32_t U = (target >= 0);
         target = abs(target) | (U << 8);
         return instr | (target & 0xff) | ((target & 0x100) << 15);
diff --git a/dep/vixl/src/aarch32/disasm-aarch32.cc b/dep/vixl/src/aarch32/disasm-aarch32.cc
index 9ed3a8315..54dafe1fe 100644
--- a/dep/vixl/src/aarch32/disasm-aarch32.cc
+++ b/dep/vixl/src/aarch32/disasm-aarch32.cc
@@ -348,7 +348,7 @@ DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) {
     *lane = (value >> 2) & 1;
     return Untyped32;
   }
-  *lane = -1;
+  *lane = ~0U;
   return kDataTypeValueInvalid;
 }
@@ -365,7 +365,7 @@ DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) {
     *lane = (value >> 2) & 1;
     return Untyped32;
   }
-  *lane = -1;
+  *lane = ~0U;
   return kDataTypeValueInvalid;
 }
@@ -382,7 +382,7 @@ DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane) {
     *lane = (value >> 3) & 1;
     return Untyped32;
   }
-  *lane = -1;
+  *lane = ~0U;
   return kDataTypeValueInvalid;
 }
@@ -8288,13 +8288,13 @@ void Disassembler::DecodeT32(uint32_t instr) {
             UnallocatedT32(instr);
             return;
           }
-          unsigned firstcond = (instr >> 20) & 0xf;
           unsigned mask = (instr >> 16) & 0xf;
-          bool wasInITBlock = InITBlock();
-          SetIT(Condition(firstcond), mask);
-          it(Condition(firstcond), mask);
-          if (wasInITBlock || (firstcond == 15) ||
-              ((firstcond == al) &&
+          unsigned first_cond = (instr >> 20) & 0xf;
+          bool was_in_it_block = InITBlock();
+          SetIT(Condition(first_cond), mask);
+          it(Condition(first_cond), mask);
+          if (was_in_it_block || (first_cond == 15) ||
+              ((first_cond == al) &&
                (BitCount(Uint32(mask)) != 1))) {
             UnpredictableT32(instr);
           }
@@ -60977,7 +60977,7 @@ void Disassembler::DecodeA32(uint32_t instr) {
       Condition condition((instr >> 28) & 0xf);
       unsigned rd = (instr >> 12) & 0xf;
       uint32_t imm = ImmediateA32::Decode(instr & 0xfff);
-      Location location(-imm, kA32PcDelta);
+      Location location(UnsignedNegate(imm), kA32PcDelta);
       // ADR{<c>}{<q>} <Rd>,