diff --git a/dep/CMakeLists.txt b/dep/CMakeLists.txt index c8613695e..e3d76fc68 100644 --- a/dep/CMakeLists.txt +++ b/dep/CMakeLists.txt @@ -9,6 +9,10 @@ if(NOT ANDROID) add_subdirectory(nativefiledialog) endif() +if(${CPU_ARCH} STREQUAL "aarch64") + add_subdirectory(vixl) +endif() + ###################### YBaseLib ############################ set(YBASELIB_SRC_BASE ${CMAKE_SOURCE_DIR}/dep/YBaseLib/Source) @@ -95,4 +99,4 @@ target_link_libraries(YBaseLib PUBLIC Threads::Threads) if(ANDROID) target_link_libraries(YBaseLib PRIVATE log) -endif() \ No newline at end of file +endif() diff --git a/dep/vixl/AUTHORS b/dep/vixl/AUTHORS new file mode 100644 index 000000000..257ec9d32 --- /dev/null +++ b/dep/vixl/AUTHORS @@ -0,0 +1,8 @@ +# Below is a list of people and organisations that have contributed to the VIXL +# project. Entries should be added to the list as: +# +# Name/Organization + +ARM Ltd. <*@arm.com> +Google Inc. <*@google.com> +Linaro <*@linaro.org> diff --git a/dep/vixl/CMakeLists.txt b/dep/vixl/CMakeLists.txt new file mode 100644 index 000000000..26ca45704 --- /dev/null +++ b/dep/vixl/CMakeLists.txt @@ -0,0 +1,73 @@ +set(SRCS + include/vixl/aarch32/assembler-aarch32.h + include/vixl/aarch32/constants-aarch32.h + include/vixl/aarch32/disasm-aarch32.h + include/vixl/aarch32/instructions-aarch32.h + include/vixl/aarch32/location-aarch32.h + include/vixl/aarch32/macro-assembler-aarch32.h + include/vixl/aarch32/operands-aarch32.h + include/vixl/aarch64/abi-aarch64.h + include/vixl/aarch64/assembler-aarch64.h + include/vixl/aarch64/constants-aarch64.h + include/vixl/aarch64/cpu-aarch64.h + include/vixl/aarch64/cpu-features-auditor-aarch64.h + include/vixl/aarch64/decoder-aarch64.h + include/vixl/aarch64/disasm-aarch64.h + include/vixl/aarch64/instructions-aarch64.h + include/vixl/aarch64/instrument-aarch64.h + include/vixl/aarch64/macro-assembler-aarch64.h + include/vixl/aarch64/operands-aarch64.h + include/vixl/aarch64/simulator-aarch64.h + include/vixl/aarch64/simulator-constants-aarch64.h + include/vixl/assembler-base-vixl.h + include/vixl/code-buffer-vixl.h + include/vixl/code-generation-scopes-vixl.h + include/vixl/compiler-intrinsics-vixl.h + include/vixl/cpu-features.h + include/vixl/globals-vixl.h + include/vixl/invalset-vixl.h + include/vixl/macro-assembler-interface.h + include/vixl/platform-vixl.h + include/vixl/pool-manager-impl.h + include/vixl/pool-manager.h + include/vixl/utils-vixl.h + src/aarch32/assembler-aarch32.cc + src/aarch32/constants-aarch32.cc + src/aarch32/disasm-aarch32.cc + src/aarch32/instructions-aarch32.cc + src/aarch32/location-aarch32.cc + src/aarch32/macro-assembler-aarch32.cc + src/aarch32/operands-aarch32.cc + src/aarch64/assembler-aarch64.cc + src/aarch64/cpu-aarch64.cc + src/aarch64/cpu-features-auditor-aarch64.cc + src/aarch64/decoder-aarch64.cc + src/aarch64/disasm-aarch64.cc + src/aarch64/instructions-aarch64.cc + src/aarch64/instrument-aarch64.cc + src/aarch64/logic-aarch64.cc + src/aarch64/macro-assembler-aarch64.cc + src/aarch64/operands-aarch64.cc + src/aarch64/pointer-auth-aarch64.cc + src/aarch64/simulator-aarch64.cc + src/code-buffer-vixl.cc + src/compiler-intrinsics-vixl.cc + src/cpu-features.cc + src/utils-vixl.cc +) + +add_library(vixl ${SRCS}) +target_include_directories(vixl PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include +) +target_include_directories(vixl PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/include/vixl + ${CMAKE_CURRENT_SOURCE_DIR}/include/vixl/aarch32 + ${CMAKE_CURRENT_SOURCE_DIR}/include/vixl/aarch64 +) 
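+# Note (assumed semantics, from VIXL's globals-vixl.h and code-buffer-vixl.cc):
+# VIXL_INCLUDE_TARGET_AARCH32/AARCH64 compile in both the A32/T32 and A64
+# front ends, and VIXL_CODE_BUFFER_MMAP backs code buffers with mmap() (the
+# alternative on hosts without mmap is VIXL_CODE_BUFFER_MALLOC).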
+target_compile_definitions(vixl PUBLIC
+  VIXL_INCLUDE_TARGET_AARCH32
+  VIXL_INCLUDE_TARGET_AARCH64
+  VIXL_CODE_BUFFER_MMAP
+)
+
diff --git a/dep/vixl/LICENCE b/dep/vixl/LICENCE
new file mode 100644
index 000000000..0acd8ebd6
--- /dev/null
+++ b/dep/vixl/LICENCE
@@ -0,0 +1,30 @@
+LICENCE
+=======
+
+The software in this repository is covered by the following licence.
+
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dep/vixl/README.md b/dep/vixl/README.md
new file mode 100644
index 000000000..acf226051
--- /dev/null
+++ b/dep/vixl/README.md
@@ -0,0 +1,186 @@
+VIXL: Armv8 Runtime Code Generation Library, 3.0.0
+==================================================
+
+Contents:
+
+ * Overview
+ * Licence
+ * Requirements
+ * Known limitations
+ * Usage
+
+
+Overview
+========
+
+VIXL contains three components.
+
+ 1. Programmatic **assemblers** to generate A64, A32 or T32 code at runtime. The
+    assemblers abstract some of the constraints of each ISA; for example, most
+    instructions support any immediate.
+ 2. **Disassemblers** that can print any instruction emitted by the assemblers.
+ 3. A **simulator** that can simulate any instruction emitted by the A64
+    assembler. The simulator allows generated code to be run on another
+    architecture without the need for a full ISA model.
+
+The VIXL git repository can be found [on 'https://git.linaro.org'][vixl].
+
+Changes from previous versions of VIXL can be found in the
+[Changelog](doc/changelog.md).
+
+
+Licence
+=======
+
+This software is covered by the licence described in the [LICENCE](LICENCE)
+file.
+
+
+Requirements
+============
+
+To build VIXL the following software is required:
+
+ 1. Python 2.7
+ 2. SCons 2.0
+ 3. GCC 4.8+ or Clang 3.4+
+
+A 64-bit host machine is required, implementing an LP64 data model. VIXL has
+been tested using GCC on AArch64 Debian, GCC and Clang on amd64 Ubuntu
+systems.
+
+To run the linter and code formatting stages of the tests, the following
+software is also required:
+
+ 1. Git
+ 2. 
[Google's `cpplint.py`][cpplint] + 3. clang-format-3.8 + +Refer to the 'Usage' section for details. + + +Known Limitations for AArch64 code generation +============================================= + +VIXL was developed for JavaScript engines so a number of features from A64 were +deemed unnecessary: + + * Limited rounding mode support for floating point. + * Limited support for synchronisation instructions. + * Limited support for system instructions. + * A few miscellaneous integer and floating point instructions are missing. + +The VIXL simulator supports only those instructions that the VIXL assembler can +generate. The `doc` directory contains a +[list of supported A64 instructions](doc/aarch64/supported-instructions-aarch64.md). + +The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it +builds and mostly works for 32-bit x86 platforms, there are a number of +floating-point operations which do not work correctly, and a number of tests +fail as a result. + +VIXL may not build using Clang 3.7, due to a compiler warning. A workaround is +to disable conversion of warnings to errors, or to delete the offending +`return` statement reported and rebuild. This problem will be fixed in the next +release. + +Debug Builds +------------ + +Your project's build system must define `VIXL_DEBUG` (eg. `-DVIXL_DEBUG`) +when using a VIXL library that has been built with debug enabled. + +Some classes defined in VIXL header files contain fields that are only present +in debug builds, so if `VIXL_DEBUG` is defined when the library is built, but +not defined for the header files included in your project, you will see runtime +failures. + +Exclusive-Access Instructions +----------------------------- + +All exclusive-access instructions are supported, but the simulator cannot +accurately simulate their behaviour as described in the ARMv8 Architecture +Reference Manual. + + * A local monitor is simulated, so simulated exclusive loads and stores execute + as expected in a single-threaded environment. + * The global monitor is simulated by occasionally causing exclusive-access + instructions to fail regardless of the local monitor state. + * Load-acquire, store-release semantics are approximated by issuing a host + memory barrier after loads or before stores. The built-in + `__sync_synchronize()` is used for this purpose. + +The simulator tries to be strict, and implements the following restrictions that +the ARMv8 ARM allows: + + * A pair of load-/store-exclusive instructions will only succeed if they have + the same address and access size. + * Most of the time, cache-maintenance operations or explicit memory accesses + will clear the exclusive monitor. + * To ensure that simulated code does not depend on this behaviour, the + exclusive monitor will sometimes be left intact after these instructions. + +Instructions affected by these limitations: + `stxrb`, `stxrh`, `stxr`, `ldxrb`, `ldxrh`, `ldxr`, `stxp`, `ldxp`, `stlxrb`, + `stlxrh`, `stlxr`, `ldaxrb`, `ldaxrh`, `ldaxr`, `stlxp`, `ldaxp`, `stlrb`, + `stlrh`, `stlr`, `ldarb`, `ldarh`, `ldar`, `clrex`. + + +Usage +===== + +Running all Tests +----------------- + +The helper script `tools/test.py` will build and run every test that is provided +with VIXL, in both release and debug mode. It is a useful script for verifying +that all of VIXL's dependencies are in place and that VIXL is working as it +should. 
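+
+If you only need a quick sanity check that the library itself works, a tiny
+program along the following lines can stand in. This is a sketch, not part of
+the test suite: it assumes a build with the A64 target and simulator compiled
+in (`VIXL_INCLUDE_TARGET_AARCH64` and `VIXL_INCLUDE_SIMULATOR_AARCH64`
+defined), and the `masm` and `entry` names are purely illustrative.
+
+```c++
+#include "aarch64/macro-assembler-aarch64.h"
+#include "aarch64/simulator-aarch64.h"
+
+using namespace vixl::aarch64;
+
+int main() {
+  MacroAssembler masm;
+  Label entry;
+  masm.Bind(&entry);
+  masm.Add(x0, x0, x1);  // Return x0 + x1.
+  masm.Ret();
+  masm.FinalizeCode();
+
+  // Run the generated code on the simulated CPU.
+  Decoder decoder;
+  Simulator simulator(&decoder);
+  simulator.WriteXRegister(0, 20);
+  simulator.WriteXRegister(1, 22);
+  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&entry));
+  return simulator.ReadXRegister(0) == 42 ? 0 : 1;  // Expect 20 + 22 = 42.
+}
+```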
+ +By default, the `tools/test.py` script runs a linter to check that the source +code conforms with the code style guide, and to detect several common errors +that the compiler may not warn about. This is most useful for VIXL developers. +The linter has the following dependencies: + + 1. Git must be installed, and the VIXL project must be in a valid Git + repository, such as one produced using `git clone`. + 2. `cpplint.py`, [as provided by Google][cpplint], must be available (and + executable) on the `PATH`. + +It is possible to tell `tools/test.py` to skip the linter stage by passing +`--nolint`. This removes the dependency on `cpplint.py` and Git. The `--nolint` +option is implied if the VIXL project is a snapshot (with no `.git` directory). + +Additionally, `tools/test.py` tests code formatting using `clang-format-3.8`. +If you don't have `clang-format-3.8`, disable the test using the +`--noclang-format` option. + +Also note that the tests for the tracing features depend upon external `diff` +and `sed` tools. If these tools are not available in `PATH`, these tests will +fail. + +Getting Started +--------------- + +We have separate guides for introducing VIXL, depending on what architecture you +are targeting. A guide for working with AArch32 can be found +[here][getting-started-aarch32], while the AArch64 guide is +[here][getting-started-aarch64]. Example source code is provided in the +[examples](examples) directory. You can build examples with either `scons +aarch32_examples` or `scons aarch64_examples` from the root directory, or use +`scons --help` to get a detailed list of available build targets. + + + + +[cpplint]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py + "Google's cpplint.py script." + +[vixl]: https://git.linaro.org/arm/vixl.git + "The VIXL repository at 'https://git.linaro.org'." + +[getting-started-aarch32]: doc/aarch32/getting-started-aarch32.md + "Introduction to VIXL for AArch32." + +[getting-started-aarch64]: doc/aarch64/getting-started-aarch64.md + "Introduction to VIXL for AArch64." diff --git a/dep/vixl/VERSIONS.md b/dep/vixl/VERSIONS.md new file mode 100644 index 000000000..87bc0dbae --- /dev/null +++ b/dep/vixl/VERSIONS.md @@ -0,0 +1,30 @@ +Versioning +========== + +Since version 3.0.0, VIXL uses [Semantic Versioning 2.0.0][semver]. + +Briefly: + +- Backwards-incompatible changes update the _major_ version. +- New features update the _minor_ version. +- Bug fixes update the _patch_ version. + +Why 3.0.0? +---------- + +VIXL was originally released as 1.x using snapshot releases. When we moved VIXL +into Linaro, we started working directly on `master` and stopped tagging +named releases. However, we informally called this "VIXL 2", so we are skipping +2.0.0 to avoid potential confusion. + +Using `master` +-------------- + +Users who want to take the latest development version of VIXL can still take +commits from `master`. Our day-to-day development process hasn't changed and +these commits should still pass their own tests. However, note that commits not +explicitly tagged with a given version should be considered to be unversioned, +with no backwards-compatibility guarantees. 
+
+[semver]: https://semver.org/spec/v2.0.0.html
+    "Semantic Versioning 2.0.0 Specification"
diff --git a/dep/vixl/include/vixl/aarch32/assembler-aarch32.h b/dep/vixl/include/vixl/aarch32/assembler-aarch32.h
new file mode 100644
index 000000000..bb7df8404
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch32/assembler-aarch32.h
@@ -0,0 +1,6159 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH32_ASSEMBLER_AARCH32_H_
+#define VIXL_AARCH32_ASSEMBLER_AARCH32_H_
+
+#include "assembler-base-vixl.h"
+
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/location-aarch32.h"
+
+namespace vixl {
+namespace aarch32 {
+
+class Assembler : public internal::AssemblerBase {
+  InstructionSet isa_;
+  Condition first_condition_;
+  uint16_t it_mask_;
+  bool has_32_dregs_;
+  bool allow_unpredictable_;
+  bool allow_strongly_discouraged_;
+
+ protected:
+  void EmitT32_16(uint16_t instr);
+  void EmitT32_32(uint32_t instr);
+  void EmitA32(uint32_t instr);
+  // Check that the condition of the current instruction is consistent with the
+  // IT state.
+  void CheckIT(Condition condition) {
+#ifdef VIXL_DEBUG
+    PerformCheckIT(condition);
+#else
+    USE(condition);
+#endif
+  }
+#ifdef VIXL_DEBUG
+  void PerformCheckIT(Condition condition);
+#endif
+  // Advance the IT state: the top bit of the 4-bit mask supplies cond<0> for
+  // the next instruction in the block, and one mask bit is consumed.
+  void AdvanceIT() {
+    first_condition_ =
+        Condition((first_condition_.GetCondition() & 0xe) | (it_mask_ >> 3));
+    it_mask_ = (it_mask_ << 1) & 0xf;
+  }
+  // Virtual, in order to be overridden by the MacroAssembler, which needs to
+  // notify the pool manager.
+ virtual void BindHelper(Label* label); + + uint32_t Link(uint32_t instr, + Location* location, + const Location::EmitOperator& op, + const ReferenceInfo* info); + + public: + class AllowUnpredictableScope { + Assembler* assembler_; + bool old_; + + public: + explicit AllowUnpredictableScope(Assembler* assembler) + : assembler_(assembler), old_(assembler->allow_unpredictable_) { + assembler_->allow_unpredictable_ = true; + } + ~AllowUnpredictableScope() { assembler_->allow_unpredictable_ = old_; } + }; + class AllowStronglyDiscouragedScope { + Assembler* assembler_; + bool old_; + + public: + explicit AllowStronglyDiscouragedScope(Assembler* assembler) + : assembler_(assembler), old_(assembler->allow_strongly_discouraged_) { + assembler_->allow_strongly_discouraged_ = true; + } + ~AllowStronglyDiscouragedScope() { + assembler_->allow_strongly_discouraged_ = old_; + } + }; + + explicit Assembler(InstructionSet isa = kDefaultISA) + : isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + // Avoid compiler warning. + USE(isa_); + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + USE(isa_); + VIXL_ASSERT(isa == T32); +#endif + } + explicit Assembler(size_t capacity, InstructionSet isa = kDefaultISA) + : AssemblerBase(capacity), + isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + VIXL_ASSERT(isa == T32); +#endif + } + Assembler(byte* buffer, size_t capacity, InstructionSet isa = kDefaultISA) + : AssemblerBase(buffer, capacity), + isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + VIXL_ASSERT(isa == T32); +#endif + } + virtual ~Assembler() {} + + void UseInstructionSet(InstructionSet isa) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + USE(isa); + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + USE(isa); + VIXL_ASSERT(isa == T32); +#else + VIXL_ASSERT((isa_ == isa) || (GetCursorOffset() == 0)); + isa_ = isa; +#endif + } + +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + InstructionSet GetInstructionSetInUse() const { return A32; } +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + InstructionSet GetInstructionSetInUse() const { return T32; } +#else + InstructionSet GetInstructionSetInUse() const { return isa_; } +#endif + + void UseT32() { UseInstructionSet(T32); } + void UseA32() { UseInstructionSet(A32); } + bool IsUsingT32() const { return GetInstructionSetInUse() == T32; } + bool IsUsingA32() const { return GetInstructionSetInUse() == A32; } + + void SetIT(Condition first_condition, uint16_t it_mask) { + VIXL_ASSERT(it_mask_ == 0); + first_condition_ = first_condition; + it_mask_ = it_mask; + } + bool InITBlock() { return it_mask_ != 0; } + bool OutsideITBlock() { return it_mask_ == 0; } + bool OutsideITBlockOrLast() { return (it_mask_ == 0) || (it_mask_ == 0x8); } + bool OutsideITBlockAndAlOrLast(Condition cond) { + return ((it_mask_ == 0) && cond.Is(al)) || (it_mask_ == 0x8); + } + void CheckNotIT() { VIXL_ASSERT(it_mask_ == 0); } + bool Has32DRegs() const { return has_32_dregs_; } + void SetHas32DRegs(bool 
has_32_dregs) { has_32_dregs_ = has_32_dregs; }
+
+  int32_t GetCursorOffset() const {
+    ptrdiff_t offset = buffer_.GetCursorOffset();
+    VIXL_ASSERT(IsInt32(offset));
+    return static_cast<int32_t>(offset);
+  }
+
+  uint32_t GetArchitectureStatePCOffset() const { return IsUsingT32() ? 4 : 8; }
+
+  // Bind a raw Location that will never be tracked by the pool manager.
+  void bind(Location* location) {
+    VIXL_ASSERT(AllowAssembler());
+    VIXL_ASSERT(!location->IsBound());
+    location->SetLocation(this, GetCursorOffset());
+    location->MarkBound();
+  }
+
+  // Bind a Label, which may be tracked by the pool manager in the presence of a
+  // MacroAssembler.
+  void bind(Label* label) {
+    VIXL_ASSERT(AllowAssembler());
+    BindHelper(label);
+  }
+
+  void place(RawLiteral* literal) {
+    VIXL_ASSERT(AllowAssembler());
+    VIXL_ASSERT(literal->IsManuallyPlaced());
+    literal->SetLocation(this, GetCursorOffset());
+    literal->MarkBound();
+    GetBuffer()->EnsureSpaceFor(literal->GetSize());
+    GetBuffer()->EmitData(literal->GetDataAddress(), literal->GetSize());
+  }
+
+  size_t GetSizeOfCodeGeneratedSince(Label* label) const {
+    VIXL_ASSERT(label->IsBound());
+    return buffer_.GetOffsetFrom(label->GetLocation());
+  }
+
+  // Helpers for the it instruction. The hex argument is the 4-bit IT mask from
+  // the T32 encoding: the position of the least-significant set bit encodes
+  // the length of the block, and the bits above it encode the T ('then') /
+  // E ('else') pattern, assuming cond<0> == 0 (e.g. itte -> 0b0110).
+  void it(Condition cond) { it(cond, 0x8); }
+  void itt(Condition cond) { it(cond, 0x4); }
+  void ite(Condition cond) { it(cond, 0xc); }
+  void ittt(Condition cond) { it(cond, 0x2); }
+  void itet(Condition cond) { it(cond, 0xa); }
+  void itte(Condition cond) { it(cond, 0x6); }
+  void itee(Condition cond) { it(cond, 0xe); }
+  void itttt(Condition cond) { it(cond, 0x1); }
+  void itett(Condition cond) { it(cond, 0x9); }
+  void ittet(Condition cond) { it(cond, 0x5); }
+  void iteet(Condition cond) { it(cond, 0xd); }
+  void ittte(Condition cond) { it(cond, 0x3); }
+  void itete(Condition cond) { it(cond, 0xb); }
+  void ittee(Condition cond) { it(cond, 0x7); }
+  void iteee(Condition cond) { it(cond, 0xf); }
+
+  // Start of generated code.
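+  // The typedefs and Delegate() methods below are generated: one typedef per
+  // encoder signature, and one virtual Delegate() per signature. The assembler
+  // calls a Delegate() when asked to encode an operand combination that has no
+  // direct encoding; this base class reports that as an error
+  // (UnimplementedDelegate), while the MacroAssembler overrides these hooks to
+  // emit an equivalent sequence of simpler instructions. For example,
+  // `Vmov(F32, s0, 1.1)` does not fit the VFP imm8 immediate encoding, so it
+  // lands in the InstructionCondDtSSop delegate, which a MacroAssembler
+  // typically services by materialising the constant from a literal pool.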
+ typedef void (Assembler::*InstructionCondSizeRROp)(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondROp)(Condition cond, + Register rd, + const Operand& operand); + typedef void (Assembler::*InstructionROp)(Register rd, + const Operand& operand); + typedef void (Assembler::*InstructionCondRROp)(Condition cond, + Register rd, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondSizeRL)(Condition cond, + EncodingSize size, + Register rd, + Location* location); + typedef void (Assembler::*InstructionDtQQ)(DataType dt, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondSizeL)(Condition cond, + EncodingSize size, + Location* location); + typedef void (Assembler::*InstructionCondRII)(Condition cond, + Register rd, + uint32_t lsb, + uint32_t width); + typedef void (Assembler::*InstructionCondRRII)( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + typedef void (Assembler::*InstructionCondI)(Condition cond, uint32_t imm); + typedef void (Assembler::*InstructionCondL)(Condition cond, + Location* location); + typedef void (Assembler::*InstructionCondR)(Condition cond, Register rm); + typedef void (Assembler::*InstructionRL)(Register rn, Location* location); + typedef void (Assembler::*InstructionCond)(Condition cond); + typedef void (Assembler::*InstructionCondRR)(Condition cond, + Register rd, + Register rm); + typedef void (Assembler::*InstructionCondSizeROp)(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondRRR)(Condition cond, + Register rd, + Register rn, + Register rm); + typedef void (Assembler::*InstructionCondBa)(Condition cond, + MemoryBarrier option); + typedef void (Assembler::*InstructionCondRwbDrl)(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondRMop)(Condition cond, + Register rt, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondRRMop)(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondSizeRwbRl)(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + typedef void (Assembler::*InstructionCondRwbRl)(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + typedef void (Assembler::*InstructionCondSizeRMop)(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondRL)(Condition cond, + Register rt, + Location* location); + typedef void (Assembler::*InstructionCondRRL)(Condition cond, + Register rt, + Register rt2, + Location* location); + typedef void (Assembler::*InstructionCondRRRR)( + Condition cond, Register rd, Register rn, Register rm, Register ra); + typedef void (Assembler::*InstructionCondRSr)(Condition cond, + Register rd, + SpecialRegister spec_reg); + typedef void (Assembler::*InstructionCondMsrOp)( + Condition cond, MaskedSpecialRegister spec_reg, const Operand& operand); + typedef void (Assembler::*InstructionCondSizeRRR)( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm); + typedef void (Assembler::*InstructionCondSize)(Condition cond, + EncodingSize size); + typedef void (Assembler::*InstructionCondMop)(Condition cond, + const MemOperand& operand); + typedef void 
(Assembler::*InstructionCondSizeRl)(Condition cond, + EncodingSize size, + RegisterList registers); + typedef void (Assembler::*InstructionCondSizeOrl)(Condition cond, + EncodingSize size, + Register rt); + typedef void (Assembler::*InstructionCondSizeRR)(Condition cond, + EncodingSize size, + Register rd, + Register rm); + typedef void (Assembler::*InstructionDtQQQ)(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm); + typedef void (Assembler::*InstructionCondRIOp)(Condition cond, + Register rd, + uint32_t imm, + const Operand& operand); + typedef void (Assembler::*InstructionCondRIR)(Condition cond, + Register rd, + uint32_t imm, + Register rn); + typedef void (Assembler::*InstructionCondRRRMop)(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondSizeI)(Condition cond, + EncodingSize size, + uint32_t imm); + typedef void (Assembler::*InstructionCondDtDDD)( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtQQQ)( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + typedef void (Assembler::*InstructionCondDtQDD)( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtDD)(Condition cond, + DataType dt, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtQQ)(Condition cond, + DataType dt, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondDtSS)(Condition cond, + DataType dt, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionCondDtSSS)( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + typedef void (Assembler::*InstructionCondDtDQQ)( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + typedef void (Assembler::*InstructionCondDtQQD)( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtDDDop)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtQQQop)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + typedef void (Assembler::*InstructionCondDtSSop)(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand); + typedef void (Assembler::*InstructionCondDtDDop)(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtDtDS)( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + typedef void (Assembler::*InstructionCondDtDtSD)( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtDDSi)(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtQQSi)(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtSSSi)(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtDD)( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtQQ)( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm); + typedef void (Assembler::*InstructionCondDtDtDQ)( + 
Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm); + typedef void (Assembler::*InstructionCondDtDtQD)( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtSS)( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + typedef void (Assembler::*InstructionDtDtDD)(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionDtDtQQ)(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionDtDtSS)(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionDtDtSD)(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtQR)(Condition cond, + DataType dt, + QRegister rd, + Register rt); + typedef void (Assembler::*InstructionCondDtDR)(Condition cond, + DataType dt, + DRegister rd, + Register rt); + typedef void (Assembler::*InstructionCondDtDDx)(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQDx)(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtDDDDop)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtQQQQop)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand); + typedef void (Assembler::*InstructionCondDtNrlAmop)( + Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + typedef void (Assembler::*InstructionCondDtNrlMop)( + Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondDtRwbDrl)(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondDtRwbSrl)(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + typedef void (Assembler::*InstructionCondDtDL)(Condition cond, + DataType dt, + DRegister rd, + Location* location); + typedef void (Assembler::*InstructionCondDtDMop)(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondDtSL)(Condition cond, + DataType dt, + SRegister rd, + Location* location); + typedef void (Assembler::*InstructionCondDtSMop)(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + typedef void (Assembler::*InstructionDtDDD)(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm); + typedef void (Assembler::*InstructionDtSSS)(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm); + typedef void (Assembler::*InstructionCondDtDDDx)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQQDx)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQDDx)(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondRS)(Condition cond, + Register rt, + SRegister rn); + typedef void (Assembler::*InstructionCondSR)(Condition cond, + SRegister rn, + Register rt); + typedef void (Assembler::*InstructionCondRRD)(Condition cond, + Register rt, + 
Register rt2, + DRegister rm); + typedef void (Assembler::*InstructionCondDRR)(Condition cond, + DRegister rm, + Register rt, + Register rt2); + typedef void (Assembler::*InstructionCondRRSS)( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + typedef void (Assembler::*InstructionCondSSRR)( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + typedef void (Assembler::*InstructionCondDtDxR)(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt); + typedef void (Assembler::*InstructionCondDtQQop)(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand); + typedef void (Assembler::*InstructionCondDtRDx)(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn); + typedef void (Assembler::*InstructionCondDtQD)(Condition cond, + DataType dt, + QRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtDQ)(Condition cond, + DataType dt, + DRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondRoaSfp)(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg); + typedef void (Assembler::*InstructionCondSfpR)(Condition cond, + SpecialFPRegister spec_reg, + Register rt); + typedef void (Assembler::*InstructionCondDtDDIr)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtQQIr)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtQDIr)(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtDrl)(Condition cond, + DataType dt, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondDtSrl)(Condition cond, + DataType dt, + SRegisterList sreglist); + typedef void (Assembler::*InstructionCondDtDQQop)(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + typedef void (Assembler::*InstructionDtDD)(DataType dt, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionDtSS)(DataType dt, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionCondDtQDDop)(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtDNrlD)( + Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + virtual void Delegate(InstructionType type, + InstructionCondSizeRROp /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdc) || (type == kAdcs) || (type == kAdd) || + (type == kAdds) || (type == kAnd) || (type == kAnds) || + (type == kAsr) || (type == kAsrs) || (type == kBic) || + (type == kBics) || (type == kEor) || (type == kEors) || + (type == kLsl) || (type == kLsls) || (type == kLsr) || + (type == kLsrs) || (type == kOrr) || (type == kOrrs) || + (type == kRor) || (type == kRors) || (type == kRsb) || + (type == kRsbs) || (type == kSbc) || (type == kSbcs) || + (type == kSub) || (type == kSubs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondROp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdd) || (type == kMovt) || (type == kMovw) || + (type == kSub) || (type == kSxtb16) || (type == 
kTeq) || + (type == kUxtb16)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionROp /*instruction*/, + Register /*rd*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdds) || (type == kSubs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRROp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAddw) || (type == kOrn) || (type == kOrns) || + (type == kPkhbt) || (type == kPkhtb) || (type == kRsc) || + (type == kRscs) || (type == kSubw) || (type == kSxtab) || + (type == kSxtab16) || (type == kSxtah) || (type == kUxtab) || + (type == kUxtab16) || (type == kUxtah)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRL /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kAdr) || (type == kLdr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtQQ /*instruction*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp) || (type == kVrintx) || (type == kVrintz)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeL /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kB)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRII /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*lsb*/, + uint32_t /*width*/) { + USE(type); + VIXL_ASSERT((type == kBfc)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRII /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + uint32_t /*lsb*/, + uint32_t /*width*/) { + USE(type); + VIXL_ASSERT((type == kBfi) || (type == kSbfx) || (type == kUbfx)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondI /*instruction*/, + Condition /*cond*/, + uint32_t /*imm*/) { + USE(type); + VIXL_ASSERT((type == kBkpt) || (type == kHlt) || (type == kHvc) || + (type == kSvc)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondL /*instruction*/, + Condition /*cond*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kBl) || (type == kBlx) || (type == kPld) || + (type == kPli)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondR /*instruction*/, + Condition /*cond*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kBlx) || (type == kBx) || (type == kBxj)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionRL /*instruction*/, + Register /*rn*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kCbnz) || (type == kCbz)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCond /*instruction*/, + Condition /*cond*/) { + USE(type); + VIXL_ASSERT((type == kClrex)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rm*/) { + USE(type); 
+ VIXL_ASSERT((type == kClz) || (type == kRbit) || (type == kRrx) || + (type == kRrxs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeROp /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) || + (type == kMovs) || (type == kMvn) || (type == kMvns) || + (type == kSxtb) || (type == kSxth) || (type == kTst) || + (type == kUxtb) || (type == kUxth)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kCrc32b) || (type == kCrc32cb) || (type == kCrc32ch) || + (type == kCrc32cw) || (type == kCrc32h) || (type == kCrc32w) || + (type == kMuls) || (type == kQadd) || (type == kQadd16) || + (type == kQadd8) || (type == kQasx) || (type == kQdadd) || + (type == kQdsub) || (type == kQsax) || (type == kQsub) || + (type == kQsub16) || (type == kQsub8) || (type == kSadd16) || + (type == kSadd8) || (type == kSasx) || (type == kSdiv) || + (type == kSel) || (type == kShadd16) || (type == kShadd8) || + (type == kShasx) || (type == kShsax) || (type == kShsub16) || + (type == kShsub8) || (type == kSmmul) || (type == kSmmulr) || + (type == kSmuad) || (type == kSmuadx) || (type == kSmulbb) || + (type == kSmulbt) || (type == kSmultb) || (type == kSmultt) || + (type == kSmulwb) || (type == kSmulwt) || (type == kSmusd) || + (type == kSmusdx) || (type == kSsax) || (type == kSsub16) || + (type == kSsub8) || (type == kUadd16) || (type == kUadd8) || + (type == kUasx) || (type == kUdiv) || (type == kUhadd16) || + (type == kUhadd8) || (type == kUhasx) || (type == kUhsax) || + (type == kUhsub16) || (type == kUhsub8) || (type == kUqadd16) || + (type == kUqadd8) || (type == kUqasx) || (type == kUqsax) || + (type == kUqsub16) || (type == kUqsub8) || (type == kUsad8) || + (type == kUsax) || (type == kUsub16) || (type == kUsub8)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondBa /*instruction*/, + Condition /*cond*/, + MemoryBarrier /*option*/) { + USE(type); + VIXL_ASSERT((type == kDmb) || (type == kDsb) || (type == kIsb)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRwbDrl /*instruction*/, + Condition /*cond*/, + Register /*rn*/, + WriteBack /*write_back*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kFldmdbx) || (type == kFldmiax) || + (type == kFstmdbx) || (type == kFstmiax)); + UnimplementedDelegate(type); + } + virtual void DelegateIt(Condition /*cond*/, uint16_t /*mask*/) { + UnimplementedDelegate(kIt); + } + virtual void Delegate(InstructionType type, + InstructionCondRMop /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kLda) || (type == kLdab) || (type == kLdaex) || + (type == kLdaexb) || (type == kLdaexh) || (type == kLdah) || + (type == kLdrex) || (type == kLdrexb) || (type == kLdrexh) || + (type == kStl) || (type == kStlb) || (type == kStlh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRMop /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kLdaexd) || (type == kLdrd) || (type == 
kLdrexd) || + (type == kStlex) || (type == kStlexb) || (type == kStlexh) || + (type == kStrd) || (type == kStrex) || (type == kStrexb) || + (type == kStrexh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRwbRl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rn*/, + WriteBack /*write_back*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kLdm) || (type == kLdmfd) || (type == kStm) || + (type == kStmdb) || (type == kStmea)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRwbRl /*instruction*/, + Condition /*cond*/, + Register /*rn*/, + WriteBack /*write_back*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kLdmda) || (type == kLdmdb) || (type == kLdmea) || + (type == kLdmed) || (type == kLdmfa) || (type == kLdmib) || + (type == kStmda) || (type == kStmed) || (type == kStmfa) || + (type == kStmfd) || (type == kStmib)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRMop /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rt*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) || + (type == kLdrsb) || (type == kLdrsh) || (type == kStr) || + (type == kStrb) || (type == kStrh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRL /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) || + (type == kLdrsh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRL /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kLdrd)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/, + Register /*ra*/) { + USE(type); + VIXL_ASSERT((type == kMla) || (type == kMlas) || (type == kMls) || + (type == kSmlabb) || (type == kSmlabt) || (type == kSmlad) || + (type == kSmladx) || (type == kSmlal) || (type == kSmlalbb) || + (type == kSmlalbt) || (type == kSmlald) || (type == kSmlaldx) || + (type == kSmlals) || (type == kSmlaltb) || (type == kSmlaltt) || + (type == kSmlatb) || (type == kSmlatt) || (type == kSmlawb) || + (type == kSmlawt) || (type == kSmlsd) || (type == kSmlsdx) || + (type == kSmlsld) || (type == kSmlsldx) || (type == kSmmla) || + (type == kSmmlar) || (type == kSmmls) || (type == kSmmlsr) || + (type == kSmull) || (type == kSmulls) || (type == kUmaal) || + (type == kUmlal) || (type == kUmlals) || (type == kUmull) || + (type == kUmulls) || (type == kUsada8)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRSr /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + SpecialRegister /*spec_reg*/) { + USE(type); + VIXL_ASSERT((type == kMrs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondMsrOp /*instruction*/, + Condition /*cond*/, + MaskedSpecialRegister /*spec_reg*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kMsr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRRR 
/*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kMul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSize /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/) { + USE(type); + VIXL_ASSERT((type == kNop) || (type == kYield)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondMop /*instruction*/, + Condition /*cond*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kPld) || (type == kPldw) || (type == kPli)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kPop) || (type == kPush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeOrl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kPop) || (type == kPush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRR /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kRev) || (type == kRev16) || (type == kRevsh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtQQQ /*instruction*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRIOp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*imm*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kSsat) || (type == kUsat)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRIR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*imm*/, + Register /*rn*/) { + USE(type); + VIXL_ASSERT((type == kSsat16) || (type == kUsat16)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRRMop /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rt*/, + Register /*rt2*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kStlexd) || (type == kStrexd)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeI /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + uint32_t /*imm*/) { + USE(type); + VIXL_ASSERT((type == kUdf)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaba) || (type == kVabd) || (type == kVacge) || + (type == kVacgt) || (type == kVacle) || (type == kVaclt) || + (type == kVadd) || (type == kVbif) || (type == kVbit) || + (type == kVbsl) || (type == kVceq) || (type == kVcge) || + (type == kVcgt) || (type == kVcle) || (type == kVclt) || + (type == kVdiv) || (type == kVeor) || (type == kVfma) || + (type == kVfms) || (type == kVfnma) || (type == kVfnms) || + (type == kVhadd) || (type == kVhsub) || (type == kVmax) || + 
(type == kVmin) || (type == kVmla) || (type == kVmls) || + (type == kVmul) || (type == kVnmla) || (type == kVnmls) || + (type == kVnmul) || (type == kVpadd) || (type == kVpmax) || + (type == kVpmin) || (type == kVqadd) || (type == kVqdmulh) || + (type == kVqrdmulh) || (type == kVqrshl) || (type == kVqsub) || + (type == kVrecps) || (type == kVrhadd) || (type == kVrshl) || + (type == kVrsqrts) || (type == kVsub) || (type == kVtst)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaba) || (type == kVabd) || (type == kVacge) || + (type == kVacgt) || (type == kVacle) || (type == kVaclt) || + (type == kVadd) || (type == kVbif) || (type == kVbit) || + (type == kVbsl) || (type == kVceq) || (type == kVcge) || + (type == kVcgt) || (type == kVcle) || (type == kVclt) || + (type == kVeor) || (type == kVfma) || (type == kVfms) || + (type == kVhadd) || (type == kVhsub) || (type == kVmax) || + (type == kVmin) || (type == kVmla) || (type == kVmls) || + (type == kVmul) || (type == kVqadd) || (type == kVqdmulh) || + (type == kVqrdmulh) || (type == kVqrshl) || (type == kVqsub) || + (type == kVrecps) || (type == kVrhadd) || (type == kVrshl) || + (type == kVrsqrts) || (type == kVsub) || (type == kVtst)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabal) || (type == kVabdl) || (type == kVaddl) || + (type == kVmlal) || (type == kVmlsl) || (type == kVmull) || + (type == kVqdmlal) || (type == kVqdmlsl) || + (type == kVqdmull) || (type == kVsubl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabs) || (type == kVcls) || (type == kVclz) || + (type == kVcnt) || (type == kVneg) || (type == kVpadal) || + (type == kVpaddl) || (type == kVqabs) || (type == kVqneg) || + (type == kVrecpe) || (type == kVrev16) || (type == kVrev32) || + (type == kVrev64) || (type == kVrintr) || (type == kVrintx) || + (type == kVrintz) || (type == kVrsqrte) || (type == kVsqrt) || + (type == kVswp) || (type == kVtrn) || (type == kVuzp) || + (type == kVzip)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabs) || (type == kVcls) || (type == kVclz) || + (type == kVcnt) || (type == kVneg) || (type == kVpadal) || + (type == kVpaddl) || (type == kVqabs) || (type == kVqneg) || + (type == kVrecpe) || (type == kVrev16) || (type == kVrev32) || + (type == kVrev64) || (type == kVrsqrte) || (type == kVswp) || + (type == kVtrn) || (type == kVuzp) || (type == kVzip)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSS /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabs) || (type == kVneg) || (type == kVrintr) || + (type == kVrintx) || (type == kVrintz) || (type == kVsqrt)); + UnimplementedDelegate(type); + } + 
virtual void Delegate(InstructionType type, + InstructionCondDtSSS /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rn*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVadd) || (type == kVdiv) || (type == kVfma) || + (type == kVfms) || (type == kVfnma) || (type == kVfnms) || + (type == kVmla) || (type == kVmls) || (type == kVmul) || + (type == kVnmla) || (type == kVnmls) || (type == kVnmul) || + (type == kVsub)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaddhn) || (type == kVraddhn) || (type == kVrsubhn) || + (type == kVsubhn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaddw) || (type == kVsubw)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVand) || (type == kVbic) || (type == kVceq) || + (type == kVcge) || (type == kVcgt) || (type == kVcle) || + (type == kVclt) || (type == kVorn) || (type == kVorr) || + (type == kVqshl) || (type == kVqshlu) || (type == kVrshr) || + (type == kVrsra) || (type == kVshl) || (type == kVshr) || + (type == kVsli) || (type == kVsra) || (type == kVsri)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVand) || (type == kVbic) || (type == kVceq) || + (type == kVcge) || (type == kVcgt) || (type == kVcle) || + (type == kVclt) || (type == kVorn) || (type == kVorr) || + (type == kVqshl) || (type == kVqshlu) || (type == kVrshr) || + (type == kVrsra) || (type == kVshl) || (type == kVshr) || + (type == kVsli) || (type == kVsra) || (type == kVsri)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSSop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + const SOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVcmp) || (type == kVcmpe) || (type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVcmp) || (type == kVcmpe) || (type == kVmov) || + (type == kVmvn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDS /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == kVcvtb) || (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == 
kVcvtb) || (type == kVcvtr) || + (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDDSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQQSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSSSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDQ /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSS /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == kVcvtb) || (type == kVcvtr) || + (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtDD /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtQQ /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtSS /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtSD /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == 
kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVext)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVext)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtNrlAmop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + const NeonRegisterList& /*nreglist*/, + const AlignedMemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVld1) || (type == kVld2) || (type == kVld3) || + (type == kVld4) || (type == kVst1) || (type == kVst2) || + (type == kVst3) || (type == kVst4)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtNrlMop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + const NeonRegisterList& /*nreglist*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVld3) || (type == kVst3)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRwbDrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + Register /*rn*/, + WriteBack /*write_back*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kVldm) || (type == kVldmdb) || (type == kVldmia) || + (type == kVstm) || (type == kVstmdb) || (type == kVstmia)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRwbSrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + Register /*rn*/, + WriteBack /*write_back*/, + SRegisterList /*sreglist*/) { + USE(type); + VIXL_ASSERT((type == kVldm) || (type == kVldmdb) || (type == kVldmia) || + (type == kVstm) || (type == kVstmdb) || (type == kVstmia)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDL /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kVldr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDMop /*instruction*/, + Condition 
/*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVldr) || (type == kVstr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSL /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kVldr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSMop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVldr) || (type == kVstr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDDD /*instruction*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm) || (type == kVseleq) || + (type == kVselge) || (type == kVselgt) || (type == kVselvs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtSSS /*instruction*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rn*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm) || (type == kVseleq) || + (type == kVselge) || (type == kVselgt) || (type == kVselvs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmla) || (type == kVmls) || (type == kVqdmulh) || + (type == kVqrdmulh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmla) || (type == kVmls) || (type == kVqdmulh) || + (type == kVqrdmulh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmlal) || (type == kVmlsl) || (type == kVqdmull)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRS /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + SRegister /*rn*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSR /*instruction*/, + Condition /*cond*/, + SRegister /*rn*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRD /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDRR /*instruction*/, + Condition /*cond*/, + DRegister /*rm*/, + Register /*rt*/, + Register /*rt2*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRSS /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + SRegister /*rm*/, + SRegister /*rm1*/) { 
+ USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSSRR /*instruction*/, + Condition /*cond*/, + SRegister /*rm*/, + SRegister /*rm1*/, + Register /*rt*/, + Register /*rt2*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDxR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegisterLane /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVmov) || (type == kVmvn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + Register /*rt*/, + DRegisterLane /*rn*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmovl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmovn) || (type == kVqmovn) || (type == kVqmovun)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRoaSfp /*instruction*/, + Condition /*cond*/, + RegisterOrAPSR_nzcv /*rt*/, + SpecialFPRegister /*spec_reg*/) { + USE(type); + VIXL_ASSERT((type == kVmrs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSfpR /*instruction*/, + Condition /*cond*/, + SpecialFPRegister /*spec_reg*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmsr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == kVmul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == kVmul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == kVmull) || (type == kVqdmlal) || (type == kVqdmlsl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kVpop) || (type == kVpush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegisterList /*sreglist*/) { + USE(type); + VIXL_ASSERT((type == kVpop) 
|| (type == kVpush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rm*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVqrshrn) || (type == kVqrshrun) || + (type == kVqshrn) || (type == kVqshrun) || (type == kVrshrn) || + (type == kVshrn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDD /*instruction*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtSS /*instruction*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rm*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVshll)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDNrlD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + const NeonRegisterList& /*nreglist*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVtbl) || (type == kVtbx)); + UnimplementedDelegate(type); + } + + void adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adc(Register rd, Register rn, const Operand& operand) { + adc(al, Best, rd, rn, operand); + } + void adc(Condition cond, Register rd, Register rn, const Operand& operand) { + adc(cond, Best, rd, rn, operand); + } + void adc(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + adc(al, size, rd, rn, operand); + } + + void adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adcs(Register rd, Register rn, const Operand& operand) { + adcs(al, Best, rd, rn, operand); + } + void adcs(Condition cond, Register rd, Register rn, const Operand& operand) { + adcs(cond, Best, rd, rn, operand); + } + void adcs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + adcs(al, size, rd, rn, operand); + } + + void add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void add(Register rd, Register rn, const Operand& operand) { + add(al, Best, rd, rn, operand); + } + void add(Condition cond, Register rd, Register rn, const Operand& operand) { + add(cond, Best, rd, rn, operand); + } + void add(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + add(al, size, rd, rn, operand); + } + + void add(Condition cond, Register rd, const Operand& operand); + void add(Register rd, const Operand& operand) { add(al, rd, operand); } + + void adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adds(Register rd, Register rn, const Operand& operand) { + adds(al, Best, rd, rn, operand); + } + void adds(Condition cond, Register rd, Register rn, const Operand& operand) { + adds(cond, Best, rd, rn, operand); + } + void adds(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + 
adds(al, size, rd, rn, operand); + } + + void adds(Register rd, const Operand& operand); + + void addw(Condition cond, Register rd, Register rn, const Operand& operand); + void addw(Register rd, Register rn, const Operand& operand) { + addw(al, rd, rn, operand); + } + + void adr(Condition cond, EncodingSize size, Register rd, Location* location); + bool adr_info(Condition cond, + EncodingSize size, + Register rd, + Location* location, + const struct ReferenceInfo** info); + void adr(Register rd, Location* location) { adr(al, Best, rd, location); } + void adr(Condition cond, Register rd, Location* location) { + adr(cond, Best, rd, location); + } + void adr(EncodingSize size, Register rd, Location* location) { + adr(al, size, rd, location); + } + + void and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void and_(Register rd, Register rn, const Operand& operand) { + and_(al, Best, rd, rn, operand); + } + void and_(Condition cond, Register rd, Register rn, const Operand& operand) { + and_(cond, Best, rd, rn, operand); + } + void and_(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + and_(al, size, rd, rn, operand); + } + + void ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void ands(Register rd, Register rn, const Operand& operand) { + ands(al, Best, rd, rn, operand); + } + void ands(Condition cond, Register rd, Register rn, const Operand& operand) { + ands(cond, Best, rd, rn, operand); + } + void ands(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + ands(al, size, rd, rn, operand); + } + + void asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void asr(Register rd, Register rm, const Operand& operand) { + asr(al, Best, rd, rm, operand); + } + void asr(Condition cond, Register rd, Register rm, const Operand& operand) { + asr(cond, Best, rd, rm, operand); + } + void asr(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + asr(al, size, rd, rm, operand); + } + + void asrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void asrs(Register rd, Register rm, const Operand& operand) { + asrs(al, Best, rd, rm, operand); + } + void asrs(Condition cond, Register rd, Register rm, const Operand& operand) { + asrs(cond, Best, rd, rm, operand); + } + void asrs(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + asrs(al, size, rd, rm, operand); + } + + void b(Condition cond, EncodingSize size, Location* location); + bool b_info(Condition cond, + EncodingSize size, + Location* location, + const struct ReferenceInfo** info); + void b(Location* location) { b(al, Best, location); } + void b(Condition cond, Location* location) { b(cond, Best, location); } + void b(EncodingSize size, Location* location) { b(al, size, location); } + + void bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width); + void bfc(Register rd, uint32_t lsb, uint32_t width) { + bfc(al, rd, lsb, width); + } + + void bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void bfi(Register rd, Register rn, uint32_t lsb, uint32_t width) { + bfi(al, rd, rn, lsb, width); + } + + void bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void bic(Register rd, Register rn, const Operand& operand) { + bic(al, Best, rd, rn, operand); + } + void bic(Condition 
cond, Register rd, Register rn, const Operand& operand) { + bic(cond, Best, rd, rn, operand); + } + void bic(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + bic(al, size, rd, rn, operand); + } + + void bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void bics(Register rd, Register rn, const Operand& operand) { + bics(al, Best, rd, rn, operand); + } + void bics(Condition cond, Register rd, Register rn, const Operand& operand) { + bics(cond, Best, rd, rn, operand); + } + void bics(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + bics(al, size, rd, rn, operand); + } + + void bkpt(Condition cond, uint32_t imm); + void bkpt(uint32_t imm) { bkpt(al, imm); } + + void bl(Condition cond, Location* location); + bool bl_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void bl(Location* location) { bl(al, location); } + + void blx(Condition cond, Location* location); + bool blx_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void blx(Location* location) { blx(al, location); } + + void blx(Condition cond, Register rm); + void blx(Register rm) { blx(al, rm); } + + void bx(Condition cond, Register rm); + void bx(Register rm) { bx(al, rm); } + + void bxj(Condition cond, Register rm); + void bxj(Register rm) { bxj(al, rm); } + + void cbnz(Register rn, Location* location); + bool cbnz_info(Register rn, + Location* location, + const struct ReferenceInfo** info); + + void cbz(Register rn, Location* location); + bool cbz_info(Register rn, + Location* location, + const struct ReferenceInfo** info); + + void clrex(Condition cond); + void clrex() { clrex(al); } + + void clz(Condition cond, Register rd, Register rm); + void clz(Register rd, Register rm) { clz(al, rd, rm); } + + void cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + void cmn(Register rn, const Operand& operand) { cmn(al, Best, rn, operand); } + void cmn(Condition cond, Register rn, const Operand& operand) { + cmn(cond, Best, rn, operand); + } + void cmn(EncodingSize size, Register rn, const Operand& operand) { + cmn(al, size, rn, operand); + } + + void cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + void cmp(Register rn, const Operand& operand) { cmp(al, Best, rn, operand); } + void cmp(Condition cond, Register rn, const Operand& operand) { + cmp(cond, Best, rn, operand); + } + void cmp(EncodingSize size, Register rn, const Operand& operand) { + cmp(al, size, rn, operand); + } + + void crc32b(Condition cond, Register rd, Register rn, Register rm); + void crc32b(Register rd, Register rn, Register rm) { crc32b(al, rd, rn, rm); } + + void crc32cb(Condition cond, Register rd, Register rn, Register rm); + void crc32cb(Register rd, Register rn, Register rm) { + crc32cb(al, rd, rn, rm); + } + + void crc32ch(Condition cond, Register rd, Register rn, Register rm); + void crc32ch(Register rd, Register rn, Register rm) { + crc32ch(al, rd, rn, rm); + } + + void crc32cw(Condition cond, Register rd, Register rn, Register rm); + void crc32cw(Register rd, Register rn, Register rm) { + crc32cw(al, rd, rn, rm); + } + + void crc32h(Condition cond, Register rd, Register rn, Register rm); + void crc32h(Register rd, Register rn, Register rm) { crc32h(al, rd, rn, rm); } + + void crc32w(Condition cond, Register rd, Register rn, Register rm); + void crc32w(Register rd, Register rn, Register rm) { crc32w(al, rd, rn, rm); } 
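+ // Note on the overload pattern used throughout this interface: each
+ // mnemonic's full form takes an explicit Condition and, where the ISA
+ // offers 16/32-bit encodings, an EncodingSize; the shorter overloads
+ // simply forward to it with the condition defaulted to `al` (always)
+ // and the encoding size defaulted to `Best`. For example,
+ // crc32w(r0, r1, r2) forwards to crc32w(al, r0, r1, r2).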
+ + void dmb(Condition cond, MemoryBarrier option); + void dmb(MemoryBarrier option) { dmb(al, option); } + + void dsb(Condition cond, MemoryBarrier option); + void dsb(MemoryBarrier option) { dsb(al, option); } + + void eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void eor(Register rd, Register rn, const Operand& operand) { + eor(al, Best, rd, rn, operand); + } + void eor(Condition cond, Register rd, Register rn, const Operand& operand) { + eor(cond, Best, rd, rn, operand); + } + void eor(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + eor(al, size, rd, rn, operand); + } + + void eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void eors(Register rd, Register rn, const Operand& operand) { + eors(al, Best, rd, rn, operand); + } + void eors(Condition cond, Register rd, Register rn, const Operand& operand) { + eors(cond, Best, rd, rn, operand); + } + void eors(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + eors(al, size, rd, rn, operand); + } + + void fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fldmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + fldmdbx(al, rn, write_back, dreglist); + } + + void fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fldmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + fldmiax(al, rn, write_back, dreglist); + } + + void fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fstmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + fstmdbx(al, rn, write_back, dreglist); + } + + void fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fstmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + fstmiax(al, rn, write_back, dreglist); + } + + void hlt(Condition cond, uint32_t imm); + void hlt(uint32_t imm) { hlt(al, imm); } + + void hvc(Condition cond, uint32_t imm); + void hvc(uint32_t imm) { hvc(al, imm); } + + void isb(Condition cond, MemoryBarrier option); + void isb(MemoryBarrier option) { isb(al, option); } + + void it(Condition cond, uint16_t mask); + + void lda(Condition cond, Register rt, const MemOperand& operand); + void lda(Register rt, const MemOperand& operand) { lda(al, rt, operand); } + + void ldab(Condition cond, Register rt, const MemOperand& operand); + void ldab(Register rt, const MemOperand& operand) { ldab(al, rt, operand); } + + void ldaex(Condition cond, Register rt, const MemOperand& operand); + void ldaex(Register rt, const MemOperand& operand) { ldaex(al, rt, operand); } + + void ldaexb(Condition cond, Register rt, const MemOperand& operand); + void ldaexb(Register rt, const MemOperand& operand) { + ldaexb(al, rt, operand); + } + + void ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldaexd(Register rt, Register rt2, const MemOperand& operand) { + ldaexd(al, rt, rt2, operand); + } + + void ldaexh(Condition cond, Register rt, const MemOperand& operand); + void ldaexh(Register rt, const MemOperand& operand) { + ldaexh(al, rt, operand); + } + + void ldah(Condition cond, Register rt, const MemOperand& operand); + void ldah(Register rt, const MemOperand& operand) { ldah(al, rt, operand); } + + void ldm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList 
registers); + void ldm(Register rn, WriteBack write_back, RegisterList registers) { + ldm(al, Best, rn, write_back, registers); + } + void ldm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldm(cond, Best, rn, write_back, registers); + } + void ldm(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldm(al, size, rn, write_back, registers); + } + + void ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmda(Register rn, WriteBack write_back, RegisterList registers) { + ldmda(al, rn, write_back, registers); + } + + void ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmdb(Register rn, WriteBack write_back, RegisterList registers) { + ldmdb(al, rn, write_back, registers); + } + + void ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmea(Register rn, WriteBack write_back, RegisterList registers) { + ldmea(al, rn, write_back, registers); + } + + void ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmed(Register rn, WriteBack write_back, RegisterList registers) { + ldmed(al, rn, write_back, registers); + } + + void ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmfa(Register rn, WriteBack write_back, RegisterList registers) { + ldmfa(al, rn, write_back, registers); + } + + void ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmfd(Register rn, WriteBack write_back, RegisterList registers) { + ldmfd(al, Best, rn, write_back, registers); + } + void ldmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldmfd(cond, Best, rn, write_back, registers); + } + void ldmfd(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldmfd(al, size, rn, write_back, registers); + } + + void ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmib(Register rn, WriteBack write_back, RegisterList registers) { + ldmib(al, rn, write_back, registers); + } + + void ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldr(Register rt, const MemOperand& operand) { + ldr(al, Best, rt, operand); + } + void ldr(Condition cond, Register rt, const MemOperand& operand) { + ldr(cond, Best, rt, operand); + } + void ldr(EncodingSize size, Register rt, const MemOperand& operand) { + ldr(al, size, rt, operand); + } + + void ldr(Condition cond, EncodingSize size, Register rt, Location* location); + bool ldr_info(Condition cond, + EncodingSize size, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldr(Register rt, Location* location) { ldr(al, Best, rt, location); } + void ldr(Condition cond, Register rt, Location* location) { + ldr(cond, Best, rt, location); + } + void ldr(EncodingSize size, Register rt, Location* location) { + ldr(al, size, rt, location); + } + + void ldrb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrb(Register rt, const MemOperand& operand) { + ldrb(al, Best, rt, operand); + } + void ldrb(Condition cond, Register rt, const MemOperand& operand) { + ldrb(cond, Best, rt, operand); + } + void ldrb(EncodingSize size, Register rt, const MemOperand& operand) { + ldrb(al, size, rt, operand); + } + + void ldrb(Condition 
cond, Register rt, Location* location); + bool ldrb_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrb(Register rt, Location* location) { ldrb(al, rt, location); } + + void ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldrd(Register rt, Register rt2, const MemOperand& operand) { + ldrd(al, rt, rt2, operand); + } + + void ldrd(Condition cond, Register rt, Register rt2, Location* location); + bool ldrd_info(Condition cond, + Register rt, + Register rt2, + Location* location, + const struct ReferenceInfo** info); + void ldrd(Register rt, Register rt2, Location* location) { + ldrd(al, rt, rt2, location); + } + + void ldrex(Condition cond, Register rt, const MemOperand& operand); + void ldrex(Register rt, const MemOperand& operand) { ldrex(al, rt, operand); } + + void ldrexb(Condition cond, Register rt, const MemOperand& operand); + void ldrexb(Register rt, const MemOperand& operand) { + ldrexb(al, rt, operand); + } + + void ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldrexd(Register rt, Register rt2, const MemOperand& operand) { + ldrexd(al, rt, rt2, operand); + } + + void ldrexh(Condition cond, Register rt, const MemOperand& operand); + void ldrexh(Register rt, const MemOperand& operand) { + ldrexh(al, rt, operand); + } + + void ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrh(Register rt, const MemOperand& operand) { + ldrh(al, Best, rt, operand); + } + void ldrh(Condition cond, Register rt, const MemOperand& operand) { + ldrh(cond, Best, rt, operand); + } + void ldrh(EncodingSize size, Register rt, const MemOperand& operand) { + ldrh(al, size, rt, operand); + } + + void ldrh(Condition cond, Register rt, Location* location); + bool ldrh_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrh(Register rt, Location* location) { ldrh(al, rt, location); } + + void ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrsb(Register rt, const MemOperand& operand) { + ldrsb(al, Best, rt, operand); + } + void ldrsb(Condition cond, Register rt, const MemOperand& operand) { + ldrsb(cond, Best, rt, operand); + } + void ldrsb(EncodingSize size, Register rt, const MemOperand& operand) { + ldrsb(al, size, rt, operand); + } + + void ldrsb(Condition cond, Register rt, Location* location); + bool ldrsb_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrsb(Register rt, Location* location) { ldrsb(al, rt, location); } + + void ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrsh(Register rt, const MemOperand& operand) { + ldrsh(al, Best, rt, operand); + } + void ldrsh(Condition cond, Register rt, const MemOperand& operand) { + ldrsh(cond, Best, rt, operand); + } + void ldrsh(EncodingSize size, Register rt, const MemOperand& operand) { + ldrsh(al, size, rt, operand); + } + + void ldrsh(Condition cond, Register rt, Location* location); + bool ldrsh_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrsh(Register rt, Location* location) { ldrsh(al, rt, location); } + + void lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsl(Register rd, Register rm, const Operand& operand) { + lsl(al, Best, rd, rm, operand); + } + 
void lsl(Condition cond, Register rd, Register rm, const Operand& operand) { + lsl(cond, Best, rd, rm, operand); + } + void lsl(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsl(al, size, rd, rm, operand); + } + + void lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsls(Register rd, Register rm, const Operand& operand) { + lsls(al, Best, rd, rm, operand); + } + void lsls(Condition cond, Register rd, Register rm, const Operand& operand) { + lsls(cond, Best, rd, rm, operand); + } + void lsls(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsls(al, size, rd, rm, operand); + } + + void lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsr(Register rd, Register rm, const Operand& operand) { + lsr(al, Best, rd, rm, operand); + } + void lsr(Condition cond, Register rd, Register rm, const Operand& operand) { + lsr(cond, Best, rd, rm, operand); + } + void lsr(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsr(al, size, rd, rm, operand); + } + + void lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsrs(Register rd, Register rm, const Operand& operand) { + lsrs(al, Best, rd, rm, operand); + } + void lsrs(Condition cond, Register rd, Register rm, const Operand& operand) { + lsrs(cond, Best, rd, rm, operand); + } + void lsrs(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsrs(al, size, rd, rm, operand); + } + + void mla(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mla(Register rd, Register rn, Register rm, Register ra) { + mla(al, rd, rn, rm, ra); + } + + void mlas(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mlas(Register rd, Register rn, Register rm, Register ra) { + mlas(al, rd, rn, rm, ra); + } + + void mls(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mls(Register rd, Register rn, Register rm, Register ra) { + mls(al, rd, rn, rm, ra); + } + + void mov(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mov(Register rd, const Operand& operand) { mov(al, Best, rd, operand); } + void mov(Condition cond, Register rd, const Operand& operand) { + mov(cond, Best, rd, operand); + } + void mov(EncodingSize size, Register rd, const Operand& operand) { + mov(al, size, rd, operand); + } + + void movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void movs(Register rd, const Operand& operand) { + movs(al, Best, rd, operand); + } + void movs(Condition cond, Register rd, const Operand& operand) { + movs(cond, Best, rd, operand); + } + void movs(EncodingSize size, Register rd, const Operand& operand) { + movs(al, size, rd, operand); + } + + void movt(Condition cond, Register rd, const Operand& operand); + void movt(Register rd, const Operand& operand) { movt(al, rd, operand); } + + void movw(Condition cond, Register rd, const Operand& operand); + void movw(Register rd, const Operand& operand) { movw(al, rd, operand); } + + void mrs(Condition cond, Register rd, SpecialRegister spec_reg); + void mrs(Register rd, SpecialRegister spec_reg) { mrs(al, rd, spec_reg); } + + void msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand); + void msr(MaskedSpecialRegister spec_reg, const Operand& operand) { + msr(al, spec_reg, operand); + } + + void 
mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm); + void mul(Register rd, Register rn, Register rm) { mul(al, Best, rd, rn, rm); } + void mul(Condition cond, Register rd, Register rn, Register rm) { + mul(cond, Best, rd, rn, rm); + } + void mul(EncodingSize size, Register rd, Register rn, Register rm) { + mul(al, size, rd, rn, rm); + } + + void muls(Condition cond, Register rd, Register rn, Register rm); + void muls(Register rd, Register rn, Register rm) { muls(al, rd, rn, rm); } + + void mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mvn(Register rd, const Operand& operand) { mvn(al, Best, rd, operand); } + void mvn(Condition cond, Register rd, const Operand& operand) { + mvn(cond, Best, rd, operand); + } + void mvn(EncodingSize size, Register rd, const Operand& operand) { + mvn(al, size, rd, operand); + } + + void mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mvns(Register rd, const Operand& operand) { + mvns(al, Best, rd, operand); + } + void mvns(Condition cond, Register rd, const Operand& operand) { + mvns(cond, Best, rd, operand); + } + void mvns(EncodingSize size, Register rd, const Operand& operand) { + mvns(al, size, rd, operand); + } + + void nop(Condition cond, EncodingSize size); + void nop() { nop(al, Best); } + void nop(Condition cond) { nop(cond, Best); } + void nop(EncodingSize size) { nop(al, size); } + + void orn(Condition cond, Register rd, Register rn, const Operand& operand); + void orn(Register rd, Register rn, const Operand& operand) { + orn(al, rd, rn, operand); + } + + void orns(Condition cond, Register rd, Register rn, const Operand& operand); + void orns(Register rd, Register rn, const Operand& operand) { + orns(al, rd, rn, operand); + } + + void orr(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void orr(Register rd, Register rn, const Operand& operand) { + orr(al, Best, rd, rn, operand); + } + void orr(Condition cond, Register rd, Register rn, const Operand& operand) { + orr(cond, Best, rd, rn, operand); + } + void orr(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + orr(al, size, rd, rn, operand); + } + + void orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void orrs(Register rd, Register rn, const Operand& operand) { + orrs(al, Best, rd, rn, operand); + } + void orrs(Condition cond, Register rd, Register rn, const Operand& operand) { + orrs(cond, Best, rd, rn, operand); + } + void orrs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + orrs(al, size, rd, rn, operand); + } + + void pkhbt(Condition cond, Register rd, Register rn, const Operand& operand); + void pkhbt(Register rd, Register rn, const Operand& operand) { + pkhbt(al, rd, rn, operand); + } + + void pkhtb(Condition cond, Register rd, Register rn, const Operand& operand); + void pkhtb(Register rd, Register rn, const Operand& operand) { + pkhtb(al, rd, rn, operand); + } + + void pld(Condition cond, Location* location); + bool pld_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void pld(Location* location) { pld(al, location); } + + void pld(Condition cond, const MemOperand& operand); + void pld(const MemOperand& operand) { pld(al, operand); } + + void pldw(Condition cond, const MemOperand& operand); + void pldw(const MemOperand& operand) { pldw(al, operand); } + + void pli(Condition cond, 
const MemOperand& operand); + void pli(const MemOperand& operand) { pli(al, operand); } + + void pli(Condition cond, Location* location); + bool pli_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void pli(Location* location) { pli(al, location); } + + void pop(Condition cond, EncodingSize size, RegisterList registers); + void pop(RegisterList registers) { pop(al, Best, registers); } + void pop(Condition cond, RegisterList registers) { + pop(cond, Best, registers); + } + void pop(EncodingSize size, RegisterList registers) { + pop(al, size, registers); + } + + void pop(Condition cond, EncodingSize size, Register rt); + void pop(Register rt) { pop(al, Best, rt); } + void pop(Condition cond, Register rt) { pop(cond, Best, rt); } + void pop(EncodingSize size, Register rt) { pop(al, size, rt); } + + void push(Condition cond, EncodingSize size, RegisterList registers); + void push(RegisterList registers) { push(al, Best, registers); } + void push(Condition cond, RegisterList registers) { + push(cond, Best, registers); + } + void push(EncodingSize size, RegisterList registers) { + push(al, size, registers); + } + + void push(Condition cond, EncodingSize size, Register rt); + void push(Register rt) { push(al, Best, rt); } + void push(Condition cond, Register rt) { push(cond, Best, rt); } + void push(EncodingSize size, Register rt) { push(al, size, rt); } + + void qadd(Condition cond, Register rd, Register rm, Register rn); + void qadd(Register rd, Register rm, Register rn) { qadd(al, rd, rm, rn); } + + void qadd16(Condition cond, Register rd, Register rn, Register rm); + void qadd16(Register rd, Register rn, Register rm) { qadd16(al, rd, rn, rm); } + + void qadd8(Condition cond, Register rd, Register rn, Register rm); + void qadd8(Register rd, Register rn, Register rm) { qadd8(al, rd, rn, rm); } + + void qasx(Condition cond, Register rd, Register rn, Register rm); + void qasx(Register rd, Register rn, Register rm) { qasx(al, rd, rn, rm); } + + void qdadd(Condition cond, Register rd, Register rm, Register rn); + void qdadd(Register rd, Register rm, Register rn) { qdadd(al, rd, rm, rn); } + + void qdsub(Condition cond, Register rd, Register rm, Register rn); + void qdsub(Register rd, Register rm, Register rn) { qdsub(al, rd, rm, rn); } + + void qsax(Condition cond, Register rd, Register rn, Register rm); + void qsax(Register rd, Register rn, Register rm) { qsax(al, rd, rn, rm); } + + void qsub(Condition cond, Register rd, Register rm, Register rn); + void qsub(Register rd, Register rm, Register rn) { qsub(al, rd, rm, rn); } + + void qsub16(Condition cond, Register rd, Register rn, Register rm); + void qsub16(Register rd, Register rn, Register rm) { qsub16(al, rd, rn, rm); } + + void qsub8(Condition cond, Register rd, Register rn, Register rm); + void qsub8(Register rd, Register rn, Register rm) { qsub8(al, rd, rn, rm); } + + void rbit(Condition cond, Register rd, Register rm); + void rbit(Register rd, Register rm) { rbit(al, rd, rm); } + + void rev(Condition cond, EncodingSize size, Register rd, Register rm); + void rev(Register rd, Register rm) { rev(al, Best, rd, rm); } + void rev(Condition cond, Register rd, Register rm) { + rev(cond, Best, rd, rm); + } + void rev(EncodingSize size, Register rd, Register rm) { + rev(al, size, rd, rm); + } + + void rev16(Condition cond, EncodingSize size, Register rd, Register rm); + void rev16(Register rd, Register rm) { rev16(al, Best, rd, rm); } + void rev16(Condition cond, Register rd, Register rm) { + rev16(cond, Best, rd, 
rm); + } + void rev16(EncodingSize size, Register rd, Register rm) { + rev16(al, size, rd, rm); + } + + void revsh(Condition cond, EncodingSize size, Register rd, Register rm); + void revsh(Register rd, Register rm) { revsh(al, Best, rd, rm); } + void revsh(Condition cond, Register rd, Register rm) { + revsh(cond, Best, rd, rm); + } + void revsh(EncodingSize size, Register rd, Register rm) { + revsh(al, size, rd, rm); + } + + void ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void ror(Register rd, Register rm, const Operand& operand) { + ror(al, Best, rd, rm, operand); + } + void ror(Condition cond, Register rd, Register rm, const Operand& operand) { + ror(cond, Best, rd, rm, operand); + } + void ror(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + ror(al, size, rd, rm, operand); + } + + void rors(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void rors(Register rd, Register rm, const Operand& operand) { + rors(al, Best, rd, rm, operand); + } + void rors(Condition cond, Register rd, Register rm, const Operand& operand) { + rors(cond, Best, rd, rm, operand); + } + void rors(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + rors(al, size, rd, rm, operand); + } + + void rrx(Condition cond, Register rd, Register rm); + void rrx(Register rd, Register rm) { rrx(al, rd, rm); } + + void rrxs(Condition cond, Register rd, Register rm); + void rrxs(Register rd, Register rm) { rrxs(al, rd, rm); } + + void rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void rsb(Register rd, Register rn, const Operand& operand) { + rsb(al, Best, rd, rn, operand); + } + void rsb(Condition cond, Register rd, Register rn, const Operand& operand) { + rsb(cond, Best, rd, rn, operand); + } + void rsb(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + rsb(al, size, rd, rn, operand); + } + + void rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void rsbs(Register rd, Register rn, const Operand& operand) { + rsbs(al, Best, rd, rn, operand); + } + void rsbs(Condition cond, Register rd, Register rn, const Operand& operand) { + rsbs(cond, Best, rd, rn, operand); + } + void rsbs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + rsbs(al, size, rd, rn, operand); + } + + void rsc(Condition cond, Register rd, Register rn, const Operand& operand); + void rsc(Register rd, Register rn, const Operand& operand) { + rsc(al, rd, rn, operand); + } + + void rscs(Condition cond, Register rd, Register rn, const Operand& operand); + void rscs(Register rd, Register rn, const Operand& operand) { + rscs(al, rd, rn, operand); + } + + void sadd16(Condition cond, Register rd, Register rn, Register rm); + void sadd16(Register rd, Register rn, Register rm) { sadd16(al, rd, rn, rm); } + + void sadd8(Condition cond, Register rd, Register rn, Register rm); + void sadd8(Register rd, Register rn, Register rm) { sadd8(al, rd, rn, rm); } + + void sasx(Condition cond, Register rd, Register rn, Register rm); + void sasx(Register rd, Register rn, Register rm) { sasx(al, rd, rn, rm); } + + void sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void sbc(Register rd, Register rn, const Operand& operand) { + sbc(al, Best, rd, rn, operand); + } + void sbc(Condition cond, Register rd, Register rn, const 
Operand& operand) { + sbc(cond, Best, rd, rn, operand); + } + void sbc(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sbc(al, size, rd, rn, operand); + } + + void sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void sbcs(Register rd, Register rn, const Operand& operand) { + sbcs(al, Best, rd, rn, operand); + } + void sbcs(Condition cond, Register rd, Register rn, const Operand& operand) { + sbcs(cond, Best, rd, rn, operand); + } + void sbcs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sbcs(al, size, rd, rn, operand); + } + + void sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + sbfx(al, rd, rn, lsb, width); + } + + void sdiv(Condition cond, Register rd, Register rn, Register rm); + void sdiv(Register rd, Register rn, Register rm) { sdiv(al, rd, rn, rm); } + + void sel(Condition cond, Register rd, Register rn, Register rm); + void sel(Register rd, Register rn, Register rm) { sel(al, rd, rn, rm); } + + void shadd16(Condition cond, Register rd, Register rn, Register rm); + void shadd16(Register rd, Register rn, Register rm) { + shadd16(al, rd, rn, rm); + } + + void shadd8(Condition cond, Register rd, Register rn, Register rm); + void shadd8(Register rd, Register rn, Register rm) { shadd8(al, rd, rn, rm); } + + void shasx(Condition cond, Register rd, Register rn, Register rm); + void shasx(Register rd, Register rn, Register rm) { shasx(al, rd, rn, rm); } + + void shsax(Condition cond, Register rd, Register rn, Register rm); + void shsax(Register rd, Register rn, Register rm) { shsax(al, rd, rn, rm); } + + void shsub16(Condition cond, Register rd, Register rn, Register rm); + void shsub16(Register rd, Register rn, Register rm) { + shsub16(al, rd, rn, rm); + } + + void shsub8(Condition cond, Register rd, Register rn, Register rm); + void shsub8(Register rd, Register rn, Register rm) { shsub8(al, rd, rn, rm); } + + void smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlabb(Register rd, Register rn, Register rm, Register ra) { + smlabb(al, rd, rn, rm, ra); + } + + void smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlabt(Register rd, Register rn, Register rm, Register ra) { + smlabt(al, rd, rn, rm, ra); + } + + void smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlad(Register rd, Register rn, Register rm, Register ra) { + smlad(al, rd, rn, rm, ra); + } + + void smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smladx(Register rd, Register rn, Register rm, Register ra) { + smladx(al, rd, rn, rm, ra); + } + + void smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlal(Register rdlo, Register rdhi, Register rn, Register rm) { + smlal(al, rdlo, rdhi, rn, rm); + } + + void smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlalbb(Register rdlo, Register rdhi, Register rn, Register rm) { + smlalbb(al, rdlo, rdhi, rn, rm); + } + + void smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlalbt(Register rdlo, Register rdhi, Register rn, Register rm) { + smlalbt(al, rdlo, rdhi, rn, rm); + } + + void smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlald(Register rdlo, Register 
rdhi, Register rn, Register rm) { + smlald(al, rdlo, rdhi, rn, rm); + } + + void smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaldx(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaldx(al, rdlo, rdhi, rn, rm); + } + + void smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlals(Register rdlo, Register rdhi, Register rn, Register rm) { + smlals(al, rdlo, rdhi, rn, rm); + } + + void smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaltb(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaltb(al, rdlo, rdhi, rn, rm); + } + + void smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaltt(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaltt(al, rdlo, rdhi, rn, rm); + } + + void smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlatb(Register rd, Register rn, Register rm, Register ra) { + smlatb(al, rd, rn, rm, ra); + } + + void smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlatt(Register rd, Register rn, Register rm, Register ra) { + smlatt(al, rd, rn, rm, ra); + } + + void smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlawb(Register rd, Register rn, Register rm, Register ra) { + smlawb(al, rd, rn, rm, ra); + } + + void smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlawt(Register rd, Register rn, Register rm, Register ra) { + smlawt(al, rd, rn, rm, ra); + } + + void smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlsd(Register rd, Register rn, Register rm, Register ra) { + smlsd(al, rd, rn, rm, ra); + } + + void smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlsdx(Register rd, Register rn, Register rm, Register ra) { + smlsdx(al, rd, rn, rm, ra); + } + + void smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlsld(Register rdlo, Register rdhi, Register rn, Register rm) { + smlsld(al, rdlo, rdhi, rn, rm); + } + + void smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlsldx(Register rdlo, Register rdhi, Register rn, Register rm) { + smlsldx(al, rdlo, rdhi, rn, rm); + } + + void smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmla(Register rd, Register rn, Register rm, Register ra) { + smmla(al, rd, rn, rm, ra); + } + + void smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmlar(Register rd, Register rn, Register rm, Register ra) { + smmlar(al, rd, rn, rm, ra); + } + + void smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmls(Register rd, Register rn, Register rm, Register ra) { + smmls(al, rd, rn, rm, ra); + } + + void smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmlsr(Register rd, Register rn, Register rm, Register ra) { + smmlsr(al, rd, rn, rm, ra); + } + + void smmul(Condition cond, Register rd, Register rn, Register rm); + void smmul(Register rd, Register rn, Register rm) { smmul(al, rd, rn, rm); } + + void smmulr(Condition cond, Register rd, Register rn, Register rm); + void smmulr(Register rd, Register rn, Register rm) { smmulr(al, rd, rn, rm); } + + void smuad(Condition cond, Register rd, Register rn, 
Register rm); + void smuad(Register rd, Register rn, Register rm) { smuad(al, rd, rn, rm); } + + void smuadx(Condition cond, Register rd, Register rn, Register rm); + void smuadx(Register rd, Register rn, Register rm) { smuadx(al, rd, rn, rm); } + + void smulbb(Condition cond, Register rd, Register rn, Register rm); + void smulbb(Register rd, Register rn, Register rm) { smulbb(al, rd, rn, rm); } + + void smulbt(Condition cond, Register rd, Register rn, Register rm); + void smulbt(Register rd, Register rn, Register rm) { smulbt(al, rd, rn, rm); } + + void smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smull(Register rdlo, Register rdhi, Register rn, Register rm) { + smull(al, rdlo, rdhi, rn, rm); + } + + void smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smulls(Register rdlo, Register rdhi, Register rn, Register rm) { + smulls(al, rdlo, rdhi, rn, rm); + } + + void smultb(Condition cond, Register rd, Register rn, Register rm); + void smultb(Register rd, Register rn, Register rm) { smultb(al, rd, rn, rm); } + + void smultt(Condition cond, Register rd, Register rn, Register rm); + void smultt(Register rd, Register rn, Register rm) { smultt(al, rd, rn, rm); } + + void smulwb(Condition cond, Register rd, Register rn, Register rm); + void smulwb(Register rd, Register rn, Register rm) { smulwb(al, rd, rn, rm); } + + void smulwt(Condition cond, Register rd, Register rn, Register rm); + void smulwt(Register rd, Register rn, Register rm) { smulwt(al, rd, rn, rm); } + + void smusd(Condition cond, Register rd, Register rn, Register rm); + void smusd(Register rd, Register rn, Register rm) { smusd(al, rd, rn, rm); } + + void smusdx(Condition cond, Register rd, Register rn, Register rm); + void smusdx(Register rd, Register rn, Register rm) { smusdx(al, rd, rn, rm); } + + void ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + void ssat(Register rd, uint32_t imm, const Operand& operand) { + ssat(al, rd, imm, operand); + } + + void ssat16(Condition cond, Register rd, uint32_t imm, Register rn); + void ssat16(Register rd, uint32_t imm, Register rn) { + ssat16(al, rd, imm, rn); + } + + void ssax(Condition cond, Register rd, Register rn, Register rm); + void ssax(Register rd, Register rn, Register rm) { ssax(al, rd, rn, rm); } + + void ssub16(Condition cond, Register rd, Register rn, Register rm); + void ssub16(Register rd, Register rn, Register rm) { ssub16(al, rd, rn, rm); } + + void ssub8(Condition cond, Register rd, Register rn, Register rm); + void ssub8(Register rd, Register rn, Register rm) { ssub8(al, rd, rn, rm); } + + void stl(Condition cond, Register rt, const MemOperand& operand); + void stl(Register rt, const MemOperand& operand) { stl(al, rt, operand); } + + void stlb(Condition cond, Register rt, const MemOperand& operand); + void stlb(Register rt, const MemOperand& operand) { stlb(al, rt, operand); } + + void stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlex(Register rd, Register rt, const MemOperand& operand) { + stlex(al, rd, rt, operand); + } + + void stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlexb(Register rd, Register rt, const MemOperand& operand) { + stlexb(al, rd, rt, operand); + } + + void stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + void stlexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + stlexd(al, 
rd, rt, rt2, operand); + } + + void stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlexh(Register rd, Register rt, const MemOperand& operand) { + stlexh(al, rd, rt, operand); + } + + void stlh(Condition cond, Register rt, const MemOperand& operand); + void stlh(Register rt, const MemOperand& operand) { stlh(al, rt, operand); } + + void stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void stm(Register rn, WriteBack write_back, RegisterList registers) { + stm(al, Best, rn, write_back, registers); + } + void stm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stm(cond, Best, rn, write_back, registers); + } + void stm(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stm(al, size, rn, write_back, registers); + } + + void stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmda(Register rn, WriteBack write_back, RegisterList registers) { + stmda(al, rn, write_back, registers); + } + + void stmdb(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmdb(Register rn, WriteBack write_back, RegisterList registers) { + stmdb(al, Best, rn, write_back, registers); + } + void stmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmdb(cond, Best, rn, write_back, registers); + } + void stmdb(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmdb(al, size, rn, write_back, registers); + } + + void stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmea(Register rn, WriteBack write_back, RegisterList registers) { + stmea(al, Best, rn, write_back, registers); + } + void stmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmea(cond, Best, rn, write_back, registers); + } + void stmea(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmea(al, size, rn, write_back, registers); + } + + void stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmed(Register rn, WriteBack write_back, RegisterList registers) { + stmed(al, rn, write_back, registers); + } + + void stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmfa(Register rn, WriteBack write_back, RegisterList registers) { + stmfa(al, rn, write_back, registers); + } + + void stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmfd(Register rn, WriteBack write_back, RegisterList registers) { + stmfd(al, rn, write_back, registers); + } + + void stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmib(Register rn, WriteBack write_back, RegisterList registers) { + stmib(al, rn, write_back, registers); + } + + void str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void str(Register rt, const MemOperand& operand) { + str(al, Best, rt, operand); + } + void str(Condition cond, Register rt, const MemOperand& operand) { + str(cond, Best, rt, operand); + } + void str(EncodingSize size, Register rt, const MemOperand& operand) { + str(al, size, rt, operand); + } + + void strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void 
strb(Register rt, const MemOperand& operand) { + strb(al, Best, rt, operand); + } + void strb(Condition cond, Register rt, const MemOperand& operand) { + strb(cond, Best, rt, operand); + } + void strb(EncodingSize size, Register rt, const MemOperand& operand) { + strb(al, size, rt, operand); + } + + void strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void strd(Register rt, Register rt2, const MemOperand& operand) { + strd(al, rt, rt2, operand); + } + + void strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strex(Register rd, Register rt, const MemOperand& operand) { + strex(al, rd, rt, operand); + } + + void strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strexb(Register rd, Register rt, const MemOperand& operand) { + strexb(al, rd, rt, operand); + } + + void strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + void strexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + strexd(al, rd, rt, rt2, operand); + } + + void strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strexh(Register rd, Register rt, const MemOperand& operand) { + strexh(al, rd, rt, operand); + } + + void strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void strh(Register rt, const MemOperand& operand) { + strh(al, Best, rt, operand); + } + void strh(Condition cond, Register rt, const MemOperand& operand) { + strh(cond, Best, rt, operand); + } + void strh(EncodingSize size, Register rt, const MemOperand& operand) { + strh(al, size, rt, operand); + } + + void sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void sub(Register rd, Register rn, const Operand& operand) { + sub(al, Best, rd, rn, operand); + } + void sub(Condition cond, Register rd, Register rn, const Operand& operand) { + sub(cond, Best, rd, rn, operand); + } + void sub(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sub(al, size, rd, rn, operand); + } + + void sub(Condition cond, Register rd, const Operand& operand); + void sub(Register rd, const Operand& operand) { sub(al, rd, operand); } + + void subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void subs(Register rd, Register rn, const Operand& operand) { + subs(al, Best, rd, rn, operand); + } + void subs(Condition cond, Register rd, Register rn, const Operand& operand) { + subs(cond, Best, rd, rn, operand); + } + void subs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + subs(al, size, rd, rn, operand); + } + + void subs(Register rd, const Operand& operand); + + void subw(Condition cond, Register rd, Register rn, const Operand& operand); + void subw(Register rd, Register rn, const Operand& operand) { + subw(al, rd, rn, operand); + } + + void svc(Condition cond, uint32_t imm); + void svc(uint32_t imm) { svc(al, imm); } + + void sxtab(Condition cond, Register rd, Register rn, const Operand& operand); + void sxtab(Register rd, Register rn, const Operand& operand) { + sxtab(al, rd, rn, operand); + } + + void sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + void sxtab16(Register rd, Register rn, const Operand& operand) { + sxtab16(al, rd, rn, operand); + } + + void sxtah(Condition cond, Register rd, Register rn, const Operand& operand); + void 
sxtah(Register rd, Register rn, const Operand& operand) { + sxtah(al, rd, rn, operand); + } + + void sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void sxtb(Register rd, const Operand& operand) { + sxtb(al, Best, rd, operand); + } + void sxtb(Condition cond, Register rd, const Operand& operand) { + sxtb(cond, Best, rd, operand); + } + void sxtb(EncodingSize size, Register rd, const Operand& operand) { + sxtb(al, size, rd, operand); + } + + void sxtb16(Condition cond, Register rd, const Operand& operand); + void sxtb16(Register rd, const Operand& operand) { sxtb16(al, rd, operand); } + + void sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void sxth(Register rd, const Operand& operand) { + sxth(al, Best, rd, operand); + } + void sxth(Condition cond, Register rd, const Operand& operand) { + sxth(cond, Best, rd, operand); + } + void sxth(EncodingSize size, Register rd, const Operand& operand) { + sxth(al, size, rd, operand); + } + + void tbb(Condition cond, Register rn, Register rm); + void tbb(Register rn, Register rm) { tbb(al, rn, rm); } + + void tbh(Condition cond, Register rn, Register rm); + void tbh(Register rn, Register rm) { tbh(al, rn, rm); } + + void teq(Condition cond, Register rn, const Operand& operand); + void teq(Register rn, const Operand& operand) { teq(al, rn, operand); } + + void tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + void tst(Register rn, const Operand& operand) { tst(al, Best, rn, operand); } + void tst(Condition cond, Register rn, const Operand& operand) { + tst(cond, Best, rn, operand); + } + void tst(EncodingSize size, Register rn, const Operand& operand) { + tst(al, size, rn, operand); + } + + void uadd16(Condition cond, Register rd, Register rn, Register rm); + void uadd16(Register rd, Register rn, Register rm) { uadd16(al, rd, rn, rm); } + + void uadd8(Condition cond, Register rd, Register rn, Register rm); + void uadd8(Register rd, Register rn, Register rm) { uadd8(al, rd, rn, rm); } + + void uasx(Condition cond, Register rd, Register rn, Register rm); + void uasx(Register rd, Register rn, Register rm) { uasx(al, rd, rn, rm); } + + void ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + ubfx(al, rd, rn, lsb, width); + } + + void udf(Condition cond, EncodingSize size, uint32_t imm); + void udf(uint32_t imm) { udf(al, Best, imm); } + void udf(Condition cond, uint32_t imm) { udf(cond, Best, imm); } + void udf(EncodingSize size, uint32_t imm) { udf(al, size, imm); } + + void udiv(Condition cond, Register rd, Register rn, Register rm); + void udiv(Register rd, Register rn, Register rm) { udiv(al, rd, rn, rm); } + + void uhadd16(Condition cond, Register rd, Register rn, Register rm); + void uhadd16(Register rd, Register rn, Register rm) { + uhadd16(al, rd, rn, rm); + } + + void uhadd8(Condition cond, Register rd, Register rn, Register rm); + void uhadd8(Register rd, Register rn, Register rm) { uhadd8(al, rd, rn, rm); } + + void uhasx(Condition cond, Register rd, Register rn, Register rm); + void uhasx(Register rd, Register rn, Register rm) { uhasx(al, rd, rn, rm); } + + void uhsax(Condition cond, Register rd, Register rn, Register rm); + void uhsax(Register rd, Register rn, Register rm) { uhsax(al, rd, rn, rm); } + + void uhsub16(Condition cond, Register rd, Register rn, Register rm); + void uhsub16(Register rd, Register rn, Register rm) { + 
uhsub16(al, rd, rn, rm); + } + + void uhsub8(Condition cond, Register rd, Register rn, Register rm); + void uhsub8(Register rd, Register rn, Register rm) { uhsub8(al, rd, rn, rm); } + + void umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umaal(Register rdlo, Register rdhi, Register rn, Register rm) { + umaal(al, rdlo, rdhi, rn, rm); + } + + void umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umlal(Register rdlo, Register rdhi, Register rn, Register rm) { + umlal(al, rdlo, rdhi, rn, rm); + } + + void umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umlals(Register rdlo, Register rdhi, Register rn, Register rm) { + umlals(al, rdlo, rdhi, rn, rm); + } + + void umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umull(Register rdlo, Register rdhi, Register rn, Register rm) { + umull(al, rdlo, rdhi, rn, rm); + } + + void umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umulls(Register rdlo, Register rdhi, Register rn, Register rm) { + umulls(al, rdlo, rdhi, rn, rm); + } + + void uqadd16(Condition cond, Register rd, Register rn, Register rm); + void uqadd16(Register rd, Register rn, Register rm) { + uqadd16(al, rd, rn, rm); + } + + void uqadd8(Condition cond, Register rd, Register rn, Register rm); + void uqadd8(Register rd, Register rn, Register rm) { uqadd8(al, rd, rn, rm); } + + void uqasx(Condition cond, Register rd, Register rn, Register rm); + void uqasx(Register rd, Register rn, Register rm) { uqasx(al, rd, rn, rm); } + + void uqsax(Condition cond, Register rd, Register rn, Register rm); + void uqsax(Register rd, Register rn, Register rm) { uqsax(al, rd, rn, rm); } + + void uqsub16(Condition cond, Register rd, Register rn, Register rm); + void uqsub16(Register rd, Register rn, Register rm) { + uqsub16(al, rd, rn, rm); + } + + void uqsub8(Condition cond, Register rd, Register rn, Register rm); + void uqsub8(Register rd, Register rn, Register rm) { uqsub8(al, rd, rn, rm); } + + void usad8(Condition cond, Register rd, Register rn, Register rm); + void usad8(Register rd, Register rn, Register rm) { usad8(al, rd, rn, rm); } + + void usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void usada8(Register rd, Register rn, Register rm, Register ra) { + usada8(al, rd, rn, rm, ra); + } + + void usat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + void usat(Register rd, uint32_t imm, const Operand& operand) { + usat(al, rd, imm, operand); + } + + void usat16(Condition cond, Register rd, uint32_t imm, Register rn); + void usat16(Register rd, uint32_t imm, Register rn) { + usat16(al, rd, imm, rn); + } + + void usax(Condition cond, Register rd, Register rn, Register rm); + void usax(Register rd, Register rn, Register rm) { usax(al, rd, rn, rm); } + + void usub16(Condition cond, Register rd, Register rn, Register rm); + void usub16(Register rd, Register rn, Register rm) { usub16(al, rd, rn, rm); } + + void usub8(Condition cond, Register rd, Register rn, Register rm); + void usub8(Register rd, Register rn, Register rm) { usub8(al, rd, rn, rm); } + + void uxtab(Condition cond, Register rd, Register rn, const Operand& operand); + void uxtab(Register rd, Register rn, const Operand& operand) { + uxtab(al, rd, rn, operand); + } + + void uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + void uxtab16(Register rd, Register rn, 
const Operand& operand) { + uxtab16(al, rd, rn, operand); + } + + void uxtah(Condition cond, Register rd, Register rn, const Operand& operand); + void uxtah(Register rd, Register rn, const Operand& operand) { + uxtah(al, rd, rn, operand); + } + + void uxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void uxtb(Register rd, const Operand& operand) { + uxtb(al, Best, rd, operand); + } + void uxtb(Condition cond, Register rd, const Operand& operand) { + uxtb(cond, Best, rd, operand); + } + void uxtb(EncodingSize size, Register rd, const Operand& operand) { + uxtb(al, size, rd, operand); + } + + void uxtb16(Condition cond, Register rd, const Operand& operand); + void uxtb16(Register rd, const Operand& operand) { uxtb16(al, rd, operand); } + + void uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void uxth(Register rd, const Operand& operand) { + uxth(al, Best, rd, operand); + } + void uxth(Condition cond, Register rd, const Operand& operand) { + uxth(cond, Best, rd, operand); + } + void uxth(EncodingSize size, Register rd, const Operand& operand) { + uxth(al, size, rd, operand); + } + + void vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vaba(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vaba(al, dt, rd, rn, rm); + } + + void vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vaba(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vaba(al, dt, rd, rn, rm); + } + + void vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vabal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vabal(al, dt, rd, rn, rm); + } + + void vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vabd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vabd(al, dt, rd, rn, rm); + } + + void vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vabd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vabd(al, dt, rd, rn, rm); + } + + void vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vabdl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vabdl(al, dt, rd, rn, rm); + } + + void vabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vabs(DataType dt, DRegister rd, DRegister rm) { vabs(al, dt, rd, rm); } + + void vabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vabs(DataType dt, QRegister rd, QRegister rm) { vabs(al, dt, rd, rm); } + + void vabs(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vabs(DataType dt, SRegister rd, SRegister rm) { vabs(al, dt, rd, rm); } + + void vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vacge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vacge(al, dt, rd, rn, rm); + } + + void vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vacge(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vacge(al, dt, rd, rn, rm); + } + + void vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vacgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vacgt(al, dt, rd, rn, rm); + } + + void vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vacgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vacgt(al, dt, rd, rn, rm); + } + + void 
vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vacle(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vacle(al, dt, rd, rn, rm); + } + + void vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vacle(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vacle(al, dt, rd, rn, rm); + } + + void vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vaclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vaclt(al, dt, rd, rn, rm); + } + + void vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vaclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vaclt(al, dt, rd, rn, rm); + } + + void vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vadd(al, dt, rd, rn, rm); + } + + void vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vadd(al, dt, rd, rn, rm); + } + + void vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vadd(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vadd(al, dt, rd, rn, rm); + } + + void vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vaddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vaddhn(al, dt, rd, rn, rm); + } + + void vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vaddl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vaddl(al, dt, rd, rn, rm); + } + + void vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + void vaddw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + vaddw(al, dt, rd, rn, rm); + } + + void vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vand(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vand(al, dt, rd, rn, operand); + } + + void vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vand(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vand(al, dt, rd, rn, operand); + } + + void vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vbic(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vbic(al, dt, rd, rn, operand); + } + + void vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vbic(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vbic(al, dt, rd, rn, operand); + } + + void vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vbif(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vbif(al, dt, rd, rn, rm); + } + void vbif(DRegister rd, DRegister rn, DRegister rm) { + vbif(al, kDataTypeValueNone, rd, rn, rm); + } + void vbif(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + vbif(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vbif(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vbif(al, dt, rd, rn, rm); + } + void vbif(QRegister rd, QRegister rn, QRegister rm) { + vbif(al, kDataTypeValueNone, rd, rn, rm); + } + void vbif(Condition cond, QRegister rd, 
QRegister rn, QRegister rm) { + vbif(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vbit(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vbit(al, dt, rd, rn, rm); + } + void vbit(DRegister rd, DRegister rn, DRegister rm) { + vbit(al, kDataTypeValueNone, rd, rn, rm); + } + void vbit(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vbit(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vbit(al, dt, rd, rn, rm); + } + void vbit(QRegister rd, QRegister rn, QRegister rm) { + vbit(al, kDataTypeValueNone, rd, rn, rm); + } + void vbit(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vbsl(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vbsl(al, dt, rd, rn, rm); + } + void vbsl(DRegister rd, DRegister rn, DRegister rm) { + vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + void vbsl(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vbsl(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vbsl(al, dt, rd, rn, rm); + } + void vbsl(QRegister rd, QRegister rn, QRegister rm) { + vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + void vbsl(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vceq(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vceq(al, dt, rd, rm, operand); + } + + void vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vceq(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vceq(al, dt, rd, rm, operand); + } + + void vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vceq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vceq(al, dt, rd, rn, rm); + } + + void vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vceq(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vceq(al, dt, rd, rn, rm); + } + + void vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vcge(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vcge(al, dt, rd, rm, operand); + } + + void vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vcge(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vcge(al, dt, rd, rm, operand); + } + + void vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vcge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vcge(al, dt, rd, rn, rm); + } + + void vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vcge(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vcge(al, dt, rd, rn, rm); + } + + void vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vcgt(DataType dt, DRegister rd, DRegister rm, 
const DOperand& operand) { + vcgt(al, dt, rd, rm, operand); + } + + void vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vcgt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vcgt(al, dt, rd, rm, operand); + } + + void vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vcgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vcgt(al, dt, rd, rn, rm); + } + + void vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vcgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vcgt(al, dt, rd, rn, rm); + } + + void vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vcle(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vcle(al, dt, rd, rm, operand); + } + + void vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vcle(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vcle(al, dt, rd, rm, operand); + } + + void vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vcle(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vcle(al, dt, rd, rn, rm); + } + + void vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vcle(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vcle(al, dt, rd, rn, rm); + } + + void vcls(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vcls(DataType dt, DRegister rd, DRegister rm) { vcls(al, dt, rd, rm); } + + void vcls(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vcls(DataType dt, QRegister rd, QRegister rm) { vcls(al, dt, rd, rm); } + + void vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vclt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vclt(al, dt, rd, rm, operand); + } + + void vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vclt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vclt(al, dt, rd, rm, operand); + } + + void vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vclt(al, dt, rd, rn, rm); + } + + void vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vclt(al, dt, rd, rn, rm); + } + + void vclz(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vclz(DataType dt, DRegister rd, DRegister rm) { vclz(al, dt, rd, rm); } + + void vclz(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vclz(DataType dt, QRegister rd, QRegister rm) { vclz(al, dt, rd, rm); } + + void vcmp(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + void vcmp(DataType dt, SRegister rd, const SOperand& operand) { + vcmp(al, dt, rd, operand); + } + + void vcmp(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + void vcmp(DataType dt, DRegister rd, const DOperand& operand) { + vcmp(al, dt, rd, operand); + } + + void vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand); + void vcmpe(DataType dt, SRegister rd, const SOperand& operand) { + vcmpe(al, dt, rd, operand); + } + + void vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& 
operand); + void vcmpe(DataType dt, DRegister rd, const DOperand& operand) { + vcmpe(al, dt, rd, operand); + } + + void vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vcnt(DataType dt, DRegister rd, DRegister rm) { vcnt(al, dt, rd, rm); } + + void vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vcnt(DataType dt, QRegister rd, QRegister rm) { vcnt(al, dt, rd, rm); } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + void vcvt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + void vcvt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits); + void vcvt( + DataType dt1, DataType dt2, DRegister rd, DRegister rm, int32_t fbits) { + vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits); + void vcvt( + DataType dt1, DataType dt2, QRegister rd, QRegister rm, int32_t fbits) { + vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits); + void vcvt( + DataType dt1, DataType dt2, SRegister rd, SRegister rm, int32_t fbits) { + vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm); + void vcvt(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm); + void vcvt(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm); + void vcvt(DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm); + void vcvt(DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + void vcvt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + vcvt(al, dt1, dt2, rd, rm); + } + + void vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + void vcvtb(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + vcvtb(al, dt1, dt2, rd, rm); + } + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + void vcvtb(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + vcvtb(al, dt1, dt2, rd, rm); + } + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + void vcvtb(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + vcvtb(al, dt1, dt2, rd, rm); + } + + void vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtm(DataType dt1, DataType 
dt2, SRegister rd, SRegister rm); + + void vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + void vcvtr(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + vcvtr(al, dt1, dt2, rd, rm); + } + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + void vcvtr(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + vcvtr(al, dt1, dt2, rd, rm); + } + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + void vcvtt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + vcvtt(al, dt1, dt2, rd, rm); + } + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + void vcvtt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + vcvtt(al, dt1, dt2, rd, rm); + } + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + void vcvtt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + vcvtt(al, dt1, dt2, rd, rm); + } + + void vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vdiv(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vdiv(al, dt, rd, rn, rm); + } + + void vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vdiv(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vdiv(al, dt, rd, rn, rm); + } + + void vdup(Condition cond, DataType dt, QRegister rd, Register rt); + void vdup(DataType dt, QRegister rd, Register rt) { vdup(al, dt, rd, rt); } + + void vdup(Condition cond, DataType dt, DRegister rd, Register rt); + void vdup(DataType dt, DRegister rd, Register rt) { vdup(al, dt, rd, rt); } + + void vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm); + void vdup(DataType dt, DRegister rd, DRegisterLane rm) { + vdup(al, dt, rd, rm); + } + + void vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm); + void vdup(DataType dt, QRegister rd, DRegisterLane rm) { + vdup(al, dt, rd, rm); + } + + void veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void veor(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + veor(al, dt, rd, rn, rm); + } + void veor(DRegister rd, DRegister rn, DRegister rm) { + veor(al, kDataTypeValueNone, rd, rn, rm); + } + void veor(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + veor(cond, kDataTypeValueNone, rd, rn, rm); + } + + void veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void veor(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + veor(al, dt, rd, rn, rm); + } + void veor(QRegister rd, QRegister rn, QRegister rm) { + veor(al, kDataTypeValueNone, rd, rn, rm); + } + void veor(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + veor(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vext(Condition cond, + DataType dt, + 
DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand); + void vext(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + vext(al, dt, rd, rn, rm, operand); + } + + void vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand); + void vext(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + vext(al, dt, rd, rn, rm, operand); + } + + void vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfma(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfma(al, dt, rd, rn, rm); + } + + void vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vfma(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vfma(al, dt, rd, rn, rm); + } + + void vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfma(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfma(al, dt, rd, rn, rm); + } + + void vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfms(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfms(al, dt, rd, rn, rm); + } + + void vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vfms(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vfms(al, dt, rd, rn, rm); + } + + void vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfms(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfms(al, dt, rd, rn, rm); + } + + void vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfnma(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfnma(al, dt, rd, rn, rm); + } + + void vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfnma(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfnma(al, dt, rd, rn, rm); + } + + void vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfnms(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfnms(al, dt, rd, rn, rm); + } + + void vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfnms(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfnms(al, dt, rd, rn, rm); + } + + void vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vhadd(al, dt, rd, rn, rm); + } + + void vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vhadd(al, dt, rd, rn, rm); + } + + void vhsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vhsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vhsub(al, dt, rd, rn, rm); + } + + void vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vhsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vhsub(al, dt, rd, rn, rm); + } + + void vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld1(al, dt, nreglist, operand); + } + + void vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void 
vld2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld2(al, dt, nreglist, operand); + } + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld3(al, dt, nreglist, operand); + } + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + void vld3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + vld3(al, dt, nreglist, operand); + } + + void vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld4(al, dt, nreglist, operand); + } + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vldm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldm(al, dt, rn, write_back, dreglist); + } + void vldm(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldm(al, dt, rn, write_back, sreglist); + } + void vldm(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vldm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vldmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmdb(al, dt, rn, write_back, dreglist); + } + void vldmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmdb(al, dt, rn, write_back, sreglist); + } + void vldmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vldmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vldmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmia(al, dt, rn, write_back, dreglist); + } + void vldmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldmia(Condition cond, + Register rn, + WriteBack write_back, + 
DRegisterList dreglist) { + vldmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmia(al, dt, rn, write_back, sreglist); + } + void vldmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vldmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldr(Condition cond, DataType dt, DRegister rd, Location* location); + bool vldr_info(Condition cond, + DataType dt, + DRegister rd, + Location* location, + const struct ReferenceInfo** info); + void vldr(DataType dt, DRegister rd, Location* location) { + vldr(al, dt, rd, location); + } + void vldr(DRegister rd, Location* location) { + vldr(al, Untyped64, rd, location); + } + void vldr(Condition cond, DRegister rd, Location* location) { + vldr(cond, Untyped64, rd, location); + } + + void vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + void vldr(DataType dt, DRegister rd, const MemOperand& operand) { + vldr(al, dt, rd, operand); + } + void vldr(DRegister rd, const MemOperand& operand) { + vldr(al, Untyped64, rd, operand); + } + void vldr(Condition cond, DRegister rd, const MemOperand& operand) { + vldr(cond, Untyped64, rd, operand); + } + + void vldr(Condition cond, DataType dt, SRegister rd, Location* location); + bool vldr_info(Condition cond, + DataType dt, + SRegister rd, + Location* location, + const struct ReferenceInfo** info); + void vldr(DataType dt, SRegister rd, Location* location) { + vldr(al, dt, rd, location); + } + void vldr(SRegister rd, Location* location) { + vldr(al, Untyped32, rd, location); + } + void vldr(Condition cond, SRegister rd, Location* location) { + vldr(cond, Untyped32, rd, location); + } + + void vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + void vldr(DataType dt, SRegister rd, const MemOperand& operand) { + vldr(al, dt, rd, operand); + } + void vldr(SRegister rd, const MemOperand& operand) { + vldr(al, Untyped32, rd, operand); + } + void vldr(Condition cond, SRegister rd, const MemOperand& operand) { + vldr(cond, Untyped32, rd, operand); + } + + void vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmax(al, dt, rd, rn, rm); + } + + void vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmax(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmax(al, dt, rd, rn, rm); + } + + void vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmin(al, dt, rd, rn, rm); + } + + void vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmin(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmin(al, dt, rd, rn, rm); + } + + void vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); 
+ + void vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vmla(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vmla(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmla(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vmlal(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vmlal(al, dt, rd, rn, rm); + } + + void vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vmlal(al, dt, rd, rn, rm); + } + + void vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vmls(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vmls(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmls(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vmlsl(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vmlsl(al, dt, rd, rn, rm); + } + + void vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vmlsl(al, dt, rd, rn, rm); + } + + void vmov(Condition cond, Register rt, SRegister rn); + void vmov(Register rt, SRegister rn) { vmov(al, rt, rn); } + + void vmov(Condition cond, SRegister rn, Register rt); + void vmov(SRegister rn, Register rt) { vmov(al, rn, rt); } + + void vmov(Condition cond, Register rt, Register rt2, DRegister rm); + void vmov(Register rt, Register rt2, DRegister rm) { vmov(al, rt, rt2, rm); } + + void vmov(Condition cond, DRegister rm, Register rt, Register rt2); + void vmov(DRegister rm, Register rt, Register rt2) { vmov(al, rm, rt, rt2); } + + void vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + void vmov(Register rt, Register rt2, SRegister rm, SRegister rm1) { + vmov(al, rt, rt2, rm, rm1); 
+ } + + void vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + void vmov(SRegister rm, SRegister rm1, Register rt, Register rt2) { + vmov(al, rm, rm1, rt, rt2); + } + + void vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt); + void vmov(DataType dt, DRegisterLane rd, Register rt) { + vmov(al, dt, rd, rt); + } + void vmov(DRegisterLane rd, Register rt) { + vmov(al, kDataTypeValueNone, rd, rt); + } + void vmov(Condition cond, DRegisterLane rd, Register rt) { + vmov(cond, kDataTypeValueNone, rd, rt); + } + + void vmov(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + void vmov(DataType dt, DRegister rd, const DOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + void vmov(DataType dt, QRegister rd, const QOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + void vmov(DataType dt, SRegister rd, const SOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn); + void vmov(DataType dt, Register rt, DRegisterLane rn) { + vmov(al, dt, rt, rn); + } + void vmov(Register rt, DRegisterLane rn) { + vmov(al, kDataTypeValueNone, rt, rn); + } + void vmov(Condition cond, Register rt, DRegisterLane rn) { + vmov(cond, kDataTypeValueNone, rt, rn); + } + + void vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm); + void vmovl(DataType dt, QRegister rd, DRegister rm) { vmovl(al, dt, rd, rm); } + + void vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vmovn(DataType dt, DRegister rd, QRegister rm) { vmovn(al, dt, rd, rm); } + + void vmrs(Condition cond, RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg); + void vmrs(RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg) { + vmrs(al, rt, spec_reg); + } + + void vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt); + void vmsr(SpecialFPRegister spec_reg, Register rt) { vmsr(al, spec_reg, rt); } + + void vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vmul( + DataType dt, DRegister rd, DRegister rn, DRegister dm, unsigned index) { + vmul(al, dt, rd, rn, dm, index); + } + + void vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + void vmul( + DataType dt, QRegister rd, QRegister rn, DRegister dm, unsigned index) { + vmul(al, dt, rd, rn, dm, index); + } + + void vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmul(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vmull( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vmull(al, dt, rd, rn, dm, index); + } + + void vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + 
vmull(al, dt, rd, rn, rm); + } + + void vmvn(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + void vmvn(DataType dt, DRegister rd, const DOperand& operand) { + vmvn(al, dt, rd, operand); + } + + void vmvn(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + void vmvn(DataType dt, QRegister rd, const QOperand& operand) { + vmvn(al, dt, rd, operand); + } + + void vneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vneg(DataType dt, DRegister rd, DRegister rm) { vneg(al, dt, rd, rm); } + + void vneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vneg(DataType dt, QRegister rd, QRegister rm) { vneg(al, dt, rd, rm); } + + void vneg(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vneg(DataType dt, SRegister rd, SRegister rm) { vneg(al, dt, rd, rm); } + + void vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmla(al, dt, rd, rn, rm); + } + + void vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmla(al, dt, rd, rn, rm); + } + + void vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmls(al, dt, rd, rn, rm); + } + + void vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmls(al, dt, rd, rn, rm); + } + + void vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmul(al, dt, rd, rn, rm); + } + + void vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmul(al, dt, rd, rn, rm); + } + + void vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vorn(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vorn(al, dt, rd, rn, operand); + } + + void vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vorn(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vorn(al, dt, rd, rn, operand); + } + + void vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vorr(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vorr(al, dt, rd, rn, operand); + } + void vorr(DRegister rd, DRegister rn, const DOperand& operand) { + vorr(al, kDataTypeValueNone, rd, rn, operand); + } + void vorr(Condition cond, + DRegister rd, + DRegister rn, + const DOperand& operand) { + vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + + void vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vorr(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vorr(al, dt, rd, rn, operand); + } + void vorr(QRegister rd, QRegister rn, const QOperand& operand) { + vorr(al, kDataTypeValueNone, rd, rn, operand); + } + void vorr(Condition cond, + QRegister rd, + QRegister rn, + const QOperand& operand) { + vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + + void vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vpadal(DataType dt, DRegister rd, DRegister rm) { + 
vpadal(al, dt, rd, rm); + } + + void vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vpadal(DataType dt, QRegister rd, QRegister rm) { + vpadal(al, dt, rd, rm); + } + + void vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vpadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpadd(al, dt, rd, rn, rm); + } + + void vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vpaddl(DataType dt, DRegister rd, DRegister rm) { + vpaddl(al, dt, rd, rm); + } + + void vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vpaddl(DataType dt, QRegister rd, QRegister rm) { + vpaddl(al, dt, rd, rm); + } + + void vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vpmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpmax(al, dt, rd, rn, rm); + } + + void vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vpmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpmin(al, dt, rd, rn, rm); + } + + void vpop(Condition cond, DataType dt, DRegisterList dreglist); + void vpop(DataType dt, DRegisterList dreglist) { vpop(al, dt, dreglist); } + void vpop(DRegisterList dreglist) { vpop(al, kDataTypeValueNone, dreglist); } + void vpop(Condition cond, DRegisterList dreglist) { + vpop(cond, kDataTypeValueNone, dreglist); + } + + void vpop(Condition cond, DataType dt, SRegisterList sreglist); + void vpop(DataType dt, SRegisterList sreglist) { vpop(al, dt, sreglist); } + void vpop(SRegisterList sreglist) { vpop(al, kDataTypeValueNone, sreglist); } + void vpop(Condition cond, SRegisterList sreglist) { + vpop(cond, kDataTypeValueNone, sreglist); + } + + void vpush(Condition cond, DataType dt, DRegisterList dreglist); + void vpush(DataType dt, DRegisterList dreglist) { vpush(al, dt, dreglist); } + void vpush(DRegisterList dreglist) { + vpush(al, kDataTypeValueNone, dreglist); + } + void vpush(Condition cond, DRegisterList dreglist) { + vpush(cond, kDataTypeValueNone, dreglist); + } + + void vpush(Condition cond, DataType dt, SRegisterList sreglist); + void vpush(DataType dt, SRegisterList sreglist) { vpush(al, dt, sreglist); } + void vpush(SRegisterList sreglist) { + vpush(al, kDataTypeValueNone, sreglist); + } + void vpush(Condition cond, SRegisterList sreglist) { + vpush(cond, kDataTypeValueNone, sreglist); + } + + void vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vqabs(DataType dt, DRegister rd, DRegister rm) { vqabs(al, dt, rd, rm); } + + void vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vqabs(DataType dt, QRegister rd, QRegister rm) { vqabs(al, dt, rd, rm); } + + void vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqadd(al, dt, rd, rn, rm); + } + + void vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqadd(al, dt, rd, rn, rm); + } + + void vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmlal(al, dt, rd, rn, rm); + } + + void vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vqdmlal( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vqdmlal(al, dt, rd, rn, dm, 
index); + } + + void vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmlsl(al, dt, rd, rn, rm); + } + + void vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vqdmlsl( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vqdmlsl(al, dt, rd, rn, dm, index); + } + + void vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vqdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmull(al, dt, rd, rn, rm); + } + + void vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vqdmull(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vqdmull(al, dt, rd, rn, rm); + } + + void vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vqmovn(DataType dt, DRegister rd, QRegister rm) { + vqmovn(al, dt, rd, rm); + } + + void vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vqmovun(DataType dt, DRegister rd, QRegister rm) { + vqmovun(al, dt, rd, rm); + } + + void vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vqneg(DataType dt, DRegister rd, DRegister rm) { vqneg(al, dt, rd, rm); } + + void vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vqneg(DataType dt, QRegister rd, QRegister rm) { vqneg(al, dt, rd, rm); } + + void vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqrdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vqrdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + void vqrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + vqrshl(al, dt, rd, rm, rn); + } + + void vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + void vqrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + vqrshl(al, dt, rd, rm, rn); + } + + 
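+  // Illustrative note (not part of the upstream VIXL sources): each mnemonic
+  // in this interface comes in two flavours -- an overload taking an explicit
+  // Condition, and a shorthand that simply forwards with `al` (always), as
+  // the inline bodies above show. Assuming an assembler instance `masm`, the
+  // following two calls are therefore equivalent:
+  //
+  //   masm.vqadd(al, U8, q0, q1, q2);  // explicit "always" condition
+  //   masm.vqadd(U8, q0, q1, q2);      // shorthand, forwards to the above
+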
void vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqrshrn(al, dt, rd, rm, operand); + } + + void vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqrshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqrshrun(al, dt, rd, rm, operand); + } + + void vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vqshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vqshl(al, dt, rd, rm, operand); + } + + void vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vqshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vqshl(al, dt, rd, rm, operand); + } + + void vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vqshlu(DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + vqshlu(al, dt, rd, rm, operand); + } + + void vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vqshlu(DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + vqshlu(al, dt, rd, rm, operand); + } + + void vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqshrn(al, dt, rd, rm, operand); + } + + void vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqshrun(al, dt, rd, rm, operand); + } + + void vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqsub(al, dt, rd, rn, rm); + } + + void vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqsub(al, dt, rd, rn, rm); + } + + void vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vraddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vraddhn(al, dt, rd, rn, rm); + } + + void vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrecpe(DataType dt, DRegister rd, DRegister rm) { + vrecpe(al, dt, rd, rm); + } + + void vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrecpe(DataType dt, QRegister rd, QRegister rm) { + vrecpe(al, dt, rd, rm); + } + + void vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrecps(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrecps(al, dt, rd, rn, rm); + } + + void vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrecps(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vrecps(al, dt, rd, rn, rm); + } + + void vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrev16(DataType dt, DRegister rd, DRegister rm) { + vrev16(al, dt, rd, rm); + } + + void vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev16(DataType dt, QRegister rd, QRegister rm) { + vrev16(al, dt, rd, rm); + } + + void vrev32(Condition cond, DataType dt, DRegister rd, DRegister 
rm); + void vrev32(DataType dt, DRegister rd, DRegister rm) { + vrev32(al, dt, rd, rm); + } + + void vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev32(DataType dt, QRegister rd, QRegister rm) { + vrev32(al, dt, rd, rm); + } + + void vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrev64(DataType dt, DRegister rd, DRegister rm) { + vrev64(al, dt, rd, rm); + } + + void vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev64(DataType dt, QRegister rd, QRegister rm) { + vrev64(al, dt, rd, rm); + } + + void vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrhadd(al, dt, rd, rn, rm); + } + + void vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vrhadd(al, dt, rd, rn, rm); + } + + void vrinta(DataType dt, DRegister rd, DRegister rm); + + void vrinta(DataType dt, QRegister rd, QRegister rm); + + void vrinta(DataType dt, SRegister rd, SRegister rm); + + void vrintm(DataType dt, DRegister rd, DRegister rm); + + void vrintm(DataType dt, QRegister rd, QRegister rm); + + void vrintm(DataType dt, SRegister rd, SRegister rm); + + void vrintn(DataType dt, DRegister rd, DRegister rm); + + void vrintn(DataType dt, QRegister rd, QRegister rm); + + void vrintn(DataType dt, SRegister rd, SRegister rm); + + void vrintp(DataType dt, DRegister rd, DRegister rm); + + void vrintp(DataType dt, QRegister rd, QRegister rm); + + void vrintp(DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintr(DataType dt, SRegister rd, SRegister rm) { + vrintr(al, dt, rd, rm); + } + + void vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintr(DataType dt, DRegister rd, DRegister rm) { + vrintr(al, dt, rd, rm); + } + + void vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintx(DataType dt, DRegister rd, DRegister rm) { + vrintx(al, dt, rd, rm); + } + + void vrintx(DataType dt, QRegister rd, QRegister rm); + + void vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintx(DataType dt, SRegister rd, SRegister rm) { + vrintx(al, dt, rd, rm); + } + + void vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintz(DataType dt, DRegister rd, DRegister rm) { + vrintz(al, dt, rd, rm); + } + + void vrintz(DataType dt, QRegister rd, QRegister rm); + + void vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintz(DataType dt, SRegister rd, SRegister rm) { + vrintz(al, dt, rd, rm); + } + + void vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + void vrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + vrshl(al, dt, rd, rm, rn); + } + + void vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + void vrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + vrshl(al, dt, rd, rm, rn); + } + + void vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vrshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vrshr(al, dt, rd, rm, operand); + } + + void vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vrshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vrshr(al, 
dt, rd, rm, operand); + } + + void vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vrshrn(al, dt, rd, rm, operand); + } + + void vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrsqrte(DataType dt, DRegister rd, DRegister rm) { + vrsqrte(al, dt, rd, rm); + } + + void vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrsqrte(DataType dt, QRegister rd, QRegister rm) { + vrsqrte(al, dt, rd, rm); + } + + void vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrsqrts(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrsqrts(al, dt, rd, rn, rm); + } + + void vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrsqrts(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vrsqrts(al, dt, rd, rn, rm); + } + + void vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vrsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vrsra(al, dt, rd, rm, operand); + } + + void vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vrsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vrsra(al, dt, rd, rm, operand); + } + + void vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vrsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vrsubhn(al, dt, rd, rn, rm); + } + + void vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vshl(al, dt, rd, rm, operand); + } + + void vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vshl(al, dt, rd, rm, operand); + } + + void vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + void vshll(DataType dt, QRegister rd, DRegister rm, const DOperand& operand) { + vshll(al, dt, rd, rm, operand); + } + + void vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vshr(al, dt, rd, rm, operand); + } + + void vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vshr(al, dt, rd, rm, operand); + } + + void vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vshrn(DataType dt, DRegister rd, QRegister rm, const QOperand& operand) { + vshrn(al, dt, rd, rm, operand); + } + + 
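+  // Illustrative note (not part of the upstream VIXL sources): the shift
+  // instructions above receive their shift amount through the generic
+  // DOperand/QOperand types rather than a bare integer. Assuming an assembler
+  // instance `masm` and DOperand's immediate constructor, a per-lane shift
+  // right by two might look like:
+  //
+  //   masm.vshr(S16, d0, d1, DOperand(2));  // each s16 lane of d1 >> 2
+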
void vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsli(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsli(al, dt, rd, rm, operand); + } + + void vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsli(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsli(al, dt, rd, rm, operand); + } + + void vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vsqrt(DataType dt, SRegister rd, SRegister rm) { vsqrt(al, dt, rd, rm); } + + void vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vsqrt(DataType dt, DRegister rd, DRegister rm) { vsqrt(al, dt, rd, rm); } + + void vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsra(al, dt, rd, rm, operand); + } + + void vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsra(al, dt, rd, rm, operand); + } + + void vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsri(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsri(al, dt, rd, rm, operand); + } + + void vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsri(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsri(al, dt, rd, rm, operand); + } + + void vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst1(al, dt, nreglist, operand); + } + + void vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst2(al, dt, nreglist, operand); + } + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst3(al, dt, nreglist, operand); + } + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + void vst3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + vst3(al, dt, nreglist, operand); + } + + void vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst4(al, dt, nreglist, operand); + } + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstm(al, dt, rn, write_back, dreglist); + } + void vstm(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList 
sreglist); + void vstm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstm(al, dt, rn, write_back, sreglist); + } + void vstm(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmdb(al, dt, rn, write_back, dreglist); + } + void vstmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vstmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmdb(al, dt, rn, write_back, sreglist); + } + void vstmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmia(al, dt, rn, write_back, dreglist); + } + void vstmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vstmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmia(al, dt, rn, write_back, sreglist); + } + void vstmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + void vstr(DataType dt, DRegister rd, const MemOperand& operand) { + vstr(al, dt, rd, operand); + } + void vstr(DRegister rd, const MemOperand& operand) { + vstr(al, Untyped64, rd, operand); + } + void vstr(Condition cond, DRegister rd, const MemOperand& operand) { + vstr(cond, Untyped64, rd, operand); + } + + void vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + void vstr(DataType dt, SRegister rd, const MemOperand& operand) { + vstr(al, dt, rd, operand); + } + void vstr(SRegister rd, const MemOperand& operand) { + vstr(al, Untyped32, rd, operand); + } + void vstr(Condition cond, SRegister rd, const MemOperand& operand) { + vstr(cond, Untyped32, rd, operand); + } + + void vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister 
rm); + void vsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vsub(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vsubhn(al, dt, rd, rn, rm); + } + + void vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vsubl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vsubl(al, dt, rd, rn, rm); + } + + void vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + void vsubw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + vsubw(al, dt, rd, rn, rm); + } + + void vswp(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vswp(DataType dt, DRegister rd, DRegister rm) { vswp(al, dt, rd, rm); } + void vswp(DRegister rd, DRegister rm) { + vswp(al, kDataTypeValueNone, rd, rm); + } + void vswp(Condition cond, DRegister rd, DRegister rm) { + vswp(cond, kDataTypeValueNone, rd, rm); + } + + void vswp(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vswp(DataType dt, QRegister rd, QRegister rm) { vswp(al, dt, rd, rm); } + void vswp(QRegister rd, QRegister rm) { + vswp(al, kDataTypeValueNone, rd, rm); + } + void vswp(Condition cond, QRegister rd, QRegister rm) { + vswp(cond, kDataTypeValueNone, rd, rm); + } + + void vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + void vtbl(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + vtbl(al, dt, rd, nreglist, rm); + } + + void vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + void vtbx(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + vtbx(al, dt, rd, nreglist, rm); + } + + void vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vtrn(DataType dt, DRegister rd, DRegister rm) { vtrn(al, dt, rd, rm); } + + void vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vtrn(DataType dt, QRegister rd, QRegister rm) { vtrn(al, dt, rd, rm); } + + void vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vtst(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vtst(al, dt, rd, rn, rm); + } + + void vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vtst(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vtst(al, dt, rd, rn, rm); + } + + void vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vuzp(DataType dt, DRegister rd, DRegister rm) { vuzp(al, dt, rd, rm); } + + void vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vuzp(DataType dt, QRegister rd, QRegister rm) { vuzp(al, dt, rd, rm); } + + void vzip(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vzip(DataType dt, DRegister rd, DRegister rm) { vzip(al, dt, rd, rm); } + + void vzip(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vzip(DataType dt, QRegister rd, QRegister rm) { vzip(al, dt, rd, rm); } + + void yield(Condition 
cond, EncodingSize size); + void yield() { yield(al, Best); } + void yield(Condition cond) { yield(cond, Best); } + void yield(EncodingSize size) { yield(al, size); } + // End of generated code. + virtual void UnimplementedDelegate(InstructionType type) { + std::string error_message(std::string("Ill-formed '") + + std::string(ToCString(type)) + + std::string("' instruction.\n")); + VIXL_ABORT_WITH_MSG(error_message.c_str()); + } + virtual bool AllowUnpredictable() { return allow_unpredictable_; } + virtual bool AllowStronglyDiscouraged() { + return allow_strongly_discouraged_; + } +}; + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_ASSEMBLER_AARCH32_H_ diff --git a/dep/vixl/include/vixl/aarch32/constants-aarch32.h b/dep/vixl/include/vixl/aarch32/constants-aarch32.h new file mode 100644 index 000000000..6d79834d9 --- /dev/null +++ b/dep/vixl/include/vixl/aarch32/constants-aarch32.h @@ -0,0 +1,541 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
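+// Illustrative note (not part of the upstream VIXL sources): the
+// InstructionAttribute values defined below are single-bit flags, and the
+// operator| declared at the end of this header combines them without losing
+// the enum type. A hypothetical use:
+//
+//   InstructionAttribute attrs = kArithmetic | kFpNeon;
+//   bool touches_neon = (attrs & kFpNeon) != 0;  // plain integer arithmetic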
+
+#ifndef VIXL_CONSTANTS_AARCH32_H_
+#define VIXL_CONSTANTS_AARCH32_H_
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include "globals-vixl.h"
+
+
+namespace vixl {
+namespace aarch32 {
+
+enum InstructionSet { A32, T32 };
+#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
+const InstructionSet kDefaultISA = T32;
+#else
+const InstructionSet kDefaultISA = A32;
+#endif
+
+const unsigned kRegSizeInBits = 32;
+const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
+const unsigned kSRegSizeInBits = 32;
+const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
+const unsigned kDRegSizeInBits = 64;
+const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
+const unsigned kQRegSizeInBits = 128;
+const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;
+
+const unsigned kNumberOfRegisters = 16;
+const unsigned kNumberOfSRegisters = 32;
+const unsigned kMaxNumberOfDRegisters = 32;
+const unsigned kNumberOfQRegisters = 16;
+const unsigned kNumberOfT32LowRegisters = 8;
+
+const unsigned kIpCode = 12;
+const unsigned kSpCode = 13;
+const unsigned kLrCode = 14;
+const unsigned kPcCode = 15;
+
+const unsigned kT32PcDelta = 4;
+const unsigned kA32PcDelta = 8;
+
+const unsigned kRRXEncodedValue = 3;
+
+const unsigned kCoprocMask = 0xe;
+const unsigned kInvalidCoprocMask = 0xa;
+
+const unsigned kLowestT32_32Opcode = 0xe8000000;
+
+const uint32_t kUnknownValue = 0xdeadbeef;
+
+const uint32_t kMaxInstructionSizeInBytes = 4;
+const uint32_t kA32InstructionSizeInBytes = 4;
+const uint32_t k32BitT32InstructionSizeInBytes = 4;
+const uint32_t k16BitT32InstructionSizeInBytes = 2;
+
+// Maximum size emitted by a single T32 unconditional macro-instruction.
+const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;
+
+const uint32_t kCallerSavedRegistersMask = 0x500f;
+
+const uint16_t k16BitT32NopOpcode = 0xbf00;
+const uint16_t kCbzCbnzMask = 0xf500;
+const uint16_t kCbzCbnzValue = 0xb100;
+
+const int32_t kCbzCbnzRange = 126;
+const int32_t kBConditionalNarrowRange = 254;
+const int32_t kBNarrowRange = 2046;
+const int32_t kNearLabelRange = kBNarrowRange;
+
+enum SystemFunctionsOpcodes { kPrintfCode };
+
+enum BranchHint { kNear, kFar, kBranchWithoutHint };
+
+// Start of generated code.
+// AArch32 version implemented by the library (v8.0).
+// The encoding for vX.Y is: (X << 8) | Y.
+#define AARCH32_VERSION 0x0800 + +enum InstructionAttribute { + kNoAttribute = 0, + kArithmetic = 0x1, + kBitwise = 0x2, + kShift = 0x4, + kAddress = 0x8, + kBranch = 0x10, + kSystem = 0x20, + kFpNeon = 0x40, + kLoadStore = 0x80, + kLoadStoreMultiple = 0x100 +}; + +enum InstructionType { + kUndefInstructionType, + kAdc, + kAdcs, + kAdd, + kAdds, + kAddw, + kAdr, + kAnd, + kAnds, + kAsr, + kAsrs, + kB, + kBfc, + kBfi, + kBic, + kBics, + kBkpt, + kBl, + kBlx, + kBx, + kBxj, + kCbnz, + kCbz, + kClrex, + kClz, + kCmn, + kCmp, + kCrc32b, + kCrc32cb, + kCrc32ch, + kCrc32cw, + kCrc32h, + kCrc32w, + kDmb, + kDsb, + kEor, + kEors, + kFldmdbx, + kFldmiax, + kFstmdbx, + kFstmiax, + kHlt, + kHvc, + kIsb, + kIt, + kLda, + kLdab, + kLdaex, + kLdaexb, + kLdaexd, + kLdaexh, + kLdah, + kLdm, + kLdmda, + kLdmdb, + kLdmea, + kLdmed, + kLdmfa, + kLdmfd, + kLdmib, + kLdr, + kLdrb, + kLdrd, + kLdrex, + kLdrexb, + kLdrexd, + kLdrexh, + kLdrh, + kLdrsb, + kLdrsh, + kLsl, + kLsls, + kLsr, + kLsrs, + kMla, + kMlas, + kMls, + kMov, + kMovs, + kMovt, + kMovw, + kMrs, + kMsr, + kMul, + kMuls, + kMvn, + kMvns, + kNop, + kOrn, + kOrns, + kOrr, + kOrrs, + kPkhbt, + kPkhtb, + kPld, + kPldw, + kPli, + kPop, + kPush, + kQadd, + kQadd16, + kQadd8, + kQasx, + kQdadd, + kQdsub, + kQsax, + kQsub, + kQsub16, + kQsub8, + kRbit, + kRev, + kRev16, + kRevsh, + kRor, + kRors, + kRrx, + kRrxs, + kRsb, + kRsbs, + kRsc, + kRscs, + kSadd16, + kSadd8, + kSasx, + kSbc, + kSbcs, + kSbfx, + kSdiv, + kSel, + kShadd16, + kShadd8, + kShasx, + kShsax, + kShsub16, + kShsub8, + kSmlabb, + kSmlabt, + kSmlad, + kSmladx, + kSmlal, + kSmlalbb, + kSmlalbt, + kSmlald, + kSmlaldx, + kSmlals, + kSmlaltb, + kSmlaltt, + kSmlatb, + kSmlatt, + kSmlawb, + kSmlawt, + kSmlsd, + kSmlsdx, + kSmlsld, + kSmlsldx, + kSmmla, + kSmmlar, + kSmmls, + kSmmlsr, + kSmmul, + kSmmulr, + kSmuad, + kSmuadx, + kSmulbb, + kSmulbt, + kSmull, + kSmulls, + kSmultb, + kSmultt, + kSmulwb, + kSmulwt, + kSmusd, + kSmusdx, + kSsat, + kSsat16, + kSsax, + kSsub16, + kSsub8, + kStl, + kStlb, + kStlex, + kStlexb, + kStlexd, + kStlexh, + kStlh, + kStm, + kStmda, + kStmdb, + kStmea, + kStmed, + kStmfa, + kStmfd, + kStmib, + kStr, + kStrb, + kStrd, + kStrex, + kStrexb, + kStrexd, + kStrexh, + kStrh, + kSub, + kSubs, + kSubw, + kSvc, + kSxtab, + kSxtab16, + kSxtah, + kSxtb, + kSxtb16, + kSxth, + kTbb, + kTbh, + kTeq, + kTst, + kUadd16, + kUadd8, + kUasx, + kUbfx, + kUdf, + kUdiv, + kUhadd16, + kUhadd8, + kUhasx, + kUhsax, + kUhsub16, + kUhsub8, + kUmaal, + kUmlal, + kUmlals, + kUmull, + kUmulls, + kUqadd16, + kUqadd8, + kUqasx, + kUqsax, + kUqsub16, + kUqsub8, + kUsad8, + kUsada8, + kUsat, + kUsat16, + kUsax, + kUsub16, + kUsub8, + kUxtab, + kUxtab16, + kUxtah, + kUxtb, + kUxtb16, + kUxth, + kVaba, + kVabal, + kVabd, + kVabdl, + kVabs, + kVacge, + kVacgt, + kVacle, + kVaclt, + kVadd, + kVaddhn, + kVaddl, + kVaddw, + kVand, + kVbic, + kVbif, + kVbit, + kVbsl, + kVceq, + kVcge, + kVcgt, + kVcle, + kVcls, + kVclt, + kVclz, + kVcmp, + kVcmpe, + kVcnt, + kVcvt, + kVcvta, + kVcvtb, + kVcvtm, + kVcvtn, + kVcvtp, + kVcvtr, + kVcvtt, + kVdiv, + kVdup, + kVeor, + kVext, + kVfma, + kVfms, + kVfnma, + kVfnms, + kVhadd, + kVhsub, + kVld1, + kVld2, + kVld3, + kVld4, + kVldm, + kVldmdb, + kVldmia, + kVldr, + kVmax, + kVmaxnm, + kVmin, + kVminnm, + kVmla, + kVmlal, + kVmls, + kVmlsl, + kVmov, + kVmovl, + kVmovn, + kVmrs, + kVmsr, + kVmul, + kVmull, + kVmvn, + kVneg, + kVnmla, + kVnmls, + kVnmul, + kVorn, + kVorr, + kVpadal, + kVpadd, + kVpaddl, + kVpmax, + kVpmin, + kVpop, + kVpush, + kVqabs, + kVqadd, + 
kVqdmlal,
+  kVqdmlsl,
+  kVqdmulh,
+  kVqdmull,
+  kVqmovn,
+  kVqmovun,
+  kVqneg,
+  kVqrdmulh,
+  kVqrshl,
+  kVqrshrn,
+  kVqrshrun,
+  kVqshl,
+  kVqshlu,
+  kVqshrn,
+  kVqshrun,
+  kVqsub,
+  kVraddhn,
+  kVrecpe,
+  kVrecps,
+  kVrev16,
+  kVrev32,
+  kVrev64,
+  kVrhadd,
+  kVrinta,
+  kVrintm,
+  kVrintn,
+  kVrintp,
+  kVrintr,
+  kVrintx,
+  kVrintz,
+  kVrshl,
+  kVrshr,
+  kVrshrn,
+  kVrsqrte,
+  kVrsqrts,
+  kVrsra,
+  kVrsubhn,
+  kVseleq,
+  kVselge,
+  kVselgt,
+  kVselvs,
+  kVshl,
+  kVshll,
+  kVshr,
+  kVshrn,
+  kVsli,
+  kVsqrt,
+  kVsra,
+  kVsri,
+  kVst1,
+  kVst2,
+  kVst3,
+  kVst4,
+  kVstm,
+  kVstmdb,
+  kVstmia,
+  kVstr,
+  kVsub,
+  kVsubhn,
+  kVsubl,
+  kVsubw,
+  kVswp,
+  kVtbl,
+  kVtbx,
+  kVtrn,
+  kVtst,
+  kVuzp,
+  kVzip,
+  kYield
+};
+
+const char* ToCString(InstructionType type);
+// End of generated code.
+
+inline InstructionAttribute operator|(InstructionAttribute left,
+                                      InstructionAttribute right) {
+  return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
+                                           static_cast<uint32_t>(right));
+}
+
+}  // namespace aarch32
+}  // namespace vixl
+
+#endif  // VIXL_CONSTANTS_AARCH32_H_
diff --git a/dep/vixl/include/vixl/aarch32/disasm-aarch32.h b/dep/vixl/include/vixl/aarch32/disasm-aarch32.h
new file mode 100644
index 000000000..679f47ba6
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch32/disasm-aarch32.h
@@ -0,0 +1,2723 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_DISASM_AARCH32_H_
+#define VIXL_DISASM_AARCH32_H_
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include <iomanip>
+
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/operands-aarch32.h"
+
+namespace vixl {
+namespace aarch32 {
+
+class ITBlock {
+  Condition first_condition_;
+  Condition condition_;
+  uint16_t it_mask_;
+
+ public:
+  ITBlock() : first_condition_(al), condition_(al), it_mask_(0) {}
+  void Advance() {
+    condition_ = Condition((condition_.GetCondition() & 0xe) | (it_mask_ >> 3));
+    it_mask_ = (it_mask_ << 1) & 0xf;
+  }
+  bool InITBlock() const { return it_mask_ != 0; }
+  bool OutsideITBlock() const { return !InITBlock(); }
+  bool LastInITBlock() const { return it_mask_ == 0x8; }
+  bool OutsideITBlockOrLast() const {
+    return OutsideITBlock() || LastInITBlock();
+  }
+  void Set(Condition first_condition, uint16_t mask) {
+    condition_ = first_condition_ = first_condition;
+    it_mask_ = mask;
+  }
+  Condition GetFirstCondition() const { return first_condition_; }
+  Condition GetCurrentCondition() const { return condition_; }
+};
+
+class Disassembler {
+ public:
+  enum LocationType {
+    kAnyLocation,
+    kCodeLocation,
+    kDataLocation,
+    kCoprocLocation,
+    kLoadByteLocation,
+    kLoadHalfWordLocation,
+    kLoadWordLocation,
+    kLoadDoubleWordLocation,
+    kLoadSignedByteLocation,
+    kLoadSignedHalfWordLocation,
+    kLoadSinglePrecisionLocation,
+    kLoadDoublePrecisionLocation,
+    kStoreByteLocation,
+    kStoreHalfWordLocation,
+    kStoreWordLocation,
+    kStoreDoubleWordLocation,
+    kStoreSinglePrecisionLocation,
+    kStoreDoublePrecisionLocation,
+    kVld1Location,
+    kVld2Location,
+    kVld3Location,
+    kVld4Location,
+    kVst1Location,
+    kVst2Location,
+    kVst3Location,
+    kVst4Location
+  };
+
+  class ConditionPrinter {
+    const ITBlock& it_block_;
+    Condition cond_;
+
+   public:
+    ConditionPrinter(const ITBlock& it_block, Condition cond)
+        : it_block_(it_block), cond_(cond) {}
+    const ITBlock& GetITBlock() const { return it_block_; }
+    Condition GetCond() const { return cond_; }
+    friend std::ostream& operator<<(std::ostream& os, ConditionPrinter cond) {
+      if (cond.it_block_.InITBlock() && cond.cond_.Is(al) &&
+          !cond.cond_.IsNone()) {
+        return os << "al";
+      }
+      return os << cond.cond_;
+    }
+  };
+
+  class ImmediatePrinter {
+    uint32_t imm_;
+
+   public:
+    explicit ImmediatePrinter(uint32_t imm) : imm_(imm) {}
+    uint32_t GetImm() const { return imm_; }
+    friend std::ostream& operator<<(std::ostream& os, ImmediatePrinter imm) {
+      return os << "#" << imm.GetImm();
+    }
+  };
+
+  class SignedImmediatePrinter {
+    int32_t imm_;
+
+   public:
+    explicit SignedImmediatePrinter(int32_t imm) : imm_(imm) {}
+    int32_t GetImm() const { return imm_; }
+    friend std::ostream& operator<<(std::ostream& os,
+                                    SignedImmediatePrinter imm) {
+      return os << "#" << imm.GetImm();
+    }
+  };
+
+  class RawImmediatePrinter {
+    uint32_t imm_;
+
+   public:
+    explicit RawImmediatePrinter(uint32_t imm) : imm_(imm) {}
+    uint32_t GetImm() const { return imm_; }
+    friend std::ostream& operator<<(std::ostream& os, RawImmediatePrinter imm) {
+      return os << imm.GetImm();
+    }
+  };
+
+  class DtPrinter {
+    DataType dt_;
+    DataType default_dt_;
+
+   public:
+    DtPrinter(DataType dt, DataType default_dt)
+        : dt_(dt), default_dt_(default_dt) {}
+    DataType GetDt() const { return dt_; }
+    DataType GetDefaultDt() const { return default_dt_; }
+    friend std::ostream& operator<<(std::ostream& os, DtPrinter dt) {
+      if (dt.dt_.Is(dt.default_dt_)) return os;
+      return os << dt.dt_;
+    }
+  };
+
+  class IndexedRegisterPrinter {
+    DRegister reg_;
+    uint32_t index_;
+
+   public:
+    IndexedRegisterPrinter(DRegister reg, uint32_t index)
+        : reg_(reg), index_(index) {}
+    DRegister GetReg() const { return reg_; }
+    uint32_t GetIndex() const { return index_; }
+    friend std::ostream& operator<<(std::ostream& os,
+                                    IndexedRegisterPrinter reg) {
+      return os << reg.GetReg() << "[" << reg.GetIndex() << "]";
+    }
+  };
+
+  // TODO: Merge this class with PrintLabel below. This Location class
+  // represents a PC-relative offset, not an address.
+  class Location {
+   public:
+    typedef int32_t Offset;
+
+    Location(Offset immediate, Offset pc_offset)
+        : immediate_(immediate), pc_offset_(pc_offset) {}
+    Offset GetImmediate() const { return immediate_; }
+    Offset GetPCOffset() const { return pc_offset_; }
+
+   private:
+    Offset immediate_;
+    Offset pc_offset_;
+  };
+
+  class PrintLabel {
+    LocationType location_type_;
+    Location::Offset immediate_;
+    Location::Offset location_;
+
+   public:
+    PrintLabel(LocationType location_type,
+               Location* offset,
+               Location::Offset position)
+        : location_type_(location_type),
+          immediate_(offset->GetImmediate()),
+          location_(static_cast<Location::Offset>(
+              static_cast<int64_t>(offset->GetPCOffset()) +
+              offset->GetImmediate() + position)) {}
+
+    LocationType GetLocationType() const { return location_type_; }
+    Location::Offset GetLocation() const { return location_; }
+    Location::Offset GetImmediate() const { return immediate_; }
+
+    friend inline std::ostream& operator<<(std::ostream& os,
+                                           const PrintLabel& label) {
+      os << "0x" << std::hex << std::setw(8) << std::setfill('0')
+         << label.GetLocation() << std::dec;
+      return os;
+    }
+  };
+
+
+  class PrintMemOperand {
+    LocationType location_type_;
+    const MemOperand& operand_;
+
+   public:
+    PrintMemOperand(LocationType location_type, const MemOperand& operand)
+        : location_type_(location_type), operand_(operand) {}
+    LocationType GetLocationType() const { return location_type_; }
+    const MemOperand& GetOperand() const { return operand_; }
+  };
+
+  class PrintAlignedMemOperand {
+    LocationType location_type_;
+    const AlignedMemOperand& operand_;
+
+   public:
+    PrintAlignedMemOperand(LocationType location_type,
+                           const AlignedMemOperand& operand)
+        : location_type_(location_type), operand_(operand) {}
+    LocationType GetLocationType() const { return location_type_; }
+    const AlignedMemOperand& GetOperand() const { return operand_; }
+  };
+
+  class DisassemblerStream {
+    std::ostream& os_;
+    InstructionType current_instruction_type_;
+    InstructionAttribute current_instruction_attributes_;
+
+   public:
+    explicit DisassemblerStream(std::ostream& os)  // NOLINT(runtime/references)
+        : os_(os),
+          current_instruction_type_(kUndefInstructionType),
+          current_instruction_attributes_(kNoAttribute) {}
+    virtual ~DisassemblerStream() {}
+    std::ostream& os() const { return os_; }
+    void SetCurrentInstruction(
+        InstructionType current_instruction_type,
+        InstructionAttribute current_instruction_attributes) {
+      current_instruction_type_ = current_instruction_type;
+      current_instruction_attributes_ = current_instruction_attributes;
+    }
+    InstructionType GetCurrentInstructionType() const {
+      return current_instruction_type_;
+    }
+    InstructionAttribute GetCurrentInstructionAttributes() const {
+      return current_instruction_attributes_;
+    }
+    bool Has(InstructionAttribute attributes) const {
+      return (current_instruction_attributes_ & attributes) == attributes;
+    }
+    template <typename T>
+    DisassemblerStream& operator<<(T value) {
+      os_ << value;
+      return *this;
+    }
+    virtual DisassemblerStream& operator<<(const char* string) {
+      os_ << string;
+      return *this;
+ } + virtual DisassemblerStream& operator<<(const ConditionPrinter& cond) { + os_ << cond; + return *this; + } + virtual DisassemblerStream& operator<<(Condition cond) { + os_ << cond; + return *this; + } + virtual DisassemblerStream& operator<<(const EncodingSize& size) { + os_ << size; + return *this; + } + virtual DisassemblerStream& operator<<(const ImmediatePrinter& imm) { + os_ << imm; + return *this; + } + virtual DisassemblerStream& operator<<(const SignedImmediatePrinter& imm) { + os_ << imm; + return *this; + } + virtual DisassemblerStream& operator<<(const RawImmediatePrinter& imm) { + os_ << imm; + return *this; + } + virtual DisassemblerStream& operator<<(const DtPrinter& dt) { + os_ << dt; + return *this; + } + virtual DisassemblerStream& operator<<(const DataType& type) { + os_ << type; + return *this; + } + virtual DisassemblerStream& operator<<(Shift shift) { + os_ << shift; + return *this; + } + virtual DisassemblerStream& operator<<(Sign sign) { + os_ << sign; + return *this; + } + virtual DisassemblerStream& operator<<(Alignment alignment) { + os_ << alignment; + return *this; + } + virtual DisassemblerStream& operator<<(const PrintLabel& label) { + os_ << label; + return *this; + } + virtual DisassemblerStream& operator<<(const WriteBack& write_back) { + os_ << write_back; + return *this; + } + virtual DisassemblerStream& operator<<(const NeonImmediate& immediate) { + os_ << immediate; + return *this; + } + virtual DisassemblerStream& operator<<(Register reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(SRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(DRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(QRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(const RegisterOrAPSR_nzcv reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(SpecialRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(MaskedSpecialRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(SpecialFPRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(BankedRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(const RegisterList& list) { + os_ << list; + return *this; + } + virtual DisassemblerStream& operator<<(const SRegisterList& list) { + os_ << list; + return *this; + } + virtual DisassemblerStream& operator<<(const DRegisterList& list) { + os_ << list; + return *this; + } + virtual DisassemblerStream& operator<<(const NeonRegisterList& list) { + os_ << list; + return *this; + } + virtual DisassemblerStream& operator<<(const DRegisterLane& reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(const IndexedRegisterPrinter& reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(Coprocessor coproc) { + os_ << coproc; + return *this; + } + virtual DisassemblerStream& operator<<(CRegister reg) { + os_ << reg; + return *this; + } + virtual DisassemblerStream& operator<<(Endianness endian_specifier) { + os_ << endian_specifier; + return *this; + } + virtual DisassemblerStream& operator<<(MemoryBarrier option) { + os_ << option; + return *this; + } + virtual DisassemblerStream& operator<<(InterruptFlags iflags) { + os_ << iflags; + return *this; + } + virtual DisassemblerStream& operator<<(const Operand& operand) { + if 
(operand.IsImmediate()) {
+        if (Has(kBitwise)) {
+          return *this << "#0x" << std::hex << operand.GetImmediate()
+                       << std::dec;
+        }
+        return *this << "#" << operand.GetImmediate();
+      }
+      if (operand.IsImmediateShiftedRegister()) {
+        if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
+            (operand.GetShiftAmount() == 0)) {
+          return *this << operand.GetBaseRegister();
+        }
+        if (operand.GetShift().IsRRX()) {
+          return *this << operand.GetBaseRegister() << ", rrx";
+        }
+        return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
+                     << " #" << operand.GetShiftAmount();
+      }
+      if (operand.IsRegisterShiftedRegister()) {
+        return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
+                     << " " << operand.GetShiftRegister();
+      }
+      VIXL_UNREACHABLE();
+      return *this;
+    }
+    virtual DisassemblerStream& operator<<(const SOperand& operand) {
+      if (operand.IsImmediate()) {
+        return *this << operand.GetNeonImmediate();
+      }
+      return *this << operand.GetRegister();
+    }
+    virtual DisassemblerStream& operator<<(const DOperand& operand) {
+      if (operand.IsImmediate()) {
+        return *this << operand.GetNeonImmediate();
+      }
+      return *this << operand.GetRegister();
+    }
+    virtual DisassemblerStream& operator<<(const QOperand& operand) {
+      if (operand.IsImmediate()) {
+        return *this << operand.GetNeonImmediate();
+      }
+      return *this << operand.GetRegister();
+    }
+    virtual DisassemblerStream& operator<<(const MemOperand& operand) {
+      *this << "[" << operand.GetBaseRegister();
+      if (operand.GetAddrMode() == PostIndex) {
+        *this << "]";
+        if (operand.IsRegisterOnly()) return *this << "!";
+      }
+      if (operand.IsImmediate()) {
+        if ((operand.GetOffsetImmediate() != 0) ||
+            operand.GetSign().IsMinus() ||
+            ((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
+          if (operand.GetOffsetImmediate() == 0) {
+            *this << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
+          } else {
+            *this << ", #" << operand.GetOffsetImmediate();
+          }
+        }
+      } else if (operand.IsPlainRegister()) {
+        *this << ", " << operand.GetSign() << operand.GetOffsetRegister();
+      } else if (operand.IsShiftedRegister()) {
+        *this << ", " << operand.GetSign() << operand.GetOffsetRegister()
+              << ImmediateShiftOperand(operand.GetShift(),
+                                       operand.GetShiftAmount());
+      } else {
+        VIXL_UNREACHABLE();
+        return *this;
+      }
+      if (operand.GetAddrMode() == Offset) {
+        *this << "]";
+      } else if (operand.GetAddrMode() == PreIndex) {
+        *this << "]!";
+      }
+      return *this;
+    }
+    virtual DisassemblerStream& operator<<(const PrintMemOperand& operand) {
+      return *this << operand.GetOperand();
+    }
+    virtual DisassemblerStream& operator<<(const AlignedMemOperand& operand) {
+      *this << "[" << operand.GetBaseRegister() << operand.GetAlignment()
+            << "]";
+      if (operand.GetAddrMode() == PostIndex) {
+        if (operand.IsPlainRegister()) {
+          *this << ", " << operand.GetOffsetRegister();
+        } else {
+          *this << "!";
+        }
+      }
+      return *this;
+    }
+    virtual DisassemblerStream& operator<<(
+        const PrintAlignedMemOperand& operand) {
+      return *this << operand.GetOperand();
+    }
+  };
+
+ private:
+  class ITBlockScope {
+    ITBlock* const it_block_;
+    bool inside_;
+
+   public:
+    explicit ITBlockScope(ITBlock* it_block)
+        : it_block_(it_block), inside_(it_block->InITBlock()) {}
+    ~ITBlockScope() {
+      if (inside_) it_block_->Advance();
+    }
+  };
+
+  ITBlock it_block_;
+  DisassemblerStream* os_;
+  bool owns_os_;
+  uint32_t code_address_;
+  // True if the disassembler always outputs instructions with all the
+  // registers (even if two registers are identical and only one could be
+  // output).
+  bool use_short_hand_form_;
+
+ public:
+  explicit Disassembler(std::ostream& os,  // NOLINT(runtime/references)
+                        uint32_t code_address = 0)
+      : os_(new DisassemblerStream(os)),
+        owns_os_(true),
+        code_address_(code_address),
+        use_short_hand_form_(true) {}
+  explicit Disassembler(DisassemblerStream* os, uint32_t code_address = 0)
+      : os_(os),
+        owns_os_(false),
+        code_address_(code_address),
+        use_short_hand_form_(true) {}
+  virtual ~Disassembler() {
+    if (owns_os_) {
+      delete os_;
+    }
+  }
+  DisassemblerStream& os() const { return *os_; }
+  void SetIT(Condition first_condition, uint16_t it_mask) {
+    it_block_.Set(first_condition, it_mask);
+  }
+  const ITBlock& GetITBlock() const { return it_block_; }
+  bool InITBlock() const { return it_block_.InITBlock(); }
+  bool OutsideITBlock() const { return it_block_.OutsideITBlock(); }
+  bool OutsideITBlockOrLast() const { return it_block_.OutsideITBlockOrLast(); }
+  void CheckNotIT() const { VIXL_ASSERT(it_block_.OutsideITBlock()); }
+  // Return the current condition depending on the IT state for T32.
+  Condition CurrentCond() const {
+    if (it_block_.OutsideITBlock()) return al;
+    return it_block_.GetCurrentCondition();
+  }
+  bool UseShortHandForm() const { return use_short_hand_form_; }
+  void SetUseShortHandForm(bool use_short_hand_form) {
+    use_short_hand_form_ = use_short_hand_form;
+  }
+
+  virtual void UnallocatedT32(uint32_t instruction) {
+    if (T32Size(instruction) == 2) {
+      os() << "unallocated " << std::hex << std::setw(4) << std::setfill('0')
+           << (instruction >> 16) << std::dec;
+    } else {
+      os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0')
+           << instruction << std::dec;
+    }
+  }
+  virtual void UnallocatedA32(uint32_t instruction) {
+    os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0')
+         << instruction << std::dec;
+  }
+  virtual void UnimplementedT32_16(const char* name, uint32_t instruction) {
+    os() << "unimplemented " << name << " T32:" << std::hex << std::setw(4)
+         << std::setfill('0') << (instruction >> 16) << std::dec;
+  }
+  virtual void UnimplementedT32_32(const char* name, uint32_t instruction) {
+    os() << "unimplemented " << name << " T32:" << std::hex << std::setw(8)
+         << std::setfill('0') << instruction << std::dec;
+  }
+  virtual void UnimplementedA32(const char* name, uint32_t instruction) {
+    os() << "unimplemented " << name << " ARM:" << std::hex << std::setw(8)
+         << std::setfill('0') << instruction << std::dec;
+  }
+  virtual void Unpredictable() { os() << " ; unpredictable"; }
+  virtual void UnpredictableT32(uint32_t /*instr*/) { return Unpredictable(); }
+  virtual void UnpredictableA32(uint32_t /*instr*/) { return Unpredictable(); }
+
+  static bool Is16BitEncoding(uint32_t instr) { return instr < 0xe8000000; }
+  uint32_t GetCodeAddress() const { return code_address_; }
+  void SetCodeAddress(uint32_t code_address) { code_address_ = code_address; }
+
+  // Start of generated code.
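+
+  // Illustrative note (not part of the upstream VIXL sources): the generated
+  // methods below are per-mnemonic printers; a decoder extracts operands from
+  // an encoding and dispatches to them, honouring the ITBlock state above for
+  // T32 conditions. A minimal sketch, assuming the DecodeA32 entry point that
+  // accompanies these declarations:
+  //
+  //   std::ostringstream ss;
+  //   vixl::aarch32::Disassembler disasm(ss, /*code_address=*/0x1000);
+  //   disasm.DecodeA32(0xe0810002);  // expected to print "add r0, r1, r2"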
+ + void adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void add(Condition cond, Register rd, const Operand& operand); + + void adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void adds(Register rd, const Operand& operand); + + void addw(Condition cond, Register rd, Register rn, const Operand& operand); + + void adr(Condition cond, EncodingSize size, Register rd, Location* location); + + void and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void asrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void b(Condition cond, EncodingSize size, Location* location); + + void bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width); + + void bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void bkpt(Condition cond, uint32_t imm); + + void bl(Condition cond, Location* location); + + void blx(Condition cond, Location* location); + + void blx(Condition cond, Register rm); + + void bx(Condition cond, Register rm); + + void bxj(Condition cond, Register rm); + + void cbnz(Register rn, Location* location); + + void cbz(Register rn, Location* location); + + void clrex(Condition cond); + + void clz(Condition cond, Register rd, Register rm); + + void cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void crc32b(Condition cond, Register rd, Register rn, Register rm); + + void crc32cb(Condition cond, Register rd, Register rn, Register rm); + + void crc32ch(Condition cond, Register rd, Register rn, Register rm); + + void crc32cw(Condition cond, Register rd, Register rn, Register rm); + + void crc32h(Condition cond, Register rd, Register rn, Register rm); + + void crc32w(Condition cond, Register rd, Register rn, Register rm); + + void dmb(Condition cond, MemoryBarrier option); + + void dsb(Condition cond, MemoryBarrier option); + + void eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void hlt(Condition cond, uint32_t imm); + + void hvc(Condition cond, uint32_t imm); + + void isb(Condition cond, MemoryBarrier option); + + void it(Condition cond, uint16_t 
mask); + + void lda(Condition cond, Register rt, const MemOperand& operand); + + void ldab(Condition cond, Register rt, const MemOperand& operand); + + void ldaex(Condition cond, Register rt, const MemOperand& operand); + + void ldaexb(Condition cond, Register rt, const MemOperand& operand); + + void ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldaexh(Condition cond, Register rt, const MemOperand& operand); + + void ldah(Condition cond, Register rt, const MemOperand& operand); + + void ldm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldr(Condition cond, EncodingSize size, Register rt, Location* location); + + void ldrb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrb(Condition cond, Register rt, Location* location); + + void ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldrd(Condition cond, Register rt, Register rt2, Location* location); + + void ldrex(Condition cond, Register rt, const MemOperand& operand); + + void ldrexb(Condition cond, Register rt, const MemOperand& operand); + + void ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldrexh(Condition cond, Register rt, const MemOperand& operand); + + void ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrh(Condition cond, Register rt, Location* location); + + void ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrsb(Condition cond, Register rt, Location* location); + + void ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrsh(Condition cond, Register rt, Location* location); + + void lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void mla(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mlas(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mls(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mov(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void movt(Condition cond, Register rd, 
const Operand& operand); + + void movw(Condition cond, Register rd, const Operand& operand); + + void mrs(Condition cond, Register rd, SpecialRegister spec_reg); + + void msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand); + + void mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm); + + void muls(Condition cond, Register rd, Register rn, Register rm); + + void mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void nop(Condition cond, EncodingSize size); + + void orn(Condition cond, Register rd, Register rn, const Operand& operand); + + void orns(Condition cond, Register rd, Register rn, const Operand& operand); + + void orr(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void pkhbt(Condition cond, Register rd, Register rn, const Operand& operand); + + void pkhtb(Condition cond, Register rd, Register rn, const Operand& operand); + + void pld(Condition cond, Location* location); + + void pld(Condition cond, const MemOperand& operand); + + void pldw(Condition cond, const MemOperand& operand); + + void pli(Condition cond, const MemOperand& operand); + + void pli(Condition cond, Location* location); + + void pop(Condition cond, EncodingSize size, RegisterList registers); + + void pop(Condition cond, EncodingSize size, Register rt); + + void push(Condition cond, EncodingSize size, RegisterList registers); + + void push(Condition cond, EncodingSize size, Register rt); + + void qadd(Condition cond, Register rd, Register rm, Register rn); + + void qadd16(Condition cond, Register rd, Register rn, Register rm); + + void qadd8(Condition cond, Register rd, Register rn, Register rm); + + void qasx(Condition cond, Register rd, Register rn, Register rm); + + void qdadd(Condition cond, Register rd, Register rm, Register rn); + + void qdsub(Condition cond, Register rd, Register rm, Register rn); + + void qsax(Condition cond, Register rd, Register rn, Register rm); + + void qsub(Condition cond, Register rd, Register rm, Register rn); + + void qsub16(Condition cond, Register rd, Register rn, Register rm); + + void qsub8(Condition cond, Register rd, Register rn, Register rm); + + void rbit(Condition cond, Register rd, Register rm); + + void rev(Condition cond, EncodingSize size, Register rd, Register rm); + + void rev16(Condition cond, EncodingSize size, Register rd, Register rm); + + void revsh(Condition cond, EncodingSize size, Register rd, Register rm); + + void ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void rors(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void rrx(Condition cond, Register rd, Register rm); + + void rrxs(Condition cond, Register rd, Register rm); + + void rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void rsc(Condition cond, Register rd, Register rn, const Operand& operand); + + void rscs(Condition cond, Register rd, Register rn, const Operand& operand); + + void sadd16(Condition cond, Register rd, Register rn, Register rm); + + void sadd8(Condition cond, Register rd, Register 
rn, Register rm); + + void sasx(Condition cond, Register rd, Register rn, Register rm); + + void sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void sdiv(Condition cond, Register rd, Register rn, Register rm); + + void sel(Condition cond, Register rd, Register rn, Register rm); + + void shadd16(Condition cond, Register rd, Register rn, Register rm); + + void shadd8(Condition cond, Register rd, Register rn, Register rm); + + void shasx(Condition cond, Register rd, Register rn, Register rm); + + void shsax(Condition cond, Register rd, Register rn, Register rm); + + void shsub16(Condition cond, Register rd, Register rn, Register rm); + + void shsub8(Condition cond, Register rd, Register rn, Register rm); + + void smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmul(Condition cond, Register rd, Register rn, Register rm); + + void smmulr(Condition cond, Register rd, Register rn, Register rm); + + void smuad(Condition cond, Register rd, Register rn, Register rm); + + void smuadx(Condition cond, Register rd, Register rn, Register rm); + + void smulbb(Condition cond, Register rd, Register rn, Register rm); + + void smulbt(Condition cond, Register rd, Register rn, Register rm); + + 
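(Every declaration in this block is one of the decoder's per-mnemonic hooks: `DecodeA32`/`DecodeT32`, declared at the bottom of this class, parse an opcode and dispatch to the matching method with the decoded operands. A minimal sketch of driving that machinery, not part of the vendored sources: it uses the `PrintDisassembler` subclass declared later in this header, and the opcode constants are hand-assembled examples.)

```cpp
#include <iostream>

#include "aarch32/disasm-aarch32.h"

int main() {
  // PrintDisassembler implements the per-mnemonic hooks by printing each
  // decoded instruction to the given stream.
  vixl::aarch32::PrintDisassembler disasm(std::cout);
  disasm.DecodeA32(0xe0800001);  // dispatches to add(): add r0, r0, r1
  disasm.DecodeA32(0xe0010392);  // dispatches to mul(): mul r1, r2, r3
  return 0;
}
```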
void smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smultb(Condition cond, Register rd, Register rn, Register rm); + + void smultt(Condition cond, Register rd, Register rn, Register rm); + + void smulwb(Condition cond, Register rd, Register rn, Register rm); + + void smulwt(Condition cond, Register rd, Register rn, Register rm); + + void smusd(Condition cond, Register rd, Register rn, Register rm); + + void smusdx(Condition cond, Register rd, Register rn, Register rm); + + void ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + + void ssat16(Condition cond, Register rd, uint32_t imm, Register rn); + + void ssax(Condition cond, Register rd, Register rn, Register rm); + + void ssub16(Condition cond, Register rd, Register rn, Register rm); + + void ssub8(Condition cond, Register rd, Register rn, Register rm); + + void stl(Condition cond, Register rt, const MemOperand& operand); + + void stlb(Condition cond, Register rt, const MemOperand& operand); + + void stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + + void stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlh(Condition cond, Register rt, const MemOperand& operand); + + void stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmdb(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + + void strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sub(Condition cond, Register rd, const Operand& operand); + + void subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void subs(Register rd, const Operand& operand); + + void subw(Condition cond, Register rd, 
Register rn, const Operand& operand); + + void svc(Condition cond, uint32_t imm); + + void sxtab(Condition cond, Register rd, Register rn, const Operand& operand); + + void sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + + void sxtah(Condition cond, Register rd, Register rn, const Operand& operand); + + void sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void sxtb16(Condition cond, Register rd, const Operand& operand); + + void sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void tbb(Condition cond, Register rn, Register rm); + + void tbh(Condition cond, Register rn, Register rm); + + void teq(Condition cond, Register rn, const Operand& operand); + + void tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void uadd16(Condition cond, Register rd, Register rn, Register rm); + + void uadd8(Condition cond, Register rd, Register rn, Register rm); + + void uasx(Condition cond, Register rd, Register rn, Register rm); + + void ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void udf(Condition cond, EncodingSize size, uint32_t imm); + + void udiv(Condition cond, Register rd, Register rn, Register rm); + + void uhadd16(Condition cond, Register rd, Register rn, Register rm); + + void uhadd8(Condition cond, Register rd, Register rn, Register rm); + + void uhasx(Condition cond, Register rd, Register rn, Register rm); + + void uhsax(Condition cond, Register rd, Register rn, Register rm); + + void uhsub16(Condition cond, Register rd, Register rn, Register rm); + + void uhsub8(Condition cond, Register rd, Register rn, Register rm); + + void umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void uqadd16(Condition cond, Register rd, Register rn, Register rm); + + void uqadd8(Condition cond, Register rd, Register rn, Register rm); + + void uqasx(Condition cond, Register rd, Register rn, Register rm); + + void uqsax(Condition cond, Register rd, Register rn, Register rm); + + void uqsub16(Condition cond, Register rd, Register rn, Register rm); + + void uqsub8(Condition cond, Register rd, Register rn, Register rm); + + void usad8(Condition cond, Register rd, Register rn, Register rm); + + void usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void usat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + + void usat16(Condition cond, Register rd, uint32_t imm, Register rn); + + void usax(Condition cond, Register rd, Register rn, Register rm); + + void usub16(Condition cond, Register rd, Register rn, Register rm); + + void usub8(Condition cond, Register rd, Register rn, Register rm); + + void uxtab(Condition cond, Register rd, Register rn, const Operand& operand); + + void uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + + void uxtah(Condition cond, Register rd, Register rn, const Operand& operand); + + void uxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void uxtb16(Condition cond, Register rd, const Operand& 
operand); + + void uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vabs(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + + void vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& 
operand); + + void vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcls(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vcls(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vclz(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vclz(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vcmp(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + + void vcmp(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand); + + void vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand); + + void vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, 
SRegister rd, SRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vdup(Condition cond, DataType dt, QRegister rd, Register rt); + + void vdup(Condition cond, DataType dt, DRegister rd, Register rt); + + void vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm); + + void vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm); + + void veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand); + + void vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand); + + void vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vhsub( + Condition cond, DataType dt, DRegister 
rd, DRegister rn, DRegister rm); + + void vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + + void vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldr(Condition cond, DataType dt, DRegister rd, Location* location); + + void vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + + void vldr(Condition cond, DataType dt, SRegister rd, Location* location); + + void vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + + void vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmls( + 
Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmov(Condition cond, Register rt, SRegister rn); + + void vmov(Condition cond, SRegister rn, Register rt); + + void vmov(Condition cond, Register rt, Register rt2, DRegister rm); + + void vmov(Condition cond, DRegister rm, Register rt, Register rt2); + + void vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + + void vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + + void vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt); + + void vmov(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vmov(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + + void vmov(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + + void vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn); + + void vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm); + + void vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vmrs(Condition cond, RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg); + + void vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt); + + void vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + + void vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmvn(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vmvn(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + + void vneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vneg(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vpadal(Condition cond, DataType 
dt, QRegister rd, QRegister rm); + + void vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpop(Condition cond, DataType dt, DRegisterList dreglist); + + void vpop(Condition cond, DataType dt, SRegisterList sreglist); + + void vpush(Condition cond, DataType dt, DRegisterList dreglist); + + void vpush(Condition cond, DataType dt, SRegisterList sreglist); + + void vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + + void vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + + void vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); 
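(A pattern worth noting in this block: each NEON operation is declared twice, a `DRegister` form for 64-bit vectors and a `QRegister` form for 128-bit vectors, with `DataType` carrying the lane arrangement. A sketch of the emitting side that these declarations disassemble, assuming the `MacroAssembler` from macro-assembler-aarch32.h elsewhere in this diff; illustration only.)

```cpp
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

void EmitNeonAdds(MacroAssembler* masm) {
  // D-register overload: one 64-bit vector, eight 8-bit integer lanes.
  masm->Vadd(I8, d0, d1, d2);  // vadd.i8 d0, d1, d2
  // Q-register overload: one 128-bit vector, sixteen 8-bit integer lanes.
  masm->Vadd(I8, q0, q1, q2);  // vadd.i8 q0, q1, q2
}
```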
+ + void vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vrinta(DataType dt, DRegister rd, DRegister rm); + + void vrinta(DataType dt, QRegister rd, QRegister rm); + + void vrinta(DataType dt, SRegister rd, SRegister rm); + + void vrintm(DataType dt, DRegister rd, DRegister rm); + + void vrintm(DataType dt, QRegister rd, QRegister rm); + + void vrintm(DataType dt, SRegister rd, SRegister rm); + + void vrintn(DataType dt, DRegister rd, DRegister rm); + + void vrintn(DataType dt, QRegister rd, QRegister rm); + + void vrintn(DataType dt, SRegister rd, SRegister rm); + + void vrintp(DataType dt, DRegister rd, DRegister rm); + + void vrintp(DataType dt, QRegister rd, QRegister rm); + + void vrintp(DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintx(DataType dt, QRegister rd, QRegister rm); + + void vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintz(DataType dt, QRegister rd, QRegister rm); + + void vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + + void vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + + void vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void 
vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + + void vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + + void vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void 
vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + + void vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + + void vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + + void vswp(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vswp(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + + void vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + + void vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vzip(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vzip(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void yield(Condition cond, EncodingSize size); + + int T32Size(uint32_t instr); + void DecodeT32(uint32_t instr); + void DecodeA32(uint32_t instr); +}; + +DataTypeValue Dt_L_imm6_1_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_L_imm6_2_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_L_imm6_3_Decode(uint32_t value); +DataTypeValue Dt_L_imm6_4_Decode(uint32_t value); +DataTypeValue Dt_imm6_1_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_imm6_2_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_imm6_3_Decode(uint32_t value); +DataTypeValue Dt_imm6_4_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_op_U_size_1_Decode(uint32_t value); +DataTypeValue Dt_op_size_1_Decode(uint32_t value); +DataTypeValue Dt_op_size_2_Decode(uint32_t value); +DataTypeValue Dt_op_size_3_Decode(uint32_t value); +DataTypeValue Dt_U_imm3H_1_Decode(uint32_t value); +DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_B_E_1_Decode(uint32_t value); +DataTypeValue Dt_op_1_Decode1(uint32_t value); +DataTypeValue Dt_op_1_Decode2(uint32_t value); +DataTypeValue Dt_op_2_Decode(uint32_t value); +DataTypeValue Dt_op_3_Decode(uint32_t value); +DataTypeValue Dt_U_sx_1_Decode(uint32_t value); +DataTypeValue Dt_op_U_1_Decode1(uint32_t value); +DataTypeValue Dt_op_U_1_Decode2(uint32_t value); +DataTypeValue Dt_sz_1_Decode(uint32_t value); +DataTypeValue Dt_F_size_1_Decode(uint32_t value); +DataTypeValue Dt_F_size_2_Decode(uint32_t value); +DataTypeValue Dt_F_size_3_Decode(uint32_t value); +DataTypeValue Dt_F_size_4_Decode(uint32_t value); +DataTypeValue Dt_U_size_1_Decode(uint32_t value); +DataTypeValue 
Dt_U_size_2_Decode(uint32_t value); +DataTypeValue Dt_U_size_3_Decode(uint32_t value); +DataTypeValue Dt_size_1_Decode(uint32_t value); +DataTypeValue Dt_size_2_Decode(uint32_t value); +DataTypeValue Dt_size_3_Decode(uint32_t value); +DataTypeValue Dt_size_4_Decode(uint32_t value); +DataTypeValue Dt_size_5_Decode(uint32_t value); +DataTypeValue Dt_size_6_Decode(uint32_t value); +DataTypeValue Dt_size_7_Decode(uint32_t value); +DataTypeValue Dt_size_8_Decode(uint32_t value); +DataTypeValue Dt_size_9_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_size_10_Decode(uint32_t value); +DataTypeValue Dt_size_11_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_size_12_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_size_13_Decode(uint32_t value); +DataTypeValue Dt_size_14_Decode(uint32_t value); +DataTypeValue Dt_size_15_Decode(uint32_t value); +DataTypeValue Dt_size_16_Decode(uint32_t value); +DataTypeValue Dt_size_17_Decode(uint32_t value); +// End of generated code. + +class PrintDisassembler : public Disassembler { + public: + explicit PrintDisassembler(std::ostream& os, // NOLINT(runtime/references) + uint32_t code_address = 0) + : Disassembler(os, code_address) {} + explicit PrintDisassembler(DisassemblerStream* os, uint32_t code_address = 0) + : Disassembler(os, code_address) {} + + virtual void PrintCodeAddress(uint32_t code_address) { + os() << "0x" << std::hex << std::setw(8) << std::setfill('0') + << code_address << "\t"; + } + + virtual void PrintOpcode16(uint32_t opcode) { + os() << std::hex << std::setw(4) << std::setfill('0') << opcode << " " + << std::dec << "\t"; + } + + virtual void PrintOpcode32(uint32_t opcode) { + os() << std::hex << std::setw(8) << std::setfill('0') << opcode << std::dec + << "\t"; + } + + const uint32_t* DecodeA32At(const uint32_t* instruction_address) { + DecodeA32(*instruction_address); + return instruction_address + 1; + } + + // Returns the address of the next instruction. + const uint16_t* DecodeT32At(const uint16_t* instruction_address, + const uint16_t* buffer_end); + void DecodeT32(uint32_t instruction); + void DecodeA32(uint32_t instruction); + void DisassembleA32Buffer(const uint32_t* buffer, size_t size_in_bytes); + void DisassembleT32Buffer(const uint16_t* buffer, size_t size_in_bytes); +}; + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_DISASM_AARCH32_H_ diff --git a/dep/vixl/include/vixl/aarch32/instructions-aarch32.h b/dep/vixl/include/vixl/aarch32/instructions-aarch32.h new file mode 100644 index 000000000..f11f2b02b --- /dev/null +++ b/dep/vixl/include/vixl/aarch32/instructions-aarch32.h @@ -0,0 +1,1359 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_ +#define VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_ + +extern "C" { +#include <stdint.h> +} + +#include <algorithm> +#include <ostream> + +#include "code-buffer-vixl.h" +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" + +#ifdef __arm__ +#define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp"))) +#else +#define HARDFLOAT __attribute__((noinline)) +#endif + +namespace vixl { +namespace aarch32 { + +class Operand; +class SOperand; +class DOperand; +class QOperand; +class MemOperand; +class AlignedMemOperand; + +enum AddrMode { Offset = 0, PreIndex = 1, PostIndex = 2 }; + +class CPURegister { + public: + enum RegisterType { + kNoRegister = 0, + kRRegister = 1, + kSRegister = 2, + kDRegister = 3, + kQRegister = 4 + }; + + private: + static const int kCodeBits = 5; + static const int kTypeBits = 4; + static const int kSizeBits = 8; + static const int kCodeShift = 0; + static const int kTypeShift = kCodeShift + kCodeBits; + static const int kSizeShift = kTypeShift + kTypeBits; + static const uint32_t kCodeMask = ((1 << kCodeBits) - 1) << kCodeShift; + static const uint32_t kTypeMask = ((1 << kTypeBits) - 1) << kTypeShift; + static const uint32_t kSizeMask = ((1 << kSizeBits) - 1) << kSizeShift; + uint32_t value_; + + public: + CPURegister(RegisterType type, uint32_t code, int size) + : value_((type << kTypeShift) | (code << kCodeShift) | + (size << kSizeShift)) { +#ifdef VIXL_DEBUG + switch (type) { + case kNoRegister: + break; + case kRRegister: + VIXL_ASSERT(code < kNumberOfRegisters); + VIXL_ASSERT(size == kRegSizeInBits); + break; + case kSRegister: + VIXL_ASSERT(code < kNumberOfSRegisters); + VIXL_ASSERT(size == kSRegSizeInBits); + break; + case kDRegister: + VIXL_ASSERT(code < kMaxNumberOfDRegisters); + VIXL_ASSERT(size == kDRegSizeInBits); + break; + case kQRegister: + VIXL_ASSERT(code < kNumberOfQRegisters); + VIXL_ASSERT(size == kQRegSizeInBits); + break; + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + RegisterType GetType() const { + return static_cast<RegisterType>((value_ & kTypeMask) >> kTypeShift); + } + bool IsRegister() const { return GetType() == kRRegister; } + bool IsS() const { return GetType() == kSRegister; } + bool IsD() const { return GetType() == kDRegister; } + bool IsQ() const { return GetType() == kQRegister; } + bool IsVRegister() const { return IsS() || IsD() || IsQ(); } + bool IsFPRegister() const { return IsS() || IsD(); } + uint32_t GetCode() const { return (value_ & kCodeMask) >> kCodeShift; } + uint32_t GetReg() const { return value_; } + int GetSizeInBits() const { return (value_ & kSizeMask) >> kSizeShift; } + int GetRegSizeInBytes() const { + return (GetType() == kNoRegister) ?
0 : (GetSizeInBits() / 8); + } + bool Is64Bits() const { return GetSizeInBits() == 64; } + bool Is128Bits() const { return GetSizeInBits() == 128; } + bool IsSameFormat(CPURegister reg) { + return (value_ & ~kCodeMask) == (reg.value_ & ~kCodeMask); + } + bool Is(CPURegister ref) const { return GetReg() == ref.GetReg(); } + bool IsValid() const { return GetType() != kNoRegister; } +}; + +class Register : public CPURegister { + public: + Register() : CPURegister(kNoRegister, 0, kRegSizeInBits) {} + explicit Register(uint32_t code) + : CPURegister(kRRegister, code % kNumberOfRegisters, kRegSizeInBits) { + VIXL_ASSERT(GetCode() < kNumberOfRegisters); + } + bool Is(Register ref) const { return GetCode() == ref.GetCode(); } + bool IsLow() const { return GetCode() < kNumberOfT32LowRegisters; } + bool IsLR() const { return GetCode() == kLrCode; } + bool IsPC() const { return GetCode() == kPcCode; } + bool IsSP() const { return GetCode() == kSpCode; } +}; + +std::ostream& operator<<(std::ostream& os, const Register reg); + +class RegisterOrAPSR_nzcv { + uint32_t code_; + + public: + explicit RegisterOrAPSR_nzcv(uint32_t code) : code_(code) { + VIXL_ASSERT(code_ < kNumberOfRegisters); + } + bool IsAPSR_nzcv() const { return code_ == kPcCode; } + uint32_t GetCode() const { return code_; } + Register AsRegister() const { + VIXL_ASSERT(!IsAPSR_nzcv()); + return Register(code_); + } +}; + +const RegisterOrAPSR_nzcv APSR_nzcv(kPcCode); + +inline std::ostream& operator<<(std::ostream& os, + const RegisterOrAPSR_nzcv reg) { + if (reg.IsAPSR_nzcv()) return os << "APSR_nzcv"; + return os << reg.AsRegister(); +} + +class SRegister; +class DRegister; +class QRegister; + +class VRegister : public CPURegister { + public: + VRegister() : CPURegister(kNoRegister, 0, 0) {} + VRegister(RegisterType type, uint32_t code, int size) + : CPURegister(type, code, size) {} + + SRegister S() const; + DRegister D() const; + QRegister Q() const; +}; + +class SRegister : public VRegister { + public: + SRegister() : VRegister(kNoRegister, 0, kSRegSizeInBits) {} + explicit SRegister(uint32_t code) + : VRegister(kSRegister, code, kSRegSizeInBits) {} + uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const { + if (four_bit_field_lowest_bit == 0) { + return ((GetCode() & 0x1) << single_bit_field) | + ((GetCode() & 0x1e) >> 1); + } + return ((GetCode() & 0x1) << single_bit_field) | + ((GetCode() & 0x1e) << (four_bit_field_lowest_bit - 1)); + } +}; + +inline unsigned ExtractSRegister(uint32_t instr, + int single_bit_field, + int four_bit_field_lowest_bit) { + VIXL_ASSERT(single_bit_field > 0); + if (four_bit_field_lowest_bit == 0) { + return ((instr << 1) & 0x1e) | ((instr >> single_bit_field) & 0x1); + } + return ((instr >> (four_bit_field_lowest_bit - 1)) & 0x1e) | + ((instr >> single_bit_field) & 0x1); +} + +inline std::ostream& operator<<(std::ostream& os, const SRegister reg) { + return os << "s" << reg.GetCode(); +} + +class DRegister : public VRegister { + public: + DRegister() : VRegister(kNoRegister, 0, kDRegSizeInBits) {} + explicit DRegister(uint32_t code) + : VRegister(kDRegister, code, kDRegSizeInBits) {} + SRegister GetLane(uint32_t lane) const { + uint32_t lane_count = kDRegSizeInBits / kSRegSizeInBits; + VIXL_ASSERT(lane < lane_count); + VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters); + return SRegister(GetCode() * lane_count + lane); + } + uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const { + VIXL_ASSERT(single_bit_field >= 4); + return ((GetCode() & 0x10) << 
(single_bit_field - 4)) | + ((GetCode() & 0xf) << four_bit_field_lowest_bit); + } +}; + +inline unsigned ExtractDRegister(uint32_t instr, + int single_bit_field, + int four_bit_field_lowest_bit) { + VIXL_ASSERT(single_bit_field >= 4); + return ((instr >> (single_bit_field - 4)) & 0x10) | + ((instr >> four_bit_field_lowest_bit) & 0xf); +} + +inline std::ostream& operator<<(std::ostream& os, const DRegister reg) { + return os << "d" << reg.GetCode(); +} + +enum DataTypeType { + kDataTypeS = 0x100, + kDataTypeU = 0x200, + kDataTypeF = 0x300, + kDataTypeI = 0x400, + kDataTypeP = 0x500, + kDataTypeUntyped = 0x600 +}; +const int kDataTypeSizeMask = 0x0ff; +const int kDataTypeTypeMask = 0xf00; +enum DataTypeValue { + kDataTypeValueInvalid = 0x000, + kDataTypeValueNone = 0x001, // value used when dt is ignored. + S8 = kDataTypeS | 8, + S16 = kDataTypeS | 16, + S32 = kDataTypeS | 32, + S64 = kDataTypeS | 64, + U8 = kDataTypeU | 8, + U16 = kDataTypeU | 16, + U32 = kDataTypeU | 32, + U64 = kDataTypeU | 64, + F16 = kDataTypeF | 16, + F32 = kDataTypeF | 32, + F64 = kDataTypeF | 64, + I8 = kDataTypeI | 8, + I16 = kDataTypeI | 16, + I32 = kDataTypeI | 32, + I64 = kDataTypeI | 64, + P8 = kDataTypeP | 8, + P64 = kDataTypeP | 64, + Untyped8 = kDataTypeUntyped | 8, + Untyped16 = kDataTypeUntyped | 16, + Untyped32 = kDataTypeUntyped | 32, + Untyped64 = kDataTypeUntyped | 64 +}; + +class DataType { + DataTypeValue value_; + + public: + explicit DataType(uint32_t size) + : value_(static_cast<DataTypeValue>(kDataTypeUntyped | size)) { + VIXL_ASSERT((size == 8) || (size == 16) || (size == 32) || (size == 64)); + } + // Users should be able to use "S8", "S16" and so forth to instantiate this + // class. + DataType(DataTypeValue value) : value_(value) {} // NOLINT(runtime/explicit) + DataTypeValue GetValue() const { return value_; } + DataTypeType GetType() const { + return static_cast<DataTypeType>(value_ & kDataTypeTypeMask); + } + uint32_t GetSize() const { return value_ & kDataTypeSizeMask; } + bool IsSize(uint32_t size) const { + return (value_ & kDataTypeSizeMask) == size; + } + const char* GetName() const; + bool Is(DataType type) const { return value_ == type.value_; } + bool Is(DataTypeValue value) const { return value_ == value; } + bool Is(DataTypeType type) const { return GetType() == type; } + bool IsNoneOr(DataTypeValue value) const { + return (value_ == value) || (value_ == kDataTypeValueNone); + } + bool Is(DataTypeType type, uint32_t size) const { + return value_ == static_cast<DataTypeValue>(type | size); + } + bool IsNoneOr(DataTypeType type, uint32_t size) const { + return Is(type, size) || Is(kDataTypeValueNone); + } +}; + +inline std::ostream& operator<<(std::ostream& os, DataType dt) { + return os << dt.GetName(); +} + +class DRegisterLane : public DRegister { + uint32_t lane_; + + public: + DRegisterLane(DRegister reg, uint32_t lane) + : DRegister(reg.GetCode()), lane_(lane) {} + DRegisterLane(uint32_t code, uint32_t lane) : DRegister(code), lane_(lane) {} + uint32_t GetLane() const { return lane_; } + uint32_t EncodeX(DataType dt, + int single_bit_field, + int four_bit_field_lowest_bit) const { + VIXL_ASSERT(single_bit_field >= 4); + uint32_t value = lane_ << ((dt.GetSize() == 16) ?
+    return ((value & 0x10) << (single_bit_field - 4)) |
+           ((value & 0xf) << four_bit_field_lowest_bit);
+  }
+};
+
+inline unsigned ExtractDRegisterAndLane(uint32_t instr,
+                                        DataType dt,
+                                        int single_bit_field,
+                                        int four_bit_field_lowest_bit,
+                                        int* lane) {
+  VIXL_ASSERT(single_bit_field >= 4);
+  uint32_t value = ((instr >> (single_bit_field - 4)) & 0x10) |
+                   ((instr >> four_bit_field_lowest_bit) & 0xf);
+  if (dt.GetSize() == 16) {
+    *lane = value >> 3;
+    return value & 0x7;
+  }
+  *lane = value >> 4;
+  return value & 0xf;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const DRegisterLane lane) {
+  os << "d" << lane.GetCode() << "[";
+  if (lane.GetLane() == static_cast<uint32_t>(-1)) return os << "??]";
+  return os << lane.GetLane() << "]";
+}
+
+class QRegister : public VRegister {
+ public:
+  QRegister() : VRegister(kNoRegister, 0, kQRegSizeInBits) {}
+  explicit QRegister(uint32_t code)
+      : VRegister(kQRegister, code, kQRegSizeInBits) {}
+  uint32_t Encode(int offset) { return GetCode() << offset; }
+  DRegister GetDLane(uint32_t lane) const {
+    uint32_t lane_count = kQRegSizeInBits / kDRegSizeInBits;
+    VIXL_ASSERT(lane < lane_count);
+    return DRegister(GetCode() * lane_count + lane);
+  }
+  DRegister GetLowDRegister() const { return DRegister(GetCode() * 2); }
+  DRegister GetHighDRegister() const { return DRegister(1 + GetCode() * 2); }
+  SRegister GetSLane(uint32_t lane) const {
+    uint32_t lane_count = kQRegSizeInBits / kSRegSizeInBits;
+    VIXL_ASSERT(lane < lane_count);
+    VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters);
+    return SRegister(GetCode() * lane_count + lane);
+  }
+  uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) {
+    // Encode "code * 2".
+    VIXL_ASSERT(single_bit_field >= 3);
+    return ((GetCode() & 0x8) << (single_bit_field - 3)) |
+           ((GetCode() & 0x7) << (four_bit_field_lowest_bit + 1));
+  }
+};
+
+inline unsigned ExtractQRegister(uint32_t instr,
+                                 int single_bit_field,
+                                 int four_bit_field_lowest_bit) {
+  VIXL_ASSERT(single_bit_field >= 3);
+  return ((instr >> (single_bit_field - 3)) & 0x8) |
+         ((instr >> (four_bit_field_lowest_bit + 1)) & 0x7);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const QRegister reg) {
+  return os << "q" << reg.GetCode();
+}
+
+// clang-format off
+#define AARCH32_REGISTER_CODE_LIST(R) \
+  R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+  R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)
+// clang-format on
+#define DEFINE_REGISTER(N) const Register r##N(N);
+AARCH32_REGISTER_CODE_LIST(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+#undef AARCH32_REGISTER_CODE_LIST
+
+enum RegNum { kIPRegNum = 12, kSPRegNum = 13, kLRRegNum = 14, kPCRegNum = 15 };
+
+const Register ip(kIPRegNum);
+const Register sp(kSPRegNum);
+const Register pc(kPCRegNum);
+const Register lr(kLRRegNum);
+const Register NoReg;
+const VRegister NoVReg;
+
+// clang-format off
+#define SREGISTER_CODE_LIST(R) \
+  R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+  R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+// clang-format on
+#define DEFINE_REGISTER(N) const SRegister s##N(N);
+SREGISTER_CODE_LIST(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+#undef SREGISTER_CODE_LIST
+const SRegister NoSReg;
+
+// clang-format off
+#define DREGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+// 
clang-format on +#define DEFINE_REGISTER(N) const DRegister d##N(N); +DREGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef DREGISTER_CODE_LIST +const DRegister NoDReg; + +// clang-format off +#define QREGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) +// clang-format on +#define DEFINE_REGISTER(N) const QRegister q##N(N); +QREGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef QREGISTER_CODE_LIST +const QRegister NoQReg; + +class RegisterList { + public: + RegisterList() : list_(0) {} + RegisterList(Register reg) // NOLINT(runtime/explicit) + : list_(RegisterToList(reg)) {} + RegisterList(Register reg1, Register reg2) + : list_(RegisterToList(reg1) | RegisterToList(reg2)) {} + RegisterList(Register reg1, Register reg2, Register reg3) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3)) {} + RegisterList(Register reg1, Register reg2, Register reg3, Register reg4) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3) | RegisterToList(reg4)) {} + explicit RegisterList(uint32_t list) : list_(list) {} + uint32_t GetList() const { return list_; } + void SetList(uint32_t list) { list_ = list; } + bool Includes(const Register& reg) const { + return (list_ & RegisterToList(reg)) != 0; + } + void Combine(const RegisterList& other) { list_ |= other.GetList(); } + void Combine(const Register& reg) { list_ |= RegisterToList(reg); } + void Remove(const RegisterList& other) { list_ &= ~other.GetList(); } + void Remove(const Register& reg) { list_ &= ~RegisterToList(reg); } + bool Overlaps(const RegisterList& other) const { + return (list_ & other.list_) != 0; + } + bool IsR0toR7orPC() const { + // True if all the registers from the list are not from r8-r14. + return (list_ & 0x7f00) == 0; + } + bool IsR0toR7orLR() const { + // True if all the registers from the list are not from r8-r13 nor from r15. + return (list_ & 0xbf00) == 0; + } + Register GetFirstAvailableRegister() const; + bool IsEmpty() const { return list_ == 0; } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2) { + return RegisterList(list_1.list_ | list_2.list_); + } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3) { + return Union(list_1, Union(list_2, list_3)); + } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3, + const RegisterList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2) { + return RegisterList(list_1.list_ & list_2.list_); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3, + const RegisterList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); + } + + private: + static uint32_t RegisterToList(Register reg) { + if (reg.GetType() == CPURegister::kNoRegister) { + return 0; + } else { + return UINT32_C(1) << reg.GetCode(); + } + } + + // Bitfield representation of all registers in the list + // (1 for r0, 2 for r1, 4 for r2, ...). 
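+  // For example (illustrative values derived from the encoding described
+  // above, not part of the original header):
+  //   RegisterList(r0, r2).GetList()     == 0x00000005  // bits 0 and 2 set
+  //   RegisterList(sp, lr, pc).GetList() == 0x0000e000  // bits 13, 14, 15 set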
+ uint32_t list_; +}; + +inline uint32_t GetRegisterListEncoding(const RegisterList& registers, + int first, + int count) { + return (registers.GetList() >> first) & ((1 << count) - 1); +} + +std::ostream& operator<<(std::ostream& os, RegisterList registers); + +class VRegisterList { + public: + VRegisterList() : list_(0) {} + explicit VRegisterList(VRegister reg) : list_(RegisterToList(reg)) {} + VRegisterList(VRegister reg1, VRegister reg2) + : list_(RegisterToList(reg1) | RegisterToList(reg2)) {} + VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3)) {} + VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3, VRegister reg4) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3) | RegisterToList(reg4)) {} + explicit VRegisterList(uint64_t list) : list_(list) {} + uint64_t GetList() const { return list_; } + void SetList(uint64_t list) { list_ = list; } + // Because differently-sized V registers overlap with one another, there is no + // way to implement a single 'Includes' function in a way that is unsurprising + // for all existing uses. + bool IncludesAllOf(const VRegister& reg) const { + return (list_ & RegisterToList(reg)) == RegisterToList(reg); + } + bool IncludesAliasOf(const VRegister& reg) const { + return (list_ & RegisterToList(reg)) != 0; + } + void Combine(const VRegisterList& other) { list_ |= other.GetList(); } + void Combine(const VRegister& reg) { list_ |= RegisterToList(reg); } + void Remove(const VRegisterList& other) { list_ &= ~other.GetList(); } + void Remove(const VRegister& reg) { list_ &= ~RegisterToList(reg); } + bool Overlaps(const VRegisterList& other) const { + return (list_ & other.list_) != 0; + } + QRegister GetFirstAvailableQRegister() const; + DRegister GetFirstAvailableDRegister() const; + SRegister GetFirstAvailableSRegister() const; + bool IsEmpty() const { return list_ == 0; } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2) { + return VRegisterList(list_1.list_ | list_2.list_); + } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3) { + return Union(list_1, Union(list_2, list_3)); + } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3, + const VRegisterList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2) { + return VRegisterList(list_1.list_ & list_2.list_); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3, + const VRegisterList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); + } + + private: + static uint64_t RegisterToList(VRegister reg) { + if (reg.GetType() == CPURegister::kNoRegister) { + return 0; + } else { + switch (reg.GetSizeInBits()) { + case kQRegSizeInBits: + return UINT64_C(0xf) << (reg.GetCode() * 4); + case kDRegSizeInBits: + return UINT64_C(0x3) << (reg.GetCode() * 2); + case kSRegSizeInBits: + return UINT64_C(0x1) << reg.GetCode(); + default: + VIXL_UNREACHABLE(); + return 0; + } + } + } + + // 
Bitfield representation of all registers in the list.
+  // (0x3 for d0, 0xc for d1, 0x30 for d2, ...). We have one, two or four bits
+  // per register according to their size. This way we can make sure that we
+  // account for overlapping registers.
+  // A register is wholly included in this list only if all of its bits are set.
+  // A register is aliased by the list if at least one of its bits are set.
+  // The IncludesAllOf and IncludesAliasOf helpers are provided to make this
+  // distinction clear.
+  uint64_t list_;
+};
+
+class SRegisterList {
+  SRegister first_;
+  int length_;
+
+ public:
+  explicit SRegisterList(SRegister reg) : first_(reg.GetCode()), length_(1) {}
+  SRegisterList(SRegister first, int length)
+      : first_(first.GetCode()), length_(length) {
+    VIXL_ASSERT(length >= 0);
+  }
+  SRegister GetSRegister(int n) const {
+    VIXL_ASSERT(n >= 0);
+    VIXL_ASSERT(n < length_);
+    return SRegister((first_.GetCode() + n) % kNumberOfSRegisters);
+  }
+  const SRegister& GetFirstSRegister() const { return first_; }
+  SRegister GetLastSRegister() const { return GetSRegister(length_ - 1); }
+  int GetLength() const { return length_; }
+};
+
+std::ostream& operator<<(std::ostream& os, SRegisterList registers);
+
+class DRegisterList {
+  DRegister first_;
+  int length_;
+
+ public:
+  explicit DRegisterList(DRegister reg) : first_(reg.GetCode()), length_(1) {}
+  DRegisterList(DRegister first, int length)
+      : first_(first.GetCode()), length_(length) {
+    VIXL_ASSERT(length >= 0);
+  }
+  DRegister GetDRegister(int n) const {
+    VIXL_ASSERT(n >= 0);
+    VIXL_ASSERT(n < length_);
+    return DRegister((first_.GetCode() + n) % kMaxNumberOfDRegisters);
+  }
+  const DRegister& GetFirstDRegister() const { return first_; }
+  DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); }
+  int GetLength() const { return length_; }
+};
+
+std::ostream& operator<<(std::ostream& os, DRegisterList registers);
+
+enum SpacingType { kSingle, kDouble };
+
+enum TransferType { kMultipleLanes, kOneLane, kAllLanes };
+
+class NeonRegisterList {
+  DRegister first_;
+  SpacingType spacing_;
+  TransferType type_;
+  int lane_;
+  int length_;
+
+ public:
+  NeonRegisterList(DRegister reg, TransferType type)
+      : first_(reg.GetCode()),
+        spacing_(kSingle),
+        type_(type),
+        lane_(-1),
+        length_(1) {
+    VIXL_ASSERT(type_ != kOneLane);
+  }
+  NeonRegisterList(DRegister reg, int lane)
+      : first_(reg.GetCode()),
+        spacing_(kSingle),
+        type_(kOneLane),
+        lane_(lane),
+        length_(1) {
+    VIXL_ASSERT((lane_ >= 0) && (lane_ < 8));
+  }
+  NeonRegisterList(DRegister first,
+                   DRegister last,
+                   SpacingType spacing,
+                   TransferType type)
+      : first_(first.GetCode()), spacing_(spacing), type_(type), lane_(-1) {
+    VIXL_ASSERT(type != kOneLane);
+    VIXL_ASSERT(first.GetCode() <= last.GetCode());
+
+    int range = last.GetCode() - first.GetCode();
+    VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2));
+    length_ = (IsDoubleSpaced() ? (range / 2) : range) + 1;
+
+    VIXL_ASSERT(length_ <= 4);
+  }
+  NeonRegisterList(DRegister first,
+                   DRegister last,
+                   SpacingType spacing,
+                   int lane)
+      : first_(first.GetCode()),
+        spacing_(spacing),
+        type_(kOneLane),
+        lane_(lane) {
+    VIXL_ASSERT((lane >= 0) && (lane < 8));
+    VIXL_ASSERT(first.GetCode() <= last.GetCode());
+
+    int range = last.GetCode() - first.GetCode();
+    VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2));
+    length_ = (IsDoubleSpaced() ?
(range / 2) : range) + 1; + + VIXL_ASSERT(length_ <= 4); + } + DRegister GetDRegister(int n) const { + VIXL_ASSERT(n >= 0); + VIXL_ASSERT(n < length_); + unsigned code = first_.GetCode() + (IsDoubleSpaced() ? (2 * n) : n); + VIXL_ASSERT(code < kMaxNumberOfDRegisters); + return DRegister(code); + } + const DRegister& GetFirstDRegister() const { return first_; } + DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); } + int GetLength() const { return length_; } + bool IsSingleSpaced() const { return spacing_ == kSingle; } + bool IsDoubleSpaced() const { return spacing_ == kDouble; } + bool IsTransferAllLanes() const { return type_ == kAllLanes; } + bool IsTransferOneLane() const { return type_ == kOneLane; } + bool IsTransferMultipleLanes() const { return type_ == kMultipleLanes; } + int GetTransferLane() const { return lane_; } +}; + +std::ostream& operator<<(std::ostream& os, NeonRegisterList registers); + +enum SpecialRegisterType { APSR = 0, CPSR = 0, SPSR = 1 }; + +class SpecialRegister { + uint32_t reg_; + + public: + explicit SpecialRegister(uint32_t reg) : reg_(reg) {} + SpecialRegister(SpecialRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetReg() const { return reg_; } + const char* GetName() const; + bool Is(SpecialRegister value) const { return reg_ == value.reg_; } + bool Is(uint32_t value) const { return reg_ == value; } + bool IsNot(uint32_t value) const { return reg_ != value; } +}; + +inline std::ostream& operator<<(std::ostream& os, SpecialRegister reg) { + return os << reg.GetName(); +} + +enum BankedRegisterType { + R8_usr = 0x00, + R9_usr = 0x01, + R10_usr = 0x02, + R11_usr = 0x03, + R12_usr = 0x04, + SP_usr = 0x05, + LR_usr = 0x06, + R8_fiq = 0x08, + R9_fiq = 0x09, + R10_fiq = 0x0a, + R11_fiq = 0x0b, + R12_fiq = 0x0c, + SP_fiq = 0x0d, + LR_fiq = 0x0e, + LR_irq = 0x10, + SP_irq = 0x11, + LR_svc = 0x12, + SP_svc = 0x13, + LR_abt = 0x14, + SP_abt = 0x15, + LR_und = 0x16, + SP_und = 0x17, + LR_mon = 0x1c, + SP_mon = 0x1d, + ELR_hyp = 0x1e, + SP_hyp = 0x1f, + SPSR_fiq = 0x2e, + SPSR_irq = 0x30, + SPSR_svc = 0x32, + SPSR_abt = 0x34, + SPSR_und = 0x36, + SPSR_mon = 0x3c, + SPSR_hyp = 0x3e +}; + +class BankedRegister { + uint32_t reg_; + + public: + explicit BankedRegister(unsigned reg) : reg_(reg) {} + BankedRegister(BankedRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetCode() const { return reg_; } + const char* GetName() const; +}; + +inline std::ostream& operator<<(std::ostream& os, BankedRegister reg) { + return os << reg.GetName(); +} + +enum MaskedSpecialRegisterType { + APSR_nzcvq = 0x08, + APSR_g = 0x04, + APSR_nzcvqg = 0x0c, + CPSR_c = 0x01, + CPSR_x = 0x02, + CPSR_xc = 0x03, + CPSR_s = APSR_g, + CPSR_sc = 0x05, + CPSR_sx = 0x06, + CPSR_sxc = 0x07, + CPSR_f = APSR_nzcvq, + CPSR_fc = 0x09, + CPSR_fx = 0x0a, + CPSR_fxc = 0x0b, + CPSR_fs = APSR_nzcvqg, + CPSR_fsc = 0x0d, + CPSR_fsx = 0x0e, + CPSR_fsxc = 0x0f, + SPSR_c = 0x11, + SPSR_x = 0x12, + SPSR_xc = 0x13, + SPSR_s = 0x14, + SPSR_sc = 0x15, + SPSR_sx = 0x16, + SPSR_sxc = 0x17, + SPSR_f = 0x18, + SPSR_fc = 0x19, + SPSR_fx = 0x1a, + SPSR_fxc = 0x1b, + SPSR_fs = 0x1c, + SPSR_fsc = 0x1d, + SPSR_fsx = 0x1e, + SPSR_fsxc = 0x1f +}; + +class MaskedSpecialRegister { + uint32_t reg_; + + public: + explicit MaskedSpecialRegister(uint32_t reg) : reg_(reg) { + VIXL_ASSERT(reg <= SPSR_fsxc); + } + MaskedSpecialRegister( + MaskedSpecialRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetReg() const { return reg_; } + const char* 
GetName() const;
+  bool Is(MaskedSpecialRegister value) const { return reg_ == value.reg_; }
+  bool Is(uint32_t value) const { return reg_ == value; }
+  bool IsNot(uint32_t value) const { return reg_ != value; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, MaskedSpecialRegister reg) {
+  return os << reg.GetName();
+}
+
+enum SpecialFPRegisterType {
+  FPSID = 0x0,
+  FPSCR = 0x1,
+  MVFR2 = 0x5,
+  MVFR1 = 0x6,
+  MVFR0 = 0x7,
+  FPEXC = 0x8
+};
+
+class SpecialFPRegister {
+  uint32_t reg_;
+
+ public:
+  explicit SpecialFPRegister(uint32_t reg) : reg_(reg) {
+#ifdef VIXL_DEBUG
+    switch (reg) {
+      case FPSID:
+      case FPSCR:
+      case MVFR2:
+      case MVFR1:
+      case MVFR0:
+      case FPEXC:
+        break;
+      default:
+        VIXL_UNREACHABLE();
+    }
+#endif
+  }
+  SpecialFPRegister(SpecialFPRegisterType reg)  // NOLINT(runtime/explicit)
+      : reg_(reg) {}
+  uint32_t GetReg() const { return reg_; }
+  const char* GetName() const;
+  bool Is(SpecialFPRegister value) const { return reg_ == value.reg_; }
+  bool Is(uint32_t value) const { return reg_ == value; }
+  bool IsNot(uint32_t value) const { return reg_ != value; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, SpecialFPRegister reg) {
+  return os << reg.GetName();
+}
+
+class CRegister {
+  uint32_t code_;
+
+ public:
+  explicit CRegister(uint32_t code) : code_(code) {
+    VIXL_ASSERT(code < kNumberOfRegisters);
+  }
+  uint32_t GetCode() const { return code_; }
+  bool Is(CRegister value) const { return code_ == value.code_; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, const CRegister reg) {
+  return os << "c" << reg.GetCode();
+}
+
+// clang-format off
+#define CREGISTER_CODE_LIST(R) \
+  R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+  R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)
+// clang-format on
+#define DEFINE_CREGISTER(N) const CRegister c##N(N);
+CREGISTER_CODE_LIST(DEFINE_CREGISTER)
+
+enum CoprocessorName { p10 = 10, p11 = 11, p14 = 14, p15 = 15 };
+
+class Coprocessor {
+  uint32_t coproc_;
+
+ public:
+  explicit Coprocessor(uint32_t coproc) : coproc_(coproc) {}
+  Coprocessor(CoprocessorName coproc)  // NOLINT(runtime/explicit)
+      : coproc_(static_cast<uint32_t>(coproc)) {}
+  bool Is(Coprocessor coproc) const { return coproc_ == coproc.coproc_; }
+  bool Is(CoprocessorName coproc) const { return coproc_ == coproc; }
+  uint32_t GetCoprocessor() const { return coproc_; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, Coprocessor coproc) {
+  return os << "p" << coproc.GetCoprocessor();
+}
+
+enum ConditionType {
+  eq = 0,
+  ne = 1,
+  cs = 2,
+  cc = 3,
+  mi = 4,
+  pl = 5,
+  vs = 6,
+  vc = 7,
+  hi = 8,
+  ls = 9,
+  ge = 10,
+  lt = 11,
+  gt = 12,
+  le = 13,
+  al = 14,
+  hs = cs,
+  lo = cc
+};
+
+class Condition {
+  uint32_t condition_;
+  static const uint32_t kNever = 15;
+  static const uint32_t kMask = 0xf;
+  static const uint32_t kNone = 0x10 | al;
+
+ public:
+  static const Condition None() { return Condition(kNone); }
+  static const Condition Never() { return Condition(kNever); }
+  explicit Condition(uint32_t condition) : condition_(condition) {
+    VIXL_ASSERT(condition <= kNone);
+  }
+  // Users should be able to use "eq", "ne" and so forth to instantiate this
+  // class.
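+  // For example (illustrative): passing the bare enumerator "eq" where a
+  // Condition is expected relies on this implicit conversion. Note also that
+  // Condition(eq).Negate() yields "ne": the A32/T32 condition codes come in
+  // complementary pairs differing only in the low bit, which Negate() below
+  // simply flips.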
+  Condition(ConditionType condition)  // NOLINT(runtime/explicit)
+      : condition_(condition) {}
+  uint32_t GetCondition() const { return condition_ & kMask; }
+  bool IsNone() const { return condition_ == kNone; }
+  const char* GetName() const;
+  bool Is(Condition value) const { return condition_ == value.condition_; }
+  bool Is(uint32_t value) const { return condition_ == value; }
+  bool IsNot(uint32_t value) const { return condition_ != value; }
+  bool IsNever() const { return condition_ == kNever; }
+  bool IsNotNever() const { return condition_ != kNever; }
+  Condition Negate() const {
+    VIXL_ASSERT(IsNot(al) && IsNot(kNever));
+    return Condition(condition_ ^ 1);
+  }
+};
+
+inline std::ostream& operator<<(std::ostream& os, Condition condition) {
+  return os << condition.GetName();
+}
+
+enum SignType { plus, minus };
+
+class Sign {
+ public:
+  Sign() : sign_(plus) {}
+  Sign(SignType sign) : sign_(sign) {}  // NOLINT(runtime/explicit)
+  const char* GetName() const { return (IsPlus() ? "" : "-"); }
+  bool IsPlus() const { return sign_ == plus; }
+  bool IsMinus() const { return sign_ == minus; }
+  int32_t ApplyTo(uint32_t value) { return IsPlus() ? value : -value; }
+
+ private:
+  SignType sign_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, Sign sign) {
+  return os << sign.GetName();
+}
+
+enum ShiftType { LSL = 0x0, LSR = 0x1, ASR = 0x2, ROR = 0x3, RRX = 0x4 };
+
+class Shift {
+ public:
+  Shift() : shift_(LSL) {}
+  Shift(ShiftType shift) : shift_(shift) {}  // NOLINT(runtime/explicit)
+  explicit Shift(uint32_t shift) : shift_(static_cast<ShiftType>(shift)) {}
+  const Shift& GetShift() const { return *this; }
+  ShiftType GetType() const { return shift_; }
+  uint32_t GetValue() const { return shift_; }
+  const char* GetName() const;
+  bool IsLSL() const { return shift_ == LSL; }
+  bool IsLSR() const { return shift_ == LSR; }
+  bool IsASR() const { return shift_ == ASR; }
+  bool IsROR() const { return shift_ == ROR; }
+  bool IsRRX() const { return shift_ == RRX; }
+  bool Is(Shift value) const { return shift_ == value.shift_; }
+  bool IsNot(Shift value) const { return shift_ != value.shift_; }
+  bool IsValidAmount(uint32_t amount) const;
+  static const Shift NoShift;
+
+ protected:
+  void SetType(ShiftType s) { shift_ = s; }
+
+ private:
+  ShiftType shift_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, Shift shift) {
+  return os << shift.GetName();
+}
+
+class ImmediateShiftOperand : public Shift {
+ public:
+  // Constructor used for assembly.
+  ImmediateShiftOperand(Shift shift, uint32_t amount)
+      : Shift(shift), amount_(amount) {
+#ifdef VIXL_DEBUG
+    switch (shift.GetType()) {
+      case LSL:
+        VIXL_ASSERT(amount <= 31);
+        break;
+      case ROR:
+        VIXL_ASSERT(amount > 0);
+        VIXL_ASSERT(amount <= 31);
+        break;
+      case LSR:
+      case ASR:
+        VIXL_ASSERT(amount > 0);
+        VIXL_ASSERT(amount <= 32);
+        break;
+      case RRX:
+        VIXL_ASSERT(amount == 0);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        break;
+    }
+#endif
+  }
+  // Constructor used for disassembly.
+  ImmediateShiftOperand(int shift, int amount);
+  uint32_t GetAmount() const { return amount_; }
+  bool Is(const ImmediateShiftOperand& rhs) const {
+    return amount_ == (rhs.amount_) && Shift::Is(rhs);
+  }
+
+ private:
+  uint32_t amount_;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+                                ImmediateShiftOperand const& shift_operand) {
+  if (shift_operand.IsLSL() && shift_operand.GetAmount() == 0) return os;
+  if (shift_operand.IsRRX()) return os << ", rrx";
+  return os << ", " << shift_operand.GetName() << " #"
+            << shift_operand.GetAmount();
+}
+
+class RegisterShiftOperand : public Shift {
+ public:
+  RegisterShiftOperand(ShiftType shift, Register shift_register)
+      : Shift(shift), shift_register_(shift_register) {
+    VIXL_ASSERT(!IsRRX() && shift_register_.IsValid());
+  }
+  const Register GetShiftRegister() const { return shift_register_; }
+  bool Is(const RegisterShiftOperand& rhs) const {
+    return shift_register_.Is(rhs.shift_register_) && Shift::Is(rhs);
+  }
+
+ private:
+  Register shift_register_;
+};
+
+inline std::ostream& operator<<(std::ostream& s,
+                                const RegisterShiftOperand& shift_operand) {
+  return s << shift_operand.GetName() << " "
+           << shift_operand.GetShiftRegister();
+}
+
+enum EncodingSizeType { Best, Narrow, Wide };
+
+class EncodingSize {
+  uint32_t size_;
+
+ public:
+  explicit EncodingSize(uint32_t size) : size_(size) {}
+  EncodingSize(EncodingSizeType size)  // NOLINT(runtime/explicit)
+      : size_(size) {}
+  uint32_t GetSize() const { return size_; }
+  const char* GetName() const;
+  bool IsBest() const { return size_ == Best; }
+  bool IsNarrow() const { return size_ == Narrow; }
+  bool IsWide() const { return size_ == Wide; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, EncodingSize size) {
+  return os << size.GetName();
+}
+
+enum WriteBackValue { NO_WRITE_BACK, WRITE_BACK };
+
+class WriteBack {
+  WriteBackValue value_;
+
+ public:
+  WriteBack(WriteBackValue value)  // NOLINT(runtime/explicit)
+      : value_(value) {}
+  explicit WriteBack(int value)
+      : value_((value == 0) ? NO_WRITE_BACK : WRITE_BACK) {}
+  uint32_t GetWriteBackUint32() const { return (value_ == WRITE_BACK) ? 1 : 0; }
+  bool DoesWriteBack() const { return value_ == WRITE_BACK; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, WriteBack write_back) {
+  if (write_back.DoesWriteBack()) return os << "!";
+  return os;
+}
+
+class EncodingValue {
+  bool valid_;
+  uint32_t encoding_value_;
+
+ public:
+  EncodingValue() {
+    valid_ = false;
+    encoding_value_ = 0;
+  }
+  bool IsValid() const { return valid_; }
+  uint32_t GetEncodingValue() const { return encoding_value_; }
+  void SetEncodingValue(uint32_t encoding_value) {
+    valid_ = true;
+    encoding_value_ = encoding_value;
+  }
+};
+
+class EncodingValueAndImmediate : public EncodingValue {
+  uint32_t encoded_immediate_;
+
+ public:
+  EncodingValueAndImmediate() { encoded_immediate_ = 0; }
+  uint32_t GetEncodedImmediate() const { return encoded_immediate_; }
+  void SetEncodedImmediate(uint32_t encoded_immediate) {
+    encoded_immediate_ = encoded_immediate;
+  }
+};
+
+class ImmediateT32 : public EncodingValue {
+ public:
+  explicit ImmediateT32(uint32_t imm);
+  static bool IsImmediateT32(uint32_t imm);
+  static uint32_t Decode(uint32_t value);
+};
+
+class ImmediateA32 : public EncodingValue {
+ public:
+  explicit ImmediateA32(uint32_t imm);
+  static bool IsImmediateA32(uint32_t imm);
+  static uint32_t Decode(uint32_t value);
+};
+
+// Return the encoding value of a shift type.
+uint32_t TypeEncodingValue(Shift shift);
+// Return the encoding value for a shift amount depending on the shift type.
+uint32_t AmountEncodingValue(Shift shift, uint32_t amount);
+
+enum MemoryBarrierType {
+  OSHLD = 0x1,
+  OSHST = 0x2,
+  OSH = 0x3,
+  NSHLD = 0x5,
+  NSHST = 0x6,
+  NSH = 0x7,
+  ISHLD = 0x9,
+  ISHST = 0xa,
+  ISH = 0xb,
+  LD = 0xd,
+  ST = 0xe,
+  SY = 0xf
+};
+
+class MemoryBarrier {
+  MemoryBarrierType type_;
+
+ public:
+  MemoryBarrier(MemoryBarrierType type)  // NOLINT(runtime/explicit)
+      : type_(type) {}
+  MemoryBarrier(uint32_t type)  // NOLINT(runtime/explicit)
+      : type_(static_cast<MemoryBarrierType>(type)) {
+    VIXL_ASSERT((type & 0x3) != 0);
+  }
+  MemoryBarrierType GetType() const { return type_; }
+  const char* GetName() const;
+};
+
+inline std::ostream& operator<<(std::ostream& os, MemoryBarrier option) {
+  return os << option.GetName();
+}
+
+enum InterruptFlagsType {
+  F = 0x1,
+  I = 0x2,
+  IF = 0x3,
+  A = 0x4,
+  AF = 0x5,
+  AI = 0x6,
+  AIF = 0x7
+};
+
+class InterruptFlags {
+  InterruptFlagsType type_;
+
+ public:
+  InterruptFlags(InterruptFlagsType type)  // NOLINT(runtime/explicit)
+      : type_(type) {}
+  InterruptFlags(uint32_t type)  // NOLINT(runtime/explicit)
+      : type_(static_cast<InterruptFlagsType>(type)) {
+    VIXL_ASSERT(type <= 7);
+  }
+  InterruptFlagsType GetType() const { return type_; }
+  const char* GetName() const;
+};
+
+inline std::ostream& operator<<(std::ostream& os, InterruptFlags option) {
+  return os << option.GetName();
+}
+
+enum EndiannessType { LE = 0, BE = 1 };
+
+class Endianness {
+  EndiannessType type_;
+
+ public:
+  Endianness(EndiannessType type) : type_(type) {}  // NOLINT(runtime/explicit)
+  Endianness(uint32_t type)  // NOLINT(runtime/explicit)
+      : type_(static_cast<EndiannessType>(type)) {
+    VIXL_ASSERT(type <= 1);
+  }
+  EndiannessType GetType() const { return type_; }
+  const char* GetName() const;
+};
+
+inline std::ostream& operator<<(std::ostream& os, Endianness endian_specifier) {
+  return os << endian_specifier.GetName();
+}
+
+enum AlignmentType {
+  k16BitAlign = 0,
+  k32BitAlign = 1,
+  k64BitAlign = 2,
+  k128BitAlign = 3,
+  k256BitAlign = 4,
+  kNoAlignment = 5,
+  kBadAlignment = 6
+};
+
+class Alignment {
+  AlignmentType align_;
+
+ public:
+  Alignment(AlignmentType align)  // NOLINT(runtime/explicit)
+      : align_(align) {}
+  Alignment(uint32_t align)  // NOLINT(runtime/explicit)
+      : align_(static_cast<AlignmentType>(align)) {
+    VIXL_ASSERT(align <= static_cast<uint32_t>(k256BitAlign));
+  }
+  AlignmentType GetType() const { return align_; }
+  bool Is(AlignmentType type) { return align_ == type; }
+};
+
+inline std::ostream& operator<<(std::ostream& os, Alignment align) {
+  if (align.GetType() == kBadAlignment) return os << " :??";
+  if (align.GetType() == kNoAlignment) return os;
+  return os << " :" << (0x10 << static_cast<uint32_t>(align.GetType()));
+}
+
+// Structure containing information on forward references.
+struct ReferenceInfo {
+  int size;
+  int min_offset;
+  int max_offset;
+  int alignment;  // As a power of two.
+  enum { kAlignPc, kDontAlignPc } pc_needs_aligning;
+};
+
+}  // namespace aarch32
+}  // namespace vixl
+
+#endif  // VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_
diff --git a/dep/vixl/include/vixl/aarch32/location-aarch32.h b/dep/vixl/include/vixl/aarch32/location-aarch32.h
new file mode 100644
index 000000000..0f29a6c66
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch32/location-aarch32.h
@@ -0,0 +1,411 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
+#define VIXL_AARCH32_LABEL_AARCH32_H_
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include <algorithm>
+#include <cstddef>
+#include <iomanip>
+#include <list>
+
+#include "invalset-vixl.h"
+#include "pool-manager.h"
+#include "utils-vixl.h"
+
+#include "constants-aarch32.h"
+#include "instructions-aarch32.h"
+
+namespace vixl {
+
+namespace aarch32 {
+
+class MacroAssembler;
+
+class Location : public LocationBase<int32_t> {
+  friend class Assembler;
+  friend class MacroAssembler;
+
+ public:
+  // Unbound location that can be used with the assembler bind() method and
+  // with the assembler methods for generating instructions, but will never
+  // be handled by the pool manager.
+  Location()
+      : LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
+        referenced_(false) {}
+
+  typedef int32_t Offset;
+
+  ~Location() {
+#ifdef VIXL_DEBUG
+    if (IsReferenced() && !IsBound()) {
+      VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
+    }
+#endif
+  }
+
+  bool IsReferenced() const { return referenced_; }
+
+ private:
+  class EmitOperator {
+   public:
+    explicit EmitOperator(InstructionSet isa) : isa_(isa) {
+#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
+      USE(isa_);
+      VIXL_ASSERT(isa == A32);
+#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
+      USE(isa_);
+      VIXL_ASSERT(isa == T32);
+#endif
+    }
+    virtual ~EmitOperator() {}
+    virtual uint32_t Encode(uint32_t /*instr*/,
+                            Location::Offset /*pc*/,
+                            const Location* /*label*/) const {
+      return 0;
+    }
+#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
+    bool IsUsingT32() const { return false; }
+#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
+    bool IsUsingT32() const { return true; }
+#else
+    bool IsUsingT32() const { return isa_ == T32; }
+#endif
+
+   private:
+    InstructionSet isa_;
+  };
+
+ protected:
+  class ForwardRef : public ForwardReference<int32_t> {
+   public:
+    // Default constructor for InvalSet.
+    ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
+
+    ForwardRef(const Location::EmitOperator* op,
+               int32_t location,
+               int size,
+               int32_t min_object_location,
+               int32_t max_object_location,
+               int object_alignment = 1)
+        : ForwardReference<int32_t>(location,
+                                    size,
+                                    min_object_location,
+                                    max_object_location,
+                                    object_alignment),
+          op_(op) {}
+
+    const Location::EmitOperator* op() const { return op_; }
+
+    // We must provide comparison operators to work with InvalSet.
+    bool operator==(const ForwardRef& other) const {
+      return GetLocation() == other.GetLocation();
+    }
+    bool operator<(const ForwardRef& other) const {
+      return GetLocation() < other.GetLocation();
+    }
+    bool operator<=(const ForwardRef& other) const {
+      return GetLocation() <= other.GetLocation();
+    }
+    bool operator>(const ForwardRef& other) const {
+      return GetLocation() > other.GetLocation();
+    }
+
+   private:
+    const Location::EmitOperator* op_;
+  };
+
+  static const int kNPreallocatedElements = 4;
+  // The following parameters will not affect ForwardRefList in practice, as we
+  // resolve all references at once and clear the list, so we do not need to
+  // remove individual elements by invalidating them.
+  static const int32_t kInvalidLinkKey = INT32_MAX;
+  static const size_t kReclaimFrom = 512;
+  static const size_t kReclaimFactor = 2;
+
+  typedef InvalSet<ForwardRef,
+                   kNPreallocatedElements,
+                   int32_t,
+                   kInvalidLinkKey,
+                   kReclaimFrom,
+                   kReclaimFactor>
+      ForwardRefListBase;
+  typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
+
+  class ForwardRefList : public ForwardRefListBase {
+   public:
+    ForwardRefList() : ForwardRefListBase() {}
+
+    using ForwardRefListBase::Back;
+    using ForwardRefListBase::Front;
+  };
+
+  class ForwardRefListIterator : public ForwardRefListIteratorBase {
+   public:
+    explicit ForwardRefListIterator(Location* location)
+        : ForwardRefListIteratorBase(&location->forward_) {}
+
+    // TODO: Remove these and use the STL-like interface instead. We'll need a
+    // const_iterator implemented for this.
+    using ForwardRefListIteratorBase::Advance;
+    using ForwardRefListIteratorBase::Current;
+  };
+
+  // For InvalSet::GetKey() and InvalSet::SetKey().
+  friend class InvalSet<ForwardRef,
+                        kNPreallocatedElements,
+                        int32_t,
+                        kInvalidLinkKey,
+                        kReclaimFrom,
+                        kReclaimFactor>;
+
+ private:
+  virtual void ResolveReferences(internal::AssemblerBase* assembler)
+      VIXL_OVERRIDE;
+
+  void SetReferenced() { referenced_ = true; }
+
+  bool HasForwardReferences() const { return !forward_.empty(); }
+
+  ForwardRef GetLastForwardReference() const {
+    VIXL_ASSERT(HasForwardReferences());
+    return forward_.Back();
+  }
+
+  // Add forward reference to this object. Called from the assembler.
+  void AddForwardRef(int32_t instr_location,
+                     const EmitOperator& op,
+                     const ReferenceInfo* info);
+
+  // Check if we need to add padding when binding this object, in order to
+  // meet the minimum location requirement.
+  bool Needs16BitPadding(int location) const;
+
+  void EncodeLocationFor(internal::AssemblerBase* assembler,
+                         int32_t from,
+                         const Location::EmitOperator* encoder);
+
+  // True if the label has been used at least once.
+  bool referenced_;
+
+ protected:
+  // Types passed to LocationBase. Must be distinct for unbound Locations (not
+  // relevant for bound locations, as they don't have a corresponding
+  // PoolObject).
+  static const int kRawLocation = 0;  // Will not be used by the pool manager.
+  static const int kVeneerType = 1;
+  static const int kLiteralType = 2;
+
+  // Contains the references to the unbound label.
+  ForwardRefList forward_;
+
+  // To be used only by derived classes.
+  Location(uint32_t type, int size, int alignment)
+      : LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
+
+  // To be used only by derived classes.
+  explicit Location(Offset location)
+      : LocationBase<int32_t>(location), referenced_(false) {}
+
+  virtual int GetMaxAlignment() const VIXL_OVERRIDE;
+  virtual int GetMinLocation() const VIXL_OVERRIDE;
+
+ private:
+  // Included to make the class concrete, however should never be called.
+  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
+    USE(masm);
+    VIXL_UNREACHABLE();
+  }
+};
+
+class Label : public Location {
+  static const int kVeneerSize = 4;
+  // Use an alignment of 1 for all architectures. Even though we can bind an
+  // unused label, because of the way the MacroAssembler works we can always be
+  // sure to have the correct buffer alignment for the instruction set we are
+  // using, so we do not need to enforce additional alignment requirements
+  // here.
+  // TODO: Consider modifying the interface of the pool manager to pass an
+  // optional additional alignment to Bind() in order to handle cases where the
+  // buffer could be unaligned.
+  static const int kVeneerAlignment = 1;
+
+ public:
+  Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
+  explicit Label(Offset location) : Location(location) {}
+
+ private:
+  virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
+    return false;
+  }
+  virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
+    return false;
+  }
+
+  virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
+  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
+
+  virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
+    return true;
+  }
+  virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
+    VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
+    return 1 * KBytes;
+  }
+};
+
+class RawLiteral : public Location {
+  // Some load instructions require alignment to 4 bytes. Since we do
+  // not know what instructions will reference a literal after we place
+  // it, we enforce a 4 byte alignment for literals that are 4 bytes or
+  // larger.
+  static const int kLiteralAlignment = 4;
+
+ public:
+  enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
+
+  enum DeletionPolicy {
+    kDeletedOnPlacementByPool,
+    kDeletedOnPoolDestruction,
+    kManuallyDeleted
+  };
+
+  RawLiteral(const void* addr,
+             int size,
+             PlacementPolicy placement_policy = kPlacedWhenUsed,
+             DeletionPolicy deletion_policy = kManuallyDeleted)
+      : Location(kLiteralType,
+                 size,
+                 (size < kLiteralAlignment) ? size : kLiteralAlignment),
+        addr_(addr),
+        manually_placed_(placement_policy == kManuallyPlaced),
+        deletion_policy_(deletion_policy) {
+    // We can't have manually placed literals that are not manually deleted.
+    VIXL_ASSERT(!IsManuallyPlaced() ||
+                (GetDeletionPolicy() == kManuallyDeleted));
+  }
+  RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
+      : Location(kLiteralType,
+                 size,
+                 (size < kLiteralAlignment) ? size : kLiteralAlignment),
+        addr_(addr),
+        manually_placed_(false),
+        deletion_policy_(deletion_policy) {}
+  const void* GetDataAddress() const { return addr_; }
+  int GetSize() const { return GetPoolObjectSizeInBytes(); }
+
+  bool IsManuallyPlaced() const { return manually_placed_; }
+
+ private:
+  DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
+
+  virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
+    return GetDeletionPolicy() == kDeletedOnPlacementByPool;
+  }
+  virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
+    return GetDeletionPolicy() == kDeletedOnPoolDestruction;
+  }
+  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
+
+  // Data address before it's moved into the code buffer.
+  const void* const addr_;
+  // When this flag is true, the label will be placed manually.
+  bool manually_placed_;
+  // When the literal is to be removed from memory.
+  // Can be delete'd when:
+  //   moved into the code buffer: kDeletedOnPlacementByPool
+  //   the pool is delete'd: kDeletedOnPoolDestruction
+  //   or left to the application: kManuallyDeleted.
+  DeletionPolicy deletion_policy_;
+
+  friend class MacroAssembler;
+};
+
+template <typename T>
+class Literal : public RawLiteral {
+ public:
+  explicit Literal(const T& value,
+                   PlacementPolicy placement_policy = kPlacedWhenUsed,
+                   DeletionPolicy deletion_policy = kManuallyDeleted)
+      : RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
+        value_(value) {}
+  explicit Literal(const T& value, DeletionPolicy deletion_policy)
+      : RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
+  void UpdateValue(const T& value, CodeBuffer* buffer) {
+    value_ = value;
+    if (IsBound()) {
+      buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
+    }
+  }
+
+ private:
+  T value_;
+};
+
+class StringLiteral : public RawLiteral {
+ public:
+  explicit StringLiteral(const char* str,
+                         PlacementPolicy placement_policy = kPlacedWhenUsed,
+                         DeletionPolicy deletion_policy = kManuallyDeleted)
+      : RawLiteral(str,
+                   static_cast<int>(strlen(str) + 1),
+                   placement_policy,
+                   deletion_policy) {
+    VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
+  }
+  explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
+      : RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
+    VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
+  }
+};
+
+}  // namespace aarch32
+
+
+// Required InvalSet template specialisations.
+#define INVAL_SET_TEMPLATE_PARAMETERS \
+  aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
+      int32_t, aarch32::Location::kInvalidLinkKey, \
+      aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
+template <>
+inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
+    const aarch32::Location::ForwardRef& element) {
+  return element.GetLocation();
+}
+template <>
+inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
+    aarch32::Location::ForwardRef* element, int32_t key) {
+  element->SetLocationToInvalidateOnly(key);
+}
+#undef INVAL_SET_TEMPLATE_PARAMETERS
+
+}  // namespace vixl
+
+#endif  // VIXL_AARCH32_LABEL_AARCH32_H_
diff --git a/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h b/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h
new file mode 100644
index 000000000..115d4d843
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h
@@ -0,0 +1,11185 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright
+//     notice, this list of conditions and the following disclaimer in the
+//     documentation and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may
+//     be used to endorse or promote products derived from this software
+//     without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_
+#define VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_
+
+#include "code-generation-scopes-vixl.h"
+#include "macro-assembler-interface.h"
+#include "pool-manager-impl.h"
+#include "pool-manager.h"
+#include "utils-vixl.h"
+
+#include "aarch32/assembler-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/operands-aarch32.h"
+
+namespace vixl {
+
+namespace aarch32 {
+
+class UseScratchRegisterScope;
+
+enum FlagsUpdate { LeaveFlags = 0, SetFlags = 1, DontCare = 2 };
+
+// We use a subclass to access the protected `ExactAssemblyScope` constructor
+// giving us control over the pools, and make the constructor private to limit
+// usage to code paths emitting pools.
+class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope {
+ private:
+  ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm,
+                                      size_t size,
+                                      SizePolicy size_policy = kExactSize);
+
+  friend class MacroAssembler;
+  friend class Label;
+};
+
+// Macro assembler for aarch32 instruction set.
+class MacroAssembler : public Assembler, public MacroAssemblerInterface {
+ public:
+  enum FinalizeOption {
+    kFallThrough,  // There may be more code to execute after calling Finalize.
+    kUnreachable   // Anything generated after calling Finalize is unreachable.
+  };
+
+  virtual internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
+    return this;
+  }
+
+  virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
+    return pool_manager_.IsBlocked();
+  }
+
+  virtual void EmitPoolHeader() VIXL_OVERRIDE {
+    // Check that we have the correct alignment.
+    if (IsUsingT32()) {
+      VIXL_ASSERT(GetBuffer()->Is16bitAligned());
+    } else {
+      VIXL_ASSERT(GetBuffer()->Is32bitAligned());
+    }
+    VIXL_ASSERT(pool_end_ == NULL);
+    pool_end_ = new Label();
+    ExactAssemblyScopeWithoutPoolsCheck guard(this,
+                                              kMaxInstructionSizeInBytes,
+                                              ExactAssemblyScope::kMaximumSize);
+    b(pool_end_);
+  }
+  virtual void EmitPoolFooter() VIXL_OVERRIDE {
+    // Align buffer to 4 bytes.
+ GetBuffer()->Align(); + if (pool_end_ != NULL) { + Bind(pool_end_); + delete pool_end_; + pool_end_ = NULL; + } + } + virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { + GetBuffer()->EmitZeroedBytes(n); + } + virtual void EmitNopBytes(int n) VIXL_OVERRIDE { + int nops = 0; + int nop_size = IsUsingT32() ? k16BitT32InstructionSizeInBytes + : kA32InstructionSizeInBytes; + VIXL_ASSERT(n % nop_size == 0); + nops = n / nop_size; + ExactAssemblyScopeWithoutPoolsCheck guard(this, + n, + ExactAssemblyScope::kExactSize); + for (int i = 0; i < nops; ++i) { + nop(); + } + } + + + private: + class MacroEmissionCheckScope : public EmissionCheckScope { + public: + explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm, + PoolPolicy pool_policy = kBlockPools) + : EmissionCheckScope(masm, + kTypicalMacroInstructionMaxSize, + kMaximumSize, + pool_policy) {} + + private: + static const size_t kTypicalMacroInstructionMaxSize = + 8 * kMaxInstructionSizeInBytes; + }; + + class MacroAssemblerContext { + public: + MacroAssemblerContext() : count_(0) {} + ~MacroAssemblerContext() {} + unsigned GetRecursiveCount() const { return count_; } + void Up(const char* loc) { + location_stack_[count_] = loc; + count_++; + if (count_ >= kMaxRecursion) { + printf( + "Recursion limit reached; unable to resolve macro assembler " + "call.\n"); + printf("Macro assembler context stack:\n"); + for (unsigned i = 0; i < kMaxRecursion; i++) { + printf("%10s %s\n", (i == 0) ? "oldest -> " : "", location_stack_[i]); + } + VIXL_ABORT(); + } + } + void Down() { + VIXL_ASSERT((count_ > 0) && (count_ < kMaxRecursion)); + count_--; + } + + private: + unsigned count_; + static const uint32_t kMaxRecursion = 6; + const char* location_stack_[kMaxRecursion]; + }; + + // This scope is used at each Delegate entry to avoid infinite recursion of + // Delegate calls. The limit is defined by + // MacroAssemblerContext::kMaxRecursion. + class ContextScope { + public: + explicit ContextScope(MacroAssembler* const masm, const char* loc) + : masm_(masm) { + VIXL_ASSERT(masm_->AllowMacroInstructions()); + masm_->GetContext()->Up(loc); + } + ~ContextScope() { masm_->GetContext()->Down(); } + + private: + MacroAssembler* const masm_; + }; + + MacroAssemblerContext* GetContext() { return &context_; } + + class ITScope { + public: + ITScope(MacroAssembler* masm, + Condition* cond, + const MacroEmissionCheckScope& scope, + bool can_use_it = false) + : masm_(masm), cond_(*cond), can_use_it_(can_use_it) { + // The 'scope' argument is used to remind us to only use this scope inside + // a MacroEmissionCheckScope. This way, we do not need to check whether + // we need to emit the pools or grow the code buffer when emitting the + // IT or B instructions. + USE(scope); + if (!cond_.Is(al) && masm->IsUsingT32()) { + if (can_use_it_) { + // IT is not deprecated (that implies a 16 bit T32 instruction). + // We generate an IT instruction and a conditional instruction. + masm->it(cond_); + } else { + // The usage of IT is deprecated for the instruction. + // We generate a conditional branch and an unconditional instruction. + // Generate the branch. + masm_->b(cond_.Negate(), Narrow, &label_); + // Tell the macro-assembler to generate unconditional instructions. + *cond = al; + } + } +#ifdef VIXL_DEBUG + initial_cursor_offset_ = masm->GetCursorOffset(); +#else + USE(initial_cursor_offset_); +#endif + } + ~ITScope() { + if (label_.IsReferenced()) { + // We only use the label for conditional T32 instructions for which we + // cannot use IT. 
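+      // For example (illustrative): for a conditional macro instruction whose
+      // IT usage is deprecated, the constructor above emitted
+      //   b<negated cond> label_ ; <unconditional instruction(s)>
+      // so label_ is bound here, just after the instructions it skips.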
+      VIXL_ASSERT(!cond_.Is(al));
+      VIXL_ASSERT(masm_->IsUsingT32());
+      VIXL_ASSERT(!can_use_it_);
+      VIXL_ASSERT(masm_->GetCursorOffset() - initial_cursor_offset_ <=
+                  kMaxT32MacroInstructionSizeInBytes);
+      masm_->BindHelper(&label_);
+    } else if (masm_->IsUsingT32() && !cond_.Is(al)) {
+      // If we've generated a conditional T32 instruction but haven't used the
+      // label, we must have used IT. Check that we did not generate a
+      // deprecated sequence.
+      VIXL_ASSERT(can_use_it_);
+      VIXL_ASSERT(masm_->GetCursorOffset() - initial_cursor_offset_ <=
+                  k16BitT32InstructionSizeInBytes);
+    }
+  }
+
+ private:
+  MacroAssembler* masm_;
+  Condition cond_;
+  Label label_;
+  bool can_use_it_;
+  uint32_t initial_cursor_offset_;
+};
+
+ protected:
+  virtual void BlockPools() VIXL_OVERRIDE { pool_manager_.Block(); }
+  virtual void ReleasePools() VIXL_OVERRIDE {
+    pool_manager_.Release(GetCursorOffset());
+  }
+  virtual void EnsureEmitPoolsFor(size_t size) VIXL_OVERRIDE;
+
+  // Tell whether any of the macro instructions can be used. When false, the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
+    allow_macro_instructions_ = value;
+  }
+
+  void HandleOutOfBoundsImmediate(Condition cond, Register tmp, uint32_t imm);
+
+ public:
+  // TODO: If we change the MacroAssembler to disallow setting a different ISA,
+  // we can change the alignment of the pool in the pool manager constructor to
+  // be 2 bytes for T32.
+  explicit MacroAssembler(InstructionSet isa = kDefaultISA)
+      : Assembler(isa),
+        available_(r12),
+        current_scratch_scope_(NULL),
+        pool_manager_(4 /*header_size*/,
+                      4 /*alignment*/,
+                      4 /*buffer_alignment*/),
+        generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+        pool_end_(NULL) {
+#ifdef VIXL_DEBUG
+    SetAllowMacroInstructions(true);
+#else
+    USE(allow_macro_instructions_);
+#endif
+  }
+  explicit MacroAssembler(size_t size, InstructionSet isa = kDefaultISA)
+      : Assembler(size, isa),
+        available_(r12),
+        current_scratch_scope_(NULL),
+        pool_manager_(4 /*header_size*/,
+                      4 /*alignment*/,
+                      4 /*buffer_alignment*/),
+        generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+        pool_end_(NULL) {
+#ifdef VIXL_DEBUG
+    SetAllowMacroInstructions(true);
+#endif
+  }
+  MacroAssembler(byte* buffer, size_t size, InstructionSet isa = kDefaultISA)
+      : Assembler(buffer, size, isa),
+        available_(r12),
+        current_scratch_scope_(NULL),
+        pool_manager_(4 /*header_size*/,
+                      4 /*alignment*/,
+                      4 /*buffer_alignment*/),
+        generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+        pool_end_(NULL) {
+#ifdef VIXL_DEBUG
+    SetAllowMacroInstructions(true);
+#endif
+  }
+
+  bool GenerateSimulatorCode() const { return generate_simulator_code_; }
+
+  virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
+    return allow_macro_instructions_;
+  }
+
+  void FinalizeCode(FinalizeOption option = kUnreachable) {
+    EmitLiteralPool(option == kUnreachable
+                        ? PoolManager<int32_t>::kNoBranchRequired
+                        : PoolManager<int32_t>::kBranchRequired);
+    Assembler::FinalizeCode();
+  }
+
+  RegisterList* GetScratchRegisterList() { return &available_; }
+  VRegisterList* GetScratchVRegisterList() { return &available_vfp_; }
+
+  // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
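+  // A typical usage pattern is (illustrative sketch, not part of the original
+  // header; see UseScratchRegisterScope for the full interface):
+  //
+  //   UseScratchRegisterScope temps(&masm);
+  //   Register tmp = temps.Acquire();  // take a register from available_
+  //   ...                              // tmp may be clobbered freely here
+  //   // tmp is handed back when "temps" goes out of scope.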
+ void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) { + current_scratch_scope_ = scope; + } + UseScratchRegisterScope* GetCurrentScratchRegisterScope() { + return current_scratch_scope_; + } + + // Given an address calculation (Register + immediate), generate code to + // partially compute the address. The returned MemOperand will perform any + // remaining computation in a subsequent load or store instruction. + // + // The offset provided should be the offset that would be used in a load or + // store instruction (if it had sufficient range). This only matters where + // base.Is(pc), since load and store instructions align the pc before + // dereferencing it. + // + // TODO: Improve the handling of negative offsets. They are not implemented + // precisely for now because they only have a marginal benefit for the + // existing uses (in delegates). + MemOperand MemOperandComputationHelper(Condition cond, + Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask = 0); + + MemOperand MemOperandComputationHelper(Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask = 0) { + return MemOperandComputationHelper(al, + scratch, + base, + offset, + extra_offset_mask); + } + MemOperand MemOperandComputationHelper(Condition cond, + Register scratch, + Location* location, + uint32_t extra_offset_mask = 0) { + // Check for buffer space _before_ calculating the offset, in case we + // generate a pool that affects the offset calculation. + CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes); + Label::Offset offset = + location->GetLocation() - + AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4); + return MemOperandComputationHelper(cond, + scratch, + pc, + offset, + extra_offset_mask); + } + MemOperand MemOperandComputationHelper(Register scratch, + Location* location, + uint32_t extra_offset_mask = 0) { + return MemOperandComputationHelper(al, + scratch, + location, + extra_offset_mask); + } + + // Determine the appropriate mask to pass into MemOperandComputationHelper. + uint32_t GetOffsetMask(InstructionType type, AddrMode addrmode); + + // State and type helpers. + bool IsModifiedImmediate(uint32_t imm) { + return IsUsingT32() ? ImmediateT32::IsImmediateT32(imm) + : ImmediateA32::IsImmediateA32(imm); + } + + void Bind(Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + BindHelper(label); + } + + virtual void BindHelper(Label* label) VIXL_OVERRIDE { + // Assert that we have the correct buffer alignment. + if (IsUsingT32()) { + VIXL_ASSERT(GetBuffer()->Is16bitAligned()); + } else { + VIXL_ASSERT(GetBuffer()->Is32bitAligned()); + } + // If we need to add padding, check if we have to emit the pool. 
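+    // (Binding in T32 code can require two bytes of padding so that the bound
+    // location meets the minimum-location requirement of its pool object; as
+    // emitting that padding consumes buffer space, we first ask the pool
+    // manager whether doing so would force a pending pool to be emitted.)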
+ const int32_t pc = GetCursorOffset(); + if (label->Needs16BitPadding(pc)) { + const int kPaddingBytes = 2; + if (pool_manager_.MustEmit(pc, kPaddingBytes)) { + int32_t new_pc = pool_manager_.Emit(this, pc, kPaddingBytes); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + } + pool_manager_.Bind(this, label, GetCursorOffset()); + } + + void RegisterLiteralReference(RawLiteral* literal) { + if (literal->IsManuallyPlaced()) return; + RegisterForwardReference(literal); + } + + void RegisterForwardReference(Location* location) { + if (location->IsBound()) return; + VIXL_ASSERT(location->HasForwardReferences()); + const Location::ForwardRef& reference = location->GetLastForwardReference(); + pool_manager_.AddObjectReference(&reference, location); + } + + void CheckEmitPoolForInstruction(const ReferenceInfo* info, + Location* location, + Condition* cond = NULL) { + int size = info->size; + int32_t pc = GetCursorOffset(); + // If we need to emit a branch over the instruction, take this into account. + if ((cond != NULL) && NeedBranch(cond)) { + size += kBranchSize; + pc += kBranchSize; + } + int32_t from = pc; + from += IsUsingT32() ? kT32PcDelta : kA32PcDelta; + if (info->pc_needs_aligning) from = AlignDown(from, 4); + int32_t min = from + info->min_offset; + int32_t max = from + info->max_offset; + ForwardReference temp_ref(pc, + info->size, + min, + max, + info->alignment); + if (pool_manager_.MustEmit(GetCursorOffset(), size, &temp_ref, location)) { + int32_t new_pc = pool_manager_.Emit(this, + GetCursorOffset(), + info->size, + &temp_ref, + location); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + } + + void Place(RawLiteral* literal) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(literal->IsManuallyPlaced()); + // Check if we need to emit the pools. Take the alignment of the literal + // into account, as well as potential 16-bit padding needed to reach the + // minimum accessible location. + int alignment = literal->GetMaxAlignment(); + int32_t pc = GetCursorOffset(); + int total_size = AlignUp(pc, alignment) - pc + literal->GetSize(); + if (literal->Needs16BitPadding(pc)) total_size += 2; + if (pool_manager_.MustEmit(pc, total_size)) { + int32_t new_pc = pool_manager_.Emit(this, pc, total_size); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + pool_manager_.Bind(this, literal, GetCursorOffset()); + literal->EmitPoolObject(this); + // Align the buffer, to be ready to generate instructions right after + // this. 
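+    // (Pool objects may leave the cursor at an arbitrary byte offset, while
+    // instructions need 2-byte (T32) or 4-byte (A32) alignment.)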
+ GetBuffer()->Align(); + } + + void EmitLiteralPool(PoolManager::EmitOption option = + PoolManager::kBranchRequired) { + VIXL_ASSERT(!ArePoolsBlocked()); + int32_t new_pc = + pool_manager_.Emit(this, GetCursorOffset(), 0, NULL, NULL, option); + VIXL_ASSERT(new_pc == GetCursorOffset()); + USE(new_pc); + } + + void EnsureEmitFor(uint32_t size) { + EnsureEmitPoolsFor(size); + VIXL_ASSERT(GetBuffer()->HasSpaceFor(size) || GetBuffer()->IsManaged()); + GetBuffer()->EnsureSpaceFor(size); + } + + bool AliasesAvailableScratchRegister(Register reg) { + return GetScratchRegisterList()->Includes(reg); + } + + bool AliasesAvailableScratchRegister(RegisterOrAPSR_nzcv reg) { + if (reg.IsAPSR_nzcv()) return false; + return GetScratchRegisterList()->Includes(reg.AsRegister()); + } + + bool AliasesAvailableScratchRegister(VRegister reg) { + return GetScratchVRegisterList()->IncludesAliasOf(reg); + } + + bool AliasesAvailableScratchRegister(const Operand& operand) { + if (operand.IsImmediate()) return false; + return AliasesAvailableScratchRegister(operand.GetBaseRegister()) || + (operand.IsRegisterShiftedRegister() && + AliasesAvailableScratchRegister(operand.GetShiftRegister())); + } + + bool AliasesAvailableScratchRegister(const NeonOperand& operand) { + if (operand.IsImmediate()) return false; + return AliasesAvailableScratchRegister(operand.GetRegister()); + } + + bool AliasesAvailableScratchRegister(SRegisterList list) { + for (int n = 0; n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetSRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(DRegisterList list) { + for (int n = 0; n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetDRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(NeonRegisterList list) { + for (int n = 0; n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetDRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(RegisterList list) { + return GetScratchRegisterList()->Overlaps(list); + } + + bool AliasesAvailableScratchRegister(const MemOperand& operand) { + return AliasesAvailableScratchRegister(operand.GetBaseRegister()) || + (operand.IsShiftedRegister() && + AliasesAvailableScratchRegister(operand.GetOffsetRegister())); + } + + // Adr with a literal already constructed. Add the literal to the pool if it + // is not already done. + void Adr(Condition cond, Register rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = adr_info(cond, Best, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + adr(cond, Best, rd, literal); + RegisterLiteralReference(literal); + } + void Adr(Register rd, RawLiteral* literal) { Adr(al, rd, literal); } + + // Loads with literals already constructed. Add the literal to the pool + // if it is not already done. 
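+  // A minimal sketch, assuming a MacroAssembler `masm` and a pool-managed
+  // literal:
+  //
+  //   Literal<uint32_t> forty_two(42);
+  //   masm.Ldr(r0, &forty_two);  // pc-relative load; the pool manager
+  //                              // places the data and patches the offset.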
+ void Ldr(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldr_info(cond, Best, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldr(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldr(Register rt, RawLiteral* literal) { Ldr(al, rt, literal); } + + void Ldrb(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrb_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrb(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrb(Register rt, RawLiteral* literal) { Ldrb(al, rt, literal); } + + void Ldrd(Condition cond, Register rt, Register rt2, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrd_info(cond, rt, rt2, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrd(cond, rt, rt2, literal); + RegisterLiteralReference(literal); + } + void Ldrd(Register rt, Register rt2, RawLiteral* literal) { + Ldrd(al, rt, rt2, literal); + } + + void Ldrh(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrh_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrh(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrh(Register rt, RawLiteral* literal) { Ldrh(al, rt, literal); } + + void Ldrsb(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrsb_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrsb(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrsb(Register rt, RawLiteral* literal) { Ldrsb(al, rt, literal); } + + void Ldrsh(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrsh_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrsh(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrsh(Register rt, RawLiteral* literal) { Ldrsh(al, rt, literal); } + + void Vldr(Condition cond, DataType dt, DRegister rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = vldr_info(cond, dt, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, literal); + RegisterLiteralReference(literal); + } + void Vldr(DataType dt, DRegister rd, RawLiteral* literal) { + Vldr(al, dt, rd, literal); + } + void Vldr(Condition cond, DRegister rd, RawLiteral* literal) { + Vldr(cond, Untyped64, rd, literal); + } + void Vldr(DRegister rd, RawLiteral* literal) { + Vldr(al, Untyped64, rd, literal); + } + + void Vldr(Condition cond, DataType dt, SRegister rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = vldr_info(cond, dt, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+      pool_policy = MacroEmissionCheckScope::kIgnorePools;
+    }
+    MacroEmissionCheckScope guard(this, pool_policy);
+    ITScope it_scope(this, &cond, guard);
+    vldr(cond, dt, rd, literal);
+    RegisterLiteralReference(literal);
+  }
+  void Vldr(DataType dt, SRegister rd, RawLiteral* literal) {
+    Vldr(al, dt, rd, literal);
+  }
+  void Vldr(Condition cond, SRegister rd, RawLiteral* literal) {
+    Vldr(cond, Untyped32, rd, literal);
+  }
+  void Vldr(SRegister rd, RawLiteral* literal) {
+    Vldr(al, Untyped32, rd, literal);
+  }
+
+  // Generic Ldr(register, data)
+  void Ldr(Condition cond, Register rt, uint32_t v) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    RawLiteral* literal =
+        new Literal<uint32_t>(v, RawLiteral::kDeletedOnPlacementByPool);
+    Ldr(cond, rt, literal);
+  }
+  template <typename T>
+  void Ldr(Register rt, T v) {
+    Ldr(al, rt, v);
+  }
+
+  // Generic Ldrd(rt, rt2, data)
+  void Ldrd(Condition cond, Register rt, Register rt2, uint64_t v) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    RawLiteral* literal =
+        new Literal<uint64_t>(v, RawLiteral::kDeletedOnPlacementByPool);
+    Ldrd(cond, rt, rt2, literal);
+  }
+  template <typename T>
+  void Ldrd(Register rt, Register rt2, T v) {
+    Ldrd(al, rt, rt2, v);
+  }
+
+  void Vldr(Condition cond, SRegister rd, float v) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    RawLiteral* literal =
+        new Literal<float>(v, RawLiteral::kDeletedOnPlacementByPool);
+    Vldr(cond, rd, literal);
+  }
+  void Vldr(SRegister rd, float v) { Vldr(al, rd, v); }
+
+  void Vldr(Condition cond, DRegister rd, double v) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    RawLiteral* literal =
+        new Literal<double>(v, RawLiteral::kDeletedOnPlacementByPool);
+    Vldr(cond, rd, literal);
+  }
+  void Vldr(DRegister rd, double v) { Vldr(al, rd, v); }
+
+  void Vmov(Condition cond, DRegister rt, double v) { Vmov(cond, F64, rt, v); }
+  void Vmov(DRegister rt, double v) { Vmov(al, F64, rt, v); }
+  void Vmov(Condition cond, SRegister rt, float v) { Vmov(cond, F32, rt, v); }
+  void Vmov(SRegister rt, float v) { Vmov(al, F32, rt, v); }
+
+  // Claim memory on the stack.
+  // Note that the Claim, Drop, and Peek helpers below ensure that offsets used
+  // are multiples of 32 bits to help maintain 32-bit SP alignment.
+  // We could `Align{Up,Down}(size, 4)`, but that's potentially problematic:
+  //     Claim(3)
+  //     Claim(1)
+  //     Drop(4)
+  // would seem correct, when in fact:
+  //     Claim(3) -> sp = sp - 4
+  //     Claim(1) -> sp = sp - 4
+  //     Drop(4)  -> sp = sp + 4
+  //
+  void Claim(int32_t size) {
+    if (size == 0) return;
+    // The stack must be kept 32bit aligned.
+    VIXL_ASSERT((size > 0) && ((size % 4) == 0));
+    Sub(sp, sp, size);
+  }
+  // Release memory on the stack
+  void Drop(int32_t size) {
+    if (size == 0) return;
+    // The stack must be kept 32bit aligned.
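+    // For example, a three-word spill area obeys the rule above as:
+    //
+    //   masm.Claim(12);    // sp -= 12
+    //   masm.Poke(r0, 8);  // store r0 at [sp, #8]
+    //   masm.Peek(r1, 8);  // reload it into r1
+    //   masm.Drop(12);     // sp += 12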
+ VIXL_ASSERT((size > 0) && ((size % 4) == 0)); + Add(sp, sp, size); + } + void Peek(Register dst, int32_t offset) { + VIXL_ASSERT((offset >= 0) && ((offset % 4) == 0)); + Ldr(dst, MemOperand(sp, offset)); + } + void Poke(Register src, int32_t offset) { + VIXL_ASSERT((offset >= 0) && ((offset % 4) == 0)); + Str(src, MemOperand(sp, offset)); + } + void Printf(const char* format, + CPURegister reg1 = NoReg, + CPURegister reg2 = NoReg, + CPURegister reg3 = NoReg, + CPURegister reg4 = NoReg); + // Functions used by Printf for generation. + void PushRegister(CPURegister reg); + void PreparePrintfArgument(CPURegister reg, + int* core_count, + int* vfp_count, + uint32_t* printf_type); + // Handlers for cases not handled by the assembler. + // ADD, MOVT, MOVW, SUB, SXTB16, TEQ, UXTB16 + virtual void Delegate(InstructionType type, + InstructionCondROp instruction, + Condition cond, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // CMN, CMP, MOV, MOVS, MVN, MVNS, SXTB, SXTH, TST, UXTB, UXTH + virtual void Delegate(InstructionType type, + InstructionCondSizeROp instruction, + Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // ADDW, ORN, ORNS, PKHBT, PKHTB, RSC, RSCS, SUBW, SXTAB, SXTAB16, SXTAH, + // UXTAB, UXTAB16, UXTAH + virtual void Delegate(InstructionType type, + InstructionCondRROp instruction, + Condition cond, + Register rd, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // ADC, ADCS, ADD, ADDS, AND, ANDS, ASR, ASRS, BIC, BICS, EOR, EORS, LSL, + // LSLS, LSR, LSRS, ORR, ORRS, ROR, RORS, RSB, RSBS, SBC, SBCS, SUB, SUBS + virtual void Delegate(InstructionType type, + InstructionCondSizeRL instruction, + Condition cond, + EncodingSize size, + Register rd, + Location* location) VIXL_OVERRIDE; + bool GenerateSplitInstruction(InstructionCondSizeRROp instruction, + Condition cond, + Register rd, + Register rn, + uint32_t imm, + uint32_t mask); + virtual void Delegate(InstructionType type, + InstructionCondSizeRROp instruction, + Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // CBNZ, CBZ + virtual void Delegate(InstructionType type, + InstructionRL instruction, + Register rn, + Location* location) VIXL_OVERRIDE; + // VMOV + virtual void Delegate(InstructionType type, + InstructionCondDtSSop instruction, + Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) VIXL_OVERRIDE; + // VMOV, VMVN + virtual void Delegate(InstructionType type, + InstructionCondDtDDop instruction, + Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) VIXL_OVERRIDE; + // VMOV, VMVN + virtual void Delegate(InstructionType type, + InstructionCondDtQQop instruction, + Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) VIXL_OVERRIDE; + // LDR, LDRB, LDRH, LDRSB, LDRSH, STR, STRB, STRH + virtual void Delegate(InstructionType type, + InstructionCondSizeRMop instruction, + Condition cond, + EncodingSize size, + Register rd, + const MemOperand& operand) VIXL_OVERRIDE; + // LDAEXD, LDRD, LDREXD, STLEX, STLEXB, STLEXH, STRD, STREX, STREXB, STREXH + virtual void Delegate(InstructionType type, + InstructionCondRL instruction, + Condition cond, + Register rt, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondRRL instruction, + Condition cond, + Register rt, + Register rt2, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondRRMop instruction, + 
Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) VIXL_OVERRIDE; + // VLDR, VSTR + virtual void Delegate(InstructionType type, + InstructionCondDtSMop instruction, + Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) VIXL_OVERRIDE; + // VLDR, VSTR + virtual void Delegate(InstructionType type, + InstructionCondDtDMop instruction, + Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) VIXL_OVERRIDE; + // MSR + virtual void Delegate(InstructionType type, + InstructionCondMsrOp instruction, + Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondDtDL instruction, + Condition cond, + DataType dt, + DRegister rd, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondDtSL instruction, + Condition cond, + DataType dt, + SRegister rd, + Location* location) VIXL_OVERRIDE; + + // Start of generated code. + + void Adc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ADC{} {,} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && rd.Is(rn) && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + adc(cond, rd, rn, operand); + } + void Adc(Register rd, Register rn, const Operand& operand) { + Adc(al, rd, rn, operand); + } + void Adc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Adc(cond, rd, rn, operand); + break; + case SetFlags: + Adcs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Adcs(cond, rd, rn, operand); + } else { + Adc(cond, rd, rn, operand); + } + break; + } + } + void Adc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Adc(flags, al, rd, rn, operand); + } + + void Adcs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + adcs(cond, rd, rn, operand); + } + void Adcs(Register rd, Register rn, const Operand& operand) { + Adcs(al, rd, rn, operand); + } + + void Add(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + } + bool can_use_it = + // ADD{} , , # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 7) && rn.IsLow() && + rd.IsLow()) || + // ADD{} {,} , # 
; T2 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rd.IsLow() && rn.Is(rd)) || + // ADD{}{} , SP, # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 1020) && + ((operand.GetImmediate() & 0x3) == 0) && rd.IsLow() && rn.IsSP()) || + // ADD{} , , + (operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()) || + // ADD{} , ; T2 + (operand.IsPlainRegister() && !rd.IsPC() && rn.Is(rd) && + !operand.GetBaseRegister().IsSP() && + !operand.GetBaseRegister().IsPC()) || + // ADD{}{} {,} SP, ; T1 + (operand.IsPlainRegister() && !rd.IsPC() && rn.IsSP() && + operand.GetBaseRegister().Is(rd)); + ITScope it_scope(this, &cond, guard, can_use_it); + add(cond, rd, rn, operand); + } + void Add(Register rd, Register rn, const Operand& operand) { + Add(al, rd, rn, operand); + } + void Add(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Add(cond, rd, rn, operand); + break; + case SetFlags: + Adds(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + !rd.Is(rn) && operand.GetBaseRegister().IsLow()) || + (operand.IsImmediate() && + ((rd.IsLow() && rn.IsLow() && (operand.GetImmediate() < 8)) || + (rd.IsLow() && rn.Is(rd) && (operand.GetImmediate() < 256))))); + if (setflags_is_smaller) { + Adds(cond, rd, rn, operand); + } else { + bool changed_op_is_smaller = + operand.IsImmediate() && (operand.GetSignedImmediate() < 0) && + ((rd.IsLow() && rn.IsLow() && + (operand.GetSignedImmediate() >= -7)) || + (rd.IsLow() && rn.Is(rd) && + (operand.GetSignedImmediate() >= -255))); + if (changed_op_is_smaller) { + Subs(cond, rd, rn, -operand.GetSignedImmediate()); + } else { + Add(cond, rd, rn, operand); + } + } + break; + } + } + void Add(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Add(flags, al, rd, rn, operand); + } + + void Adds(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + adds(cond, rd, rn, operand); + } + void Adds(Register rd, Register rn, const Operand& operand) { + Adds(al, rd, rn, operand); + } + + void And(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (rd.Is(rn) && operand.IsPlainRegister() && + rd.Is(operand.GetBaseRegister())) { + return; + } + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + mov(rd, 0); + return; + } + if ((immediate == 0xffffffff) && rd.Is(rn)) { + return; + } + } + bool can_use_it = + // AND{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + and_(cond, rd, rn, operand); + } + void And(Register rd, Register rn, const Operand& operand) { + And(al, rd, rn, operand); + } + void 
And(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + And(cond, rd, rn, operand); + break; + case SetFlags: + Ands(cond, rd, rn, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(rn) && + rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Ands(cond, rd, rn, operand); + } else { + And(cond, rd, rn, operand); + } + break; + } + } + void And(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + And(flags, al, rd, rn, operand); + } + + void Ands(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ands(cond, rd, rn, operand); + } + void Ands(Register rd, Register rn, const Operand& operand) { + Ands(al, rd, rn, operand); + } + + void Asr(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ASR{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32) && rd.IsLow() && rm.IsLow()) || + // ASR{} {,} , ; T1 + (operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + asr(cond, rd, rm, operand); + } + void Asr(Register rd, Register rm, const Operand& operand) { + Asr(al, rd, rm, operand); + } + void Asr(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Asr(cond, rd, rm, operand); + break; + case SetFlags: + Asrs(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Asrs(cond, rd, rm, operand); + } else { + Asr(cond, rd, rm, operand); + } + break; + } + } + void Asr(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Asr(flags, al, rd, rm, operand); + } + + void Asrs(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + asrs(cond, rd, rm, operand); + } + void Asrs(Register rd, Register rm, const Operand& operand) { + Asrs(al, rd, rm, operand); + } + + void B(Condition cond, Label* label, BranchHint hint = kBranchWithoutHint) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
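+    // A kNear hint only narrows the encoding of a still-unbound label; for a
+    // bound label the assembler already knows the offset and picks the best
+    // encoding itself.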
EncodingSize size = Best; + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + if (hint == kNear) size = Narrow; + const ReferenceInfo* info; + bool can_encode = b_info(cond, size, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + b(cond, size, label); + RegisterForwardReference(label); + } + void B(Label* label, BranchHint hint = kBranchWithoutHint) { + B(al, label, hint); + } + void BPreferNear(Condition cond, Label* label) { B(cond, label, kNear); } + void BPreferNear(Label* label) { B(al, label, kNear); } + + void Bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bfc(cond, rd, lsb, width); + } + void Bfc(Register rd, uint32_t lsb, uint32_t width) { + Bfc(al, rd, lsb, width); + } + + void Bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bfi(cond, rd, rn, lsb, width); + } + void Bfi(Register rd, Register rn, uint32_t lsb, uint32_t width) { + Bfi(al, rd, rn, lsb, width); + } + + void Bic(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if ((immediate == 0) && rd.Is(rn)) { + return; + } + if (immediate == 0xffffffff) { + mov(rd, 0); + return; + } + } + bool can_use_it = + // BIC{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + bic(cond, rd, rn, operand); + } + void Bic(Register rd, Register rn, const Operand& operand) { + Bic(al, rd, rn, operand); + } + void Bic(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Bic(cond, rd, rn, operand); + break; + case SetFlags: + Bics(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Bics(cond, rd, rn, operand); + } else { + Bic(cond, rd, rn, operand); + } + break; + } + } + void Bic(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Bic(flags, al, rd, rn, operand); + } + + void Bics(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bics(cond, rd, rn, operand); + } + void Bics(Register rd, Register rn, const Operand& operand) { + Bics(al, rd, rn, operand); + } + + void Bkpt(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bkpt(cond, imm); + } + void Bkpt(uint32_t imm) { Bkpt(al, imm); } + + void Bl(Condition cond, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = bl_info(cond, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + bl(cond, label); + RegisterForwardReference(label); + } + void Bl(Label* label) { Bl(al, label); } + + void Blx(Condition cond, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = blx_info(cond, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + blx(cond, label); + RegisterForwardReference(label); + } + void Blx(Label* label) { Blx(al, label); } + + void Blx(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // BLX{}{} ; T1 + !rm.IsPC(); + ITScope it_scope(this, &cond, guard, can_use_it); + blx(cond, rm); + } + void Blx(Register rm) { Blx(al, rm); } + + void Bx(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // BX{}{} ; T1 + !rm.IsPC(); + ITScope it_scope(this, &cond, guard, can_use_it); + bx(cond, rm); + } + void Bx(Register rm) { Bx(al, rm); } + + void Bxj(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bxj(cond, rm); + } + void Bxj(Register rm) { Bxj(al, rm); } + + void Cbnz(Register rn, Label* label) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = cbnz_info(rn, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label); + // We have already checked for pool emission. 
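+      // (Cbnz/Cbz take no condition: the encoding is T32-only, compares
+      // against zero and only branches forwards over a short range, so no
+      // branch-over-pool condition is passed to the check above.)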
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + cbnz(rn, label); + RegisterForwardReference(label); + } + + void Cbz(Register rn, Label* label) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = cbz_info(rn, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + cbz(rn, label); + RegisterForwardReference(label); + } + + void Clrex(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + clrex(cond); + } + void Clrex() { Clrex(al); } + + void Clz(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + clz(cond, rd, rm); + } + void Clz(Register rd, Register rm) { Clz(al, rd, rm); } + + void Cmn(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // CMN{}{} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + cmn(cond, rn, operand); + } + void Cmn(Register rn, const Operand& operand) { Cmn(al, rn, operand); } + + void Cmp(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // CMP{}{} , # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rn.IsLow()) || + // CMP{}{} , ; T1 T2 + (operand.IsPlainRegister() && !rn.IsPC() && + !operand.GetBaseRegister().IsPC()); + ITScope it_scope(this, &cond, guard, can_use_it); + cmp(cond, rn, operand); + } + void Cmp(Register rn, const Operand& operand) { Cmp(al, rn, operand); } + + void Crc32b(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32b(cond, rd, rn, rm); + } + void Crc32b(Register rd, Register rn, Register rm) { Crc32b(al, rd, rn, rm); } + + void Crc32cb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
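+    // (The Crc32* helpers assume the CRC32 extension is available; no
+    // runtime CPU-feature check is emitted here.)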
ITScope it_scope(this, &cond, guard); + crc32cb(cond, rd, rn, rm); + } + void Crc32cb(Register rd, Register rn, Register rm) { + Crc32cb(al, rd, rn, rm); + } + + void Crc32ch(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32ch(cond, rd, rn, rm); + } + void Crc32ch(Register rd, Register rn, Register rm) { + Crc32ch(al, rd, rn, rm); + } + + void Crc32cw(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32cw(cond, rd, rn, rm); + } + void Crc32cw(Register rd, Register rn, Register rm) { + Crc32cw(al, rd, rn, rm); + } + + void Crc32h(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32h(cond, rd, rn, rm); + } + void Crc32h(Register rd, Register rn, Register rm) { Crc32h(al, rd, rn, rm); } + + void Crc32w(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32w(cond, rd, rn, rm); + } + void Crc32w(Register rd, Register rn, Register rm) { Crc32w(al, rd, rn, rm); } + + void Dmb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + dmb(cond, option); + } + void Dmb(MemoryBarrier option) { Dmb(al, option); } + + void Dsb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + dsb(cond, option); + } + void Dsb(MemoryBarrier option) { Dsb(al, option); } + + void Eor(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + if (immediate == 0xffffffff) { + mvn(rd, rn); + return; + } + } + bool can_use_it = + // EOR{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + eor(cond, rd, rn, operand); + } + void Eor(Register 
rd, Register rn, const Operand& operand) { + Eor(al, rd, rn, operand); + } + void Eor(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Eor(cond, rd, rn, operand); + break; + case SetFlags: + Eors(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Eors(cond, rd, rn, operand); + } else { + Eor(cond, rd, rn, operand); + } + break; + } + } + void Eor(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Eor(flags, al, rd, rn, operand); + } + + void Eors(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + eors(cond, rd, rn, operand); + } + void Eors(Register rd, Register rn, const Operand& operand) { + Eors(al, rd, rn, operand); + } + + void Fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fldmdbx(cond, rn, write_back, dreglist); + } + void Fldmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fldmdbx(al, rn, write_back, dreglist); + } + + void Fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fldmiax(cond, rn, write_back, dreglist); + } + void Fldmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fldmiax(al, rn, write_back, dreglist); + } + + void Fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fstmdbx(cond, rn, write_back, dreglist); + } + void Fstmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fstmdbx(al, rn, write_back, dreglist); + } + + void Fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fstmiax(cond, rn, write_back, dreglist); + } + void Fstmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fstmiax(al, rn, write_back, dreglist); + } + + void Hlt(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + hlt(cond, imm); + } + void Hlt(uint32_t imm) { Hlt(al, imm); } + + void Hvc(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + hvc(cond, imm); + } + void Hvc(uint32_t imm) { Hvc(al, imm); } + + void Isb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + isb(cond, option); + } + void Isb(MemoryBarrier option) { Isb(al, option); } + + void Lda(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lda(cond, rt, operand); + } + void Lda(Register rt, const MemOperand& operand) { Lda(al, rt, operand); } + + void Ldab(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldab(cond, rt, operand); + } + void Ldab(Register rt, const MemOperand& operand) { Ldab(al, rt, operand); } + + void Ldaex(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaex(cond, rt, operand); + } + void Ldaex(Register rt, const MemOperand& operand) { Ldaex(al, rt, operand); } + + void Ldaexb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexb(cond, rt, operand); + } + void Ldaexb(Register rt, const MemOperand& operand) { + Ldaexb(al, rt, operand); + } + + void Ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexd(cond, rt, rt2, operand); + } + void Ldaexd(Register rt, Register rt2, const MemOperand& operand) { + Ldaexd(al, rt, rt2, operand); + } + + void Ldaexh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexh(cond, rt, operand); + } + void Ldaexh(Register rt, const MemOperand& operand) { + Ldaexh(al, rt, operand); + } + + void Ldah(Condition cond, Register rt, const MemOperand& operand) 
{ + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldah(cond, rt, operand); + } + void Ldah(Register rt, const MemOperand& operand) { Ldah(al, rt, operand); } + + void Ldm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldm(cond, rn, write_back, registers); + } + void Ldm(Register rn, WriteBack write_back, RegisterList registers) { + Ldm(al, rn, write_back, registers); + } + + void Ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmda(cond, rn, write_back, registers); + } + void Ldmda(Register rn, WriteBack write_back, RegisterList registers) { + Ldmda(al, rn, write_back, registers); + } + + void Ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmdb(cond, rn, write_back, registers); + } + void Ldmdb(Register rn, WriteBack write_back, RegisterList registers) { + Ldmdb(al, rn, write_back, registers); + } + + void Ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmea(cond, rn, write_back, registers); + } + void Ldmea(Register rn, WriteBack write_back, RegisterList registers) { + Ldmea(al, rn, write_back, registers); + } + + void Ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmed(cond, rn, write_back, registers); + } + void Ldmed(Register rn, WriteBack write_back, RegisterList registers) { + Ldmed(al, rn, write_back, registers); + } + + void Ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmfa(cond, rn, write_back, registers); + } + void Ldmfa(Register rn, WriteBack write_back, RegisterList registers) { + Ldmfa(al, rn, write_back, registers); + } + + void 
Ldmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmfd(cond, rn, write_back, registers); + } + void Ldmfd(Register rn, WriteBack write_back, RegisterList registers) { + Ldmfd(al, rn, write_back, registers); + } + + void Ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmib(cond, rn, write_back, registers); + } + void Ldmib(Register rn, WriteBack write_back, RegisterList registers) { + Ldmib(al, rn, write_back, registers); + } + + void Ldr(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDR{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 124, 4) && + (operand.GetAddrMode() == Offset)) || + // LDR{}{} , [SP{, #{+}}] ; T2 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsSP() && + operand.IsOffsetImmediateWithinRange(0, 1020, 4) && + (operand.GetAddrMode() == Offset)) || + // LDR{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldr(cond, rt, operand); + } + void Ldr(Register rt, const MemOperand& operand) { Ldr(al, rt, operand); } + + + void Ldrb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRB{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 31) && + (operand.GetAddrMode() == Offset)) || + // LDRB{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrb(cond, rt, operand); + } + void Ldrb(Register rt, const MemOperand& operand) { Ldrb(al, rt, operand); } + + + void Ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrd(cond, rt, rt2, operand); + } + void Ldrd(Register rt, Register rt2, 
const MemOperand& operand) { + Ldrd(al, rt, rt2, operand); + } + + + void Ldrex(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrex(cond, rt, operand); + } + void Ldrex(Register rt, const MemOperand& operand) { Ldrex(al, rt, operand); } + + void Ldrexb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexb(cond, rt, operand); + } + void Ldrexb(Register rt, const MemOperand& operand) { + Ldrexb(al, rt, operand); + } + + void Ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexd(cond, rt, rt2, operand); + } + void Ldrexd(Register rt, Register rt2, const MemOperand& operand) { + Ldrexd(al, rt, rt2, operand); + } + + void Ldrexh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexh(cond, rt, operand); + } + void Ldrexh(Register rt, const MemOperand& operand) { + Ldrexh(al, rt, operand); + } + + void Ldrh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRH{<c>}{<q>} <Rt>, [<Rn> {, #{+}<imm>}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 62, 2) && + (operand.GetAddrMode() == Offset)) || + // LDRH{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrh(cond, rt, operand); + } + void Ldrh(Register rt, const MemOperand& operand) { Ldrh(al, rt, operand); } + + + void Ldrsb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRSB{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrsb(cond, rt, operand); + } + void Ldrsb(Register rt,
const MemOperand& operand) { Ldrsb(al, rt, operand); } + + + void Ldrsh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRSH{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrsh(cond, rt, operand); + } + void Ldrsh(Register rt, const MemOperand& operand) { Ldrsh(al, rt, operand); } + + + void Lsl(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LSL<c>{<q>} {<Rd>,} <Rm>, #<imm> ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 31) && rd.IsLow() && rm.IsLow()) || + // LSL<c>{<q>} {<Rdm>,} <Rdm>, <Rs> ; T1 + (operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + lsl(cond, rd, rm, operand); + } + void Lsl(Register rd, Register rm, const Operand& operand) { + Lsl(al, rd, rm, operand); + } + void Lsl(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Lsl(cond, rd, rm, operand); + break; + case SetFlags: + Lsls(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() < 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Lsls(cond, rd, rm, operand); + } else { + Lsl(cond, rd, rm, operand); + } + break; + } + } + void Lsl(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Lsl(flags, al, rd, rm, operand); + } + + void Lsls(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lsls(cond, rd, rm, operand); + } + void Lsls(Register rd, Register rm, const Operand& operand) { + Lsls(al, rd, rm, operand); + } + + void Lsr(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LSR<c>{<q>} {<Rd>,} <Rm>, #<imm> ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32) && rd.IsLow() && rm.IsLow()) || + // LSR<c>{<q>} {<Rdm>,} <Rdm>, <Rs> ; T1 + (operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard,
can_use_it); + lsr(cond, rd, rm, operand); + } + void Lsr(Register rd, Register rm, const Operand& operand) { + Lsr(al, rd, rm, operand); + } + void Lsr(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Lsr(cond, rd, rm, operand); + break; + case SetFlags: + Lsrs(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Lsrs(cond, rd, rm, operand); + } else { + Lsr(cond, rd, rm, operand); + } + break; + } + } + void Lsr(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Lsr(flags, al, rd, rm, operand); + } + + void Lsrs(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lsrs(cond, rd, rm, operand); + } + void Lsrs(Register rd, Register rm, const Operand& operand) { + Lsrs(al, rd, rm, operand); + } + + void Mla(Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mla(cond, rd, rn, rm, ra); + } + void Mla(Register rd, Register rn, Register rm, Register ra) { + Mla(al, rd, rn, rm, ra); + } + void Mla(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + Register rm, + Register ra) { + switch (flags) { + case LeaveFlags: + Mla(cond, rd, rn, rm, ra); + break; + case SetFlags: + Mlas(cond, rd, rn, rm, ra); + break; + case DontCare: + Mla(cond, rd, rn, rm, ra); + break; + } + } + void Mla( + FlagsUpdate flags, Register rd, Register rn, Register rm, Register ra) { + Mla(flags, al, rd, rn, rm, ra); + } + + void Mlas( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mlas(cond, rd, rn, rm, ra); + } + void Mlas(Register rd, Register rn, Register rm, Register ra) { + Mlas(al, rd, rn, rm, ra); + } + + void Mls(Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mls(cond, rd, rn, rm, ra); + } + void Mls(Register rd, Register rn, Register rm, Register ra) { 
+ Mls(al, rd, rn, rm, ra); + } + + void Mov(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (operand.IsPlainRegister() && rd.Is(operand.GetBaseRegister())) { + return; + } + bool can_use_it = + // MOV<c>{<q>} <Rd>, #<imm8> ; T1 + (operand.IsImmediate() && rd.IsLow() && + (operand.GetImmediate() <= 255)) || + // MOV{<c>}{<q>} <Rd>, <Rm> ; T1 + (operand.IsPlainRegister() && !rd.IsPC() && + !operand.GetBaseRegister().IsPC()) || + // MOV<c>{<q>} <Rd>, <Rm> {, <shift> #<amount>} ; T2 + (operand.IsImmediateShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow() && + (operand.GetShift().Is(LSL) || operand.GetShift().Is(LSR) || + operand.GetShift().Is(ASR))) || + // MOV<c>{<q>} <Rdm>, <Rdm>, LSL <Rs> ; T1 + // MOV<c>{<q>} <Rdm>, <Rdm>, LSR <Rs> ; T1 + // MOV<c>{<q>} <Rdm>, <Rdm>, ASR <Rs> ; T1 + // MOV<c>{<q>} <Rdm>, <Rdm>, ROR <Rs> ; T1 + (operand.IsRegisterShiftedRegister() && + rd.Is(operand.GetBaseRegister()) && rd.IsLow() && + (operand.GetShift().Is(LSL) || operand.GetShift().Is(LSR) || + operand.GetShift().Is(ASR) || operand.GetShift().Is(ROR)) && + operand.GetShiftRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + mov(cond, rd, operand); + } + void Mov(Register rd, const Operand& operand) { Mov(al, rd, operand); } + void Mov(FlagsUpdate flags, + Condition cond, + Register rd, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Mov(cond, rd, operand); + break; + case SetFlags: + Movs(cond, rd, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsImmediateShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow() && + (operand.GetShiftAmount() >= 1) && + (((operand.GetShiftAmount() <= 32) && + ((operand.GetShift().IsLSR() || operand.GetShift().IsASR()))) || + ((operand.GetShiftAmount() < 32) && + operand.GetShift().IsLSL()))) || + (operand.IsRegisterShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().Is(rd) && + operand.GetShiftRegister().IsLow() && + (operand.GetShift().IsLSL() || operand.GetShift().IsLSR() || + operand.GetShift().IsASR() || operand.GetShift().IsROR())) || + (operand.IsImmediate() && rd.IsLow() && + (operand.GetImmediate() < 256))); + if (setflags_is_smaller) { + Movs(cond, rd, operand); + } else { + Mov(cond, rd, operand); + } + break; + } + } + void Mov(FlagsUpdate flags, Register rd, const Operand& operand) { + Mov(flags, al, rd, operand); + } + + void Movs(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + movs(cond, rd, operand); + } + void Movs(Register rd, const Operand& operand) { Movs(al, rd, operand); } + + void Movt(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + movt(cond, rd, operand); + } + void Movt(Register rd, const Operand& operand) { Movt(al, rd, operand); } + + void Mrs(Condition cond, Register rd, SpecialRegister
spec_reg) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mrs(cond, rd, spec_reg); + } + void Mrs(Register rd, SpecialRegister spec_reg) { Mrs(al, rd, spec_reg); } + + void Msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + msr(cond, spec_reg, operand); + } + void Msr(MaskedSpecialRegister spec_reg, const Operand& operand) { + Msr(al, spec_reg, operand); + } + + void Mul(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // MUL<c>{<q>} <Rdm>, <Rn>{, <Rdm>} ; T1 + rd.Is(rm) && rn.IsLow() && rm.IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + mul(cond, rd, rn, rm); + } + void Mul(Register rd, Register rn, Register rm) { Mul(al, rd, rn, rm); } + void Mul(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Mul(cond, rd, rn, rm); + break; + case SetFlags: + Muls(cond, rd, rn, rm); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.IsLow() && rm.Is(rd); + if (setflags_is_smaller) { + Muls(cond, rd, rn, rm); + } else { + Mul(cond, rd, rn, rm); + } + break; + } + } + void Mul(FlagsUpdate flags, Register rd, Register rn, Register rm) { + Mul(flags, al, rd, rn, rm); + } + + void Muls(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + muls(cond, rd, rn, rm); + } + void Muls(Register rd, Register rn, Register rm) { Muls(al, rd, rn, rm); } + + void Mvn(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // MVN<c>{<q>} <Rd>, <Rm> ; T1 + operand.IsPlainRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + mvn(cond, rd, operand); + } + void Mvn(Register rd, const Operand& operand) { Mvn(al, rd, operand); } + void Mvn(FlagsUpdate flags, + Condition cond, + Register rd, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Mvn(cond, rd, operand); + break; + case SetFlags: + Mvns(cond, rd, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Mvns(cond, rd, operand); + } else { + Mvn(cond, rd, operand); + } + break; + } + } + void Mvn(FlagsUpdate flags, Register rd, const Operand& operand) { + Mvn(flags, al, rd,
operand); + } + + void Mvns(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mvns(cond, rd, operand); + } + void Mvns(Register rd, const Operand& operand) { Mvns(al, rd, operand); } + + void Nop(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + nop(cond); + } + void Nop() { Nop(al); } + + void Orn(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + mvn(rd, 0); + return; + } + if ((immediate == 0xffffffff) && rd.Is(rn)) { + return; + } + } + ITScope it_scope(this, &cond, guard); + orn(cond, rd, rn, operand); + } + void Orn(Register rd, Register rn, const Operand& operand) { + Orn(al, rd, rn, operand); + } + void Orn(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Orn(cond, rd, rn, operand); + break; + case SetFlags: + Orns(cond, rd, rn, operand); + break; + case DontCare: + Orn(cond, rd, rn, operand); + break; + } + } + void Orn(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Orn(flags, al, rd, rn, operand); + } + + void Orns(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + orns(cond, rd, rn, operand); + } + void Orns(Register rd, Register rn, const Operand& operand) { + Orns(al, rd, rn, operand); + } + + void Orr(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (rd.Is(rn) && operand.IsPlainRegister() && + rd.Is(operand.GetBaseRegister())) { + return; + } + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if ((immediate == 0) && rd.Is(rn)) { + return; + } + if (immediate == 0xffffffff) { + mvn(rd, 0); + return; + } + } + bool can_use_it = + // ORR<c>{<q>} {<Rdn>,} <Rdn>, <Rm> ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + orr(cond, rd, rn, operand); + } + void Orr(Register rd, Register rn, const Operand& operand) { + Orr(al, rd, rn, operand); + } + void Orr(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Orr(cond, rd, rn, operand); +
break; + case SetFlags: + Orrs(cond, rd, rn, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(rn) && + rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Orrs(cond, rd, rn, operand); + } else { + Orr(cond, rd, rn, operand); + } + break; + } + } + void Orr(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Orr(flags, al, rd, rn, operand); + } + + void Orrs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + orrs(cond, rd, rn, operand); + } + void Orrs(Register rd, Register rn, const Operand& operand) { + Orrs(al, rd, rn, operand); + } + + void Pkhbt(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pkhbt(cond, rd, rn, operand); + } + void Pkhbt(Register rd, Register rn, const Operand& operand) { + Pkhbt(al, rd, rn, operand); + } + + void Pkhtb(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pkhtb(cond, rd, rn, operand); + } + void Pkhtb(Register rd, Register rn, const Operand& operand) { + Pkhtb(al, rd, rn, operand); + } + + + void Pld(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pld(cond, operand); + } + void Pld(const MemOperand& operand) { Pld(al, operand); } + + void Pldw(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pldw(cond, operand); + } + void Pldw(const MemOperand& operand) { Pldw(al, operand); } + + void Pli(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pli(cond, operand); + } + void Pli(const MemOperand& operand) { Pli(al, operand); } + + + void Pop(Condition cond, RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pop(cond, 
registers); + } + void Pop(RegisterList registers) { Pop(al, registers); } + + void Pop(Condition cond, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pop(cond, rt); + } + void Pop(Register rt) { Pop(al, rt); } + + void Push(Condition cond, RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + push(cond, registers); + } + void Push(RegisterList registers) { Push(al, registers); } + + void Push(Condition cond, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + push(cond, rt); + } + void Push(Register rt) { Push(al, rt); } + + void Qadd(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd(cond, rd, rm, rn); + } + void Qadd(Register rd, Register rm, Register rn) { Qadd(al, rd, rm, rn); } + + void Qadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd16(cond, rd, rn, rm); + } + void Qadd16(Register rd, Register rn, Register rm) { Qadd16(al, rd, rn, rm); } + + void Qadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd8(cond, rd, rn, rm); + } + void Qadd8(Register rd, Register rn, Register rm) { Qadd8(al, rd, rn, rm); } + + void Qasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qasx(cond, rd, rn, rm); + } + void Qasx(Register rd, Register rn, Register rm) { Qasx(al, rd, rn, rm); } + + void Qdadd(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qdadd(cond, rd, rm, rn); + } + void Qdadd(Register rd, Register rm, Register rn) { Qdadd(al, rd, rm, rn); } + + void 
Qdsub(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qdsub(cond, rd, rm, rn); + } + void Qdsub(Register rd, Register rm, Register rn) { Qdsub(al, rd, rm, rn); } + + void Qsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsax(cond, rd, rn, rm); + } + void Qsax(Register rd, Register rn, Register rm) { Qsax(al, rd, rn, rm); } + + void Qsub(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub(cond, rd, rm, rn); + } + void Qsub(Register rd, Register rm, Register rn) { Qsub(al, rd, rm, rn); } + + void Qsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub16(cond, rd, rn, rm); + } + void Qsub16(Register rd, Register rn, Register rm) { Qsub16(al, rd, rn, rm); } + + void Qsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub8(cond, rd, rn, rm); + } + void Qsub8(Register rd, Register rn, Register rm) { Qsub8(al, rd, rn, rm); } + + void Rbit(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rbit(cond, rd, rm); + } + void Rbit(Register rd, Register rm) { Rbit(al, rd, rm); } + + void Rev(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rev(cond, rd, rm); + } + void Rev(Register rd, Register rm) { Rev(al, rd, rm); } + + void Rev16(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + rev16(cond, rd, rm); + } + void Rev16(Register rd, Register rm) { Rev16(al, rd, rm); } + + void Revsh(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + revsh(cond, rd, rm); + } + void Revsh(Register rd, Register rm) { Revsh(al, rd, rm); } + + void Ror(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ROR<c>{<q>} {<Rdm>,} <Rdm>, <Rs> ; T1 + operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + ror(cond, rd, rm, operand); + } + void Ror(Register rd, Register rm, const Operand& operand) { + Ror(al, rd, rm, operand); + } + void Ror(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Ror(cond, rd, rm, operand); + break; + case SetFlags: + Rors(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rm.IsLow() && operand.IsPlainRegister() && + rd.Is(rm); + if (setflags_is_smaller) { + Rors(cond, rd, rm, operand); + } else { + Ror(cond, rd, rm, operand); + } + break; + } + } + void Ror(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Ror(flags, al, rd, rm, operand); + } + + void Rors(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rors(cond, rd, rm, operand); + } + void Rors(Register rd, Register rm, const Operand& operand) { + Rors(al, rd, rm, operand); + } + + void Rrx(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rrx(cond, rd, rm); + } + void Rrx(Register rd, Register rm) { Rrx(al, rd, rm); } + void Rrx(FlagsUpdate flags, Condition cond, Register rd, Register rm) { + switch (flags) { + case LeaveFlags: + Rrx(cond, rd, rm); + break; + case SetFlags: + Rrxs(cond, rd, rm); + break; + case DontCare: + Rrx(cond, rd, rm); + break; + } + } + void Rrx(FlagsUpdate flags, Register rd, Register rm) { + Rrx(flags, al, rd, rm); + } + + void Rrxs(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rrxs(cond, rd, rm); + } + void Rrxs(Register rd, Register rm) { Rrxs(al, rd, rm); } + + void Rsb(Condition cond, Register rd,
Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // RSB<c>{<q>} {<Rd>}, <Rn>, #0 ; T1 + operand.IsImmediate() && rd.IsLow() && rn.IsLow() && + (operand.GetImmediate() == 0); + ITScope it_scope(this, &cond, guard, can_use_it); + rsb(cond, rd, rn, operand); + } + void Rsb(Register rd, Register rn, const Operand& operand) { + Rsb(al, rd, rn, operand); + } + void Rsb(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Rsb(cond, rd, rn, operand); + break; + case SetFlags: + Rsbs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.IsLow() && operand.IsImmediate() && + (operand.GetImmediate() == 0); + if (setflags_is_smaller) { + Rsbs(cond, rd, rn, operand); + } else { + Rsb(cond, rd, rn, operand); + } + break; + } + } + void Rsb(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Rsb(flags, al, rd, rn, operand); + } + + void Rsbs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rsbs(cond, rd, rn, operand); + } + void Rsbs(Register rd, Register rn, const Operand& operand) { + Rsbs(al, rd, rn, operand); + } + + void Rsc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rsc(cond, rd, rn, operand); + } + void Rsc(Register rd, Register rn, const Operand& operand) { + Rsc(al, rd, rn, operand); + } + void Rsc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Rsc(cond, rd, rn, operand); + break; + case SetFlags: + Rscs(cond, rd, rn, operand); + break; + case DontCare: + Rsc(cond, rd, rn, operand); + break; + } + } + void Rsc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Rsc(flags, al, rd, rn, operand); + } + + void Rscs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rscs(cond, rd, rn, operand); + } + void Rscs(Register rd, Register rn, const Operand& operand) { + Rscs(al, rd, rn, operand); + } + + void Sadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); +
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sadd16(cond, rd, rn, rm); + } + void Sadd16(Register rd, Register rn, Register rm) { Sadd16(al, rd, rn, rm); } + + void Sadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sadd8(cond, rd, rn, rm); + } + void Sadd8(Register rd, Register rn, Register rm) { Sadd8(al, rd, rn, rm); } + + void Sasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sasx(cond, rd, rn, rm); + } + void Sasx(Register rd, Register rn, Register rm) { Sasx(al, rd, rn, rm); } + + void Sbc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // SBC<c>{<q>} {<Rdn>,} <Rdn>, <Rm> ; T1 + operand.IsPlainRegister() && rn.IsLow() && rd.Is(rn) && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + sbc(cond, rd, rn, operand); + } + void Sbc(Register rd, Register rn, const Operand& operand) { + Sbc(al, rd, rn, operand); + } + void Sbc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Sbc(cond, rd, rn, operand); + break; + case SetFlags: + Sbcs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Sbcs(cond, rd, rn, operand); + } else { + Sbc(cond, rd, rn, operand); + } + break; + } + } + void Sbc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Sbc(flags, al, rd, rn, operand); + } + + void Sbcs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sbcs(cond, rd, rn, operand); + } + void Sbcs(Register rd, Register rn, const Operand& operand) { + Sbcs(al, rd, rn, operand); + } + + void Sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sbfx(cond, rd, rn, lsb, width); + } + void Sbfx(Register rd, Register rn,
uint32_t lsb, uint32_t width) { + Sbfx(al, rd, rn, lsb, width); + } + + void Sdiv(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sdiv(cond, rd, rn, rm); + } + void Sdiv(Register rd, Register rn, Register rm) { Sdiv(al, rd, rn, rm); } + + void Sel(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sel(cond, rd, rn, rm); + } + void Sel(Register rd, Register rn, Register rm) { Sel(al, rd, rn, rm); } + + void Shadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shadd16(cond, rd, rn, rm); + } + void Shadd16(Register rd, Register rn, Register rm) { + Shadd16(al, rd, rn, rm); + } + + void Shadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shadd8(cond, rd, rn, rm); + } + void Shadd8(Register rd, Register rn, Register rm) { Shadd8(al, rd, rn, rm); } + + void Shasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shasx(cond, rd, rn, rm); + } + void Shasx(Register rd, Register rn, Register rm) { Shasx(al, rd, rn, rm); } + + void Shsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsax(cond, rd, rn, rm); + } + void Shsax(Register rd, Register rn, Register rm) { Shsax(al, rd, rn, rm); } + + void Shsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsub16(cond, rd, rn, rm); + } + void Shsub16(Register rd, Register rn, Register rm) { + Shsub16(al, rd, rn, rm); + } + + 
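(Usage note, not part of the upstream header: every macro instruction in this file follows the same shape. It asserts that no operand aliases the macro-assembler's scratch-register pool, opens a MacroEmissionCheckScope to reserve buffer space, wraps the condition in an ITScope, which emits an IT prefix when assembling T32 and one of the narrow encodings listed in the `can_use_it` comments applies, and finally delegates to the lowercase raw-assembler method. A minimal sketch of how callers drive this API; the function name and register choices here are illustrative only:

    // Sketch only; not part of upstream VIXL.
    #include "vixl/aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    // Clamps the value in r0 to [0, 255] using predicated execution.
    void EmitClampToByte(MacroAssembler* masm) {
      masm->Cmp(r0, 255);                 // Condition-less overloads forward with al.
      masm->Mov(hi, r0, 255);             // Conditional MOV; ITScope emits IT under T32.
      masm->Ldrb(r1, MemOperand(r2, 4));  // Immediate-offset form, IT-eligible (T1).
    }

The FlagsUpdate overloads layer on top of this: with DontCare, the macro-assembler is free to pick the flag-setting encoding whenever the `setflags_is_smaller` check shows it is the shorter T32 form.)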
void Shsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsub8(cond, rd, rn, rm); + } + void Shsub8(Register rd, Register rn, Register rm) { Shsub8(al, rd, rn, rm); } + + void Smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlabb(cond, rd, rn, rm, ra); + } + void Smlabb(Register rd, Register rn, Register rm, Register ra) { + Smlabb(al, rd, rn, rm, ra); + } + + void Smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlabt(cond, rd, rn, rm, ra); + } + void Smlabt(Register rd, Register rn, Register rm, Register ra) { + Smlabt(al, rd, rn, rm, ra); + } + + void Smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlad(cond, rd, rn, rm, ra); + } + void Smlad(Register rd, Register rn, Register rm, Register ra) { + Smlad(al, rd, rn, rm, ra); + } + + void Smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smladx(cond, rd, rn, rm, ra); + } + void Smladx(Register rd, Register rn, Register rm, Register ra) { + Smladx(al, rd, rn, rm, ra); + } + + void Smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlal(cond, rdlo, rdhi, rn, rm); + } + void Smlal(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlal(al, rdlo, rdhi, rn, rm); + } + + void Smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, 
Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlalbb(cond, rdlo, rdhi, rn, rm); + } + void Smlalbb(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlalbb(al, rdlo, rdhi, rn, rm); + } + + void Smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlalbt(cond, rdlo, rdhi, rn, rm); + } + void Smlalbt(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlalbt(al, rdlo, rdhi, rn, rm); + } + + void Smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlald(cond, rdlo, rdhi, rn, rm); + } + void Smlald(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlald(al, rdlo, rdhi, rn, rm); + } + + void Smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaldx(cond, rdlo, rdhi, rn, rm); + } + void Smlaldx(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlaldx(al, rdlo, rdhi, rn, rm); + } + + void Smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlals(cond, rdlo, rdhi, rn, rm); + } + void Smlals(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlals(al, rdlo, rdhi, rn, rm); + } + + void Smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaltb(cond, rdlo, rdhi, rn, rm); + } + void Smlaltb(Register rdlo, Register rdhi, 
Register rn, Register rm) { + Smlaltb(al, rdlo, rdhi, rn, rm); + } + + void Smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaltt(cond, rdlo, rdhi, rn, rm); + } + void Smlaltt(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlaltt(al, rdlo, rdhi, rn, rm); + } + + void Smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlatb(cond, rd, rn, rm, ra); + } + void Smlatb(Register rd, Register rn, Register rm, Register ra) { + Smlatb(al, rd, rn, rm, ra); + } + + void Smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlatt(cond, rd, rn, rm, ra); + } + void Smlatt(Register rd, Register rn, Register rm, Register ra) { + Smlatt(al, rd, rn, rm, ra); + } + + void Smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlawb(cond, rd, rn, rm, ra); + } + void Smlawb(Register rd, Register rn, Register rm, Register ra) { + Smlawb(al, rd, rn, rm, ra); + } + + void Smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlawt(cond, rd, rn, rm, ra); + } + void Smlawt(Register rd, Register rn, Register rm, Register ra) { + Smlawt(al, rd, rn, rm, ra); + } + + void Smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsd(cond, rd, rn, rm, ra); + } + void 
Smlsd(Register rd, Register rn, Register rm, Register ra) { + Smlsd(al, rd, rn, rm, ra); + } + + void Smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsdx(cond, rd, rn, rm, ra); + } + void Smlsdx(Register rd, Register rn, Register rm, Register ra) { + Smlsdx(al, rd, rn, rm, ra); + } + + void Smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsld(cond, rdlo, rdhi, rn, rm); + } + void Smlsld(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlsld(al, rdlo, rdhi, rn, rm); + } + + void Smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsldx(cond, rdlo, rdhi, rn, rm); + } + void Smlsldx(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlsldx(al, rdlo, rdhi, rn, rm); + } + + void Smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmla(cond, rd, rn, rm, ra); + } + void Smmla(Register rd, Register rn, Register rm, Register ra) { + Smmla(al, rd, rn, rm, ra); + } + + void Smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmlar(cond, rd, rn, rm, ra); + } + void Smmlar(Register rd, Register rn, Register rm, Register ra) { + Smmlar(al, rd, rn, rm, ra); + } + + void Smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
smmls(cond, rd, rn, rm, ra); + } + void Smmls(Register rd, Register rn, Register rm, Register ra) { + Smmls(al, rd, rn, rm, ra); + } + + void Smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmlsr(cond, rd, rn, rm, ra); + } + void Smmlsr(Register rd, Register rn, Register rm, Register ra) { + Smmlsr(al, rd, rn, rm, ra); + } + + void Smmul(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmul(cond, rd, rn, rm); + } + void Smmul(Register rd, Register rn, Register rm) { Smmul(al, rd, rn, rm); } + + void Smmulr(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmulr(cond, rd, rn, rm); + } + void Smmulr(Register rd, Register rn, Register rm) { Smmulr(al, rd, rn, rm); } + + void Smuad(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smuad(cond, rd, rn, rm); + } + void Smuad(Register rd, Register rn, Register rm) { Smuad(al, rd, rn, rm); } + + void Smuadx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smuadx(cond, rd, rn, rm); + } + void Smuadx(Register rd, Register rn, Register rm) { Smuadx(al, rd, rn, rm); } + + void Smulbb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulbb(cond, rd, rn, rm); + } + void Smulbb(Register rd, Register rn, Register rm) { Smulbb(al, rd, rn, rm); } + + void Smulbt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + smulbt(cond, rd, rn, rm); + } + void Smulbt(Register rd, Register rn, Register rm) { Smulbt(al, rd, rn, rm); } + + void Smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smull(cond, rdlo, rdhi, rn, rm); + } + void Smull(Register rdlo, Register rdhi, Register rn, Register rm) { + Smull(al, rdlo, rdhi, rn, rm); + } + void Smull(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Smull(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Smulls(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Smull(cond, rdlo, rdhi, rn, rm); + break; + } + } + void Smull(FlagsUpdate flags, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + Smull(flags, al, rdlo, rdhi, rn, rm); + } + + void Smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulls(cond, rdlo, rdhi, rn, rm); + } + void Smulls(Register rdlo, Register rdhi, Register rn, Register rm) { + Smulls(al, rdlo, rdhi, rn, rm); + } + + void Smultb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smultb(cond, rd, rn, rm); + } + void Smultb(Register rd, Register rn, Register rm) { Smultb(al, rd, rn, rm); } + + void Smultt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smultt(cond, rd, rn, rm); + } + void Smultt(Register rd, Register rn, Register rm) { Smultt(al, rd, rn, rm); } + + void Smulwb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulwb(cond, rd, rn, rm); + } + void Smulwb(Register rd, Register rn, Register rm) { Smulwb(al, rd, rn, rm); } + + void Smulwt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
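  // Usage sketch for the Smull/Smulls overloads above (illustrative only,
  // assuming a vixl::aarch32::MacroAssembler named masm): the FlagsUpdate
  // parameter defers the SMULL-vs-SMULLS choice to the macro assembler.
  //
  //   masm.Smull(r0, r1, r2, r3);            // unconditional, leaves flags
  //   masm.Smull(SetFlags, r0, r1, r2, r3);  // dispatched to Smulls()
  //   masm.Smull(DontCare, r0, r1, r2, r3);  // per the switch above, the
  //                                          // DontCare case emits plain SMULL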
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulwt(cond, rd, rn, rm); + } + void Smulwt(Register rd, Register rn, Register rm) { Smulwt(al, rd, rn, rm); } + + void Smusd(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smusd(cond, rd, rn, rm); + } + void Smusd(Register rd, Register rn, Register rm) { Smusd(al, rd, rn, rm); } + + void Smusdx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smusdx(cond, rd, rn, rm); + } + void Smusdx(Register rd, Register rn, Register rm) { Smusdx(al, rd, rn, rm); } + + void Ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssat(cond, rd, imm, operand); + } + void Ssat(Register rd, uint32_t imm, const Operand& operand) { + Ssat(al, rd, imm, operand); + } + + void Ssat16(Condition cond, Register rd, uint32_t imm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssat16(cond, rd, imm, rn); + } + void Ssat16(Register rd, uint32_t imm, Register rn) { + Ssat16(al, rd, imm, rn); + } + + void Ssax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssax(cond, rd, rn, rm); + } + void Ssax(Register rd, Register rn, Register rm) { Ssax(al, rd, rn, rm); } + + void Ssub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssub16(cond, rd, rn, rm); + } + void Ssub16(Register rd, Register rn, Register rm) { Ssub16(al, rd, rn, rm); } + + void Ssub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssub8(cond, rd, rn, rm); + } + void Ssub8(Register rd, Register rn, Register rm) { Ssub8(al, rd, rn, rm); } + + void Stl(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stl(cond, rt, operand); + } + void Stl(Register rt, const MemOperand& operand) { Stl(al, rt, operand); } + + void Stlb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlb(cond, rt, operand); + } + void Stlb(Register rt, const MemOperand& operand) { Stlb(al, rt, operand); } + + void Stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlex(cond, rd, rt, operand); + } + void Stlex(Register rd, Register rt, const MemOperand& operand) { + Stlex(al, rd, rt, operand); + } + + void Stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlexb(cond, rd, rt, operand); + } + void Stlexb(Register rd, Register rt, const MemOperand& operand) { + Stlexb(al, rd, rt, operand); + } + + void Stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlexd(cond, rd, rt, rt2, operand); + } + void Stlexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + Stlexd(al, rd, rt, rt2, operand); + } + + void Stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlexh(cond, rd, rt, operand); + } + void Stlexh(Register rd, Register rt, const MemOperand& operand) { + Stlexh(al, rd, rt, operand); + } + + void Stlh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + 
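  // Usage sketch for the exclusive-store wrappers above (illustrative only,
  // assuming a vixl::aarch32::MacroAssembler named masm and example register
  // choices): Stlex writes a status result into rd, 0 on success, so a
  // store-release is typically retried until the exclusive monitor succeeds.
  //
  //   Label retry;
  //   masm.Bind(&retry);
  //   masm.Ldrex(r1, MemOperand(r0));      // load-exclusive the current value
  //   masm.Stlex(r2, r3, MemOperand(r0));  // try to store r3 with release order
  //   masm.Cmp(r2, 0);                     // r2 == 0 means the store succeeded
  //   masm.B(ne, &retry);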
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlh(cond, rt, operand); + } + void Stlh(Register rt, const MemOperand& operand) { Stlh(al, rt, operand); } + + void Stm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stm(cond, rn, write_back, registers); + } + void Stm(Register rn, WriteBack write_back, RegisterList registers) { + Stm(al, rn, write_back, registers); + } + + void Stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmda(cond, rn, write_back, registers); + } + void Stmda(Register rn, WriteBack write_back, RegisterList registers) { + Stmda(al, rn, write_back, registers); + } + + void Stmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmdb(cond, rn, write_back, registers); + } + void Stmdb(Register rn, WriteBack write_back, RegisterList registers) { + Stmdb(al, rn, write_back, registers); + } + + void Stmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmea(cond, rn, write_back, registers); + } + void Stmea(Register rn, WriteBack write_back, RegisterList registers) { + Stmea(al, rn, write_back, registers); + } + + void Stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmed(cond, rn, write_back, registers); + } + void Stmed(Register rn, WriteBack write_back, RegisterList registers) { + Stmed(al, rn, write_back, registers); + } + + void Stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmfa(cond, rn, write_back, registers); + } + void Stmfa(Register rn, WriteBack write_back, RegisterList registers) { + Stmfa(al, rn, write_back, registers); + } + + void Stmfd(Condition cond, + Register rn, + WriteBack 
write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmfd(cond, rn, write_back, registers); + } + void Stmfd(Register rn, WriteBack write_back, RegisterList registers) { + Stmfd(al, rn, write_back, registers); + } + + void Stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmib(cond, rn, write_back, registers); + } + void Stmib(Register rn, WriteBack write_back, RegisterList registers) { + Stmib(al, rn, write_back, registers); + } + + void Str(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STR{<c>}{<q>} <Rt>, [<Rn> {, #{+}<imm>}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 124, 4) && + (operand.GetAddrMode() == Offset)) || + // STR{<c>}{<q>} <Rt>, [SP{, #{+}<imm>}] ; T2 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsSP() && + operand.IsOffsetImmediateWithinRange(0, 1020, 4) && + (operand.GetAddrMode() == Offset)) || + // STR{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + str(cond, rt, operand); + } + void Str(Register rt, const MemOperand& operand) { Str(al, rt, operand); } + + void Strb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STRB{<c>}{<q>} <Rt>, [<Rn> {, #{+}<imm>}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 31) && + (operand.GetAddrMode() == Offset)) || + // STRB{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + strb(cond, rt, operand); + } + void Strb(Register rt, const MemOperand& operand) { Strb(al, rt, operand); } + + void Strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strd(cond, rt, rt2, operand); + } + void Strd(Register rt, Register rt2, const MemOperand& operand) { + Strd(al, rt, rt2,
operand); + } + + void Strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strex(cond, rd, rt, operand); + } + void Strex(Register rd, Register rt, const MemOperand& operand) { + Strex(al, rd, rt, operand); + } + + void Strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexb(cond, rd, rt, operand); + } + void Strexb(Register rd, Register rt, const MemOperand& operand) { + Strexb(al, rd, rt, operand); + } + + void Strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexd(cond, rd, rt, rt2, operand); + } + void Strexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + Strexd(al, rd, rt, rt2, operand); + } + + void Strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexh(cond, rd, rt, operand); + } + void Strexh(Register rd, Register rt, const MemOperand& operand) { + Strexh(al, rd, rt, operand); + } + + void Strh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STRH{<c>}{<q>} <Rt>, [<Rn> {, #{+}<imm>}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 62, 2) && + (operand.GetAddrMode() == Offset)) || + // STRH{<c>}{<q>} <Rt>, [<Rn>, {+}<Rm>] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + strh(cond, rt, operand); + } + void Strh(Register rt, const MemOperand& operand) { Strh(al, rt, operand); } + + void Sub(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); +
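  // Usage sketch for the store wrappers above (illustrative only, assuming a
  // vixl::aarch32::MacroAssembler named masm): the can_use_it computation in
  // Str/Strb/Strh mirrors the narrow T32 encodings, so a conditional store
  // can be placed inside an IT block instead of being branched around.
  //
  //   masm.Str(r0, MemOperand(sp, 8));      // unconditional word store
  //   masm.Str(eq, r0, MemOperand(r1, 4));  // in T32 this can emit
  //                                         // "IT eq; STR r0, [r1, #4]"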
MacroEmissionCheckScope guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + } + bool can_use_it = + // SUB<c>{<q>} <Rd>, <Rn>, #<imm3> ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 7) && rn.IsLow() && + rd.IsLow()) || + // SUB<c>{<q>} {<Rdn>,} <Rdn>, #<imm8> ; T2 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rd.IsLow() && rn.Is(rd)) || + // SUB<c>{<q>} <Rd>, <Rn>, <Rm> + (operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + sub(cond, rd, rn, operand); + } + void Sub(Register rd, Register rn, const Operand& operand) { + Sub(al, rd, rn, operand); + } + void Sub(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Sub(cond, rd, rn, operand); + break; + case SetFlags: + Subs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()) || + (operand.IsImmediate() && + ((rd.IsLow() && rn.IsLow() && (operand.GetImmediate() < 8)) || + (rd.IsLow() && rn.Is(rd) && (operand.GetImmediate() < 256))))); + if (setflags_is_smaller) { + Subs(cond, rd, rn, operand); + } else { + bool changed_op_is_smaller = + operand.IsImmediate() && (operand.GetSignedImmediate() < 0) && + ((rd.IsLow() && rn.IsLow() && + (operand.GetSignedImmediate() >= -7)) || + (rd.IsLow() && rn.Is(rd) && + (operand.GetSignedImmediate() >= -255))); + if (changed_op_is_smaller) { + Adds(cond, rd, rn, -operand.GetSignedImmediate()); + } else { + Sub(cond, rd, rn, operand); + } + } + break; + } + } + void Sub(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Sub(flags, al, rd, rn, operand); + } + + void Subs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + subs(cond, rd, rn, operand); + } + void Subs(Register rd, Register rn, const Operand& operand) { + Subs(al, rd, rn, operand); + } + + void Svc(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + svc(cond, imm); + } + void Svc(uint32_t imm) { Svc(al, imm); } + + void Sxtab(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtab(cond, rd, rn, operand); + } + void Sxtab(Register rd, Register rn, const Operand& operand) { + Sxtab(al, rd, rn, operand); + } + + void Sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); +
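  // Usage sketch for the Sub overloads above (illustrative only, assuming a
  // vixl::aarch32::MacroAssembler named masm): with DontCare the macro
  // assembler may trade flag-setting for code size, per the
  // setflags_is_smaller/changed_op_is_smaller logic above.
  //
  //   masm.UseT32();
  //   masm.Sub(DontCare, r0, r0, 1);   // may emit the 16-bit SUBS r0, r0, #1
  //   masm.Sub(DontCare, r0, r0, -4);  // may be rewritten as ADDS r0, r0, #4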
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtab16(cond, rd, rn, operand); + } + void Sxtab16(Register rd, Register rn, const Operand& operand) { + Sxtab16(al, rd, rn, operand); + } + + void Sxtah(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtah(cond, rd, rn, operand); + } + void Sxtah(Register rd, Register rn, const Operand& operand) { + Sxtah(al, rd, rn, operand); + } + + void Sxtb(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtb(cond, rd, operand); + } + void Sxtb(Register rd, const Operand& operand) { Sxtb(al, rd, operand); } + + void Sxtb16(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtb16(cond, rd, operand); + } + void Sxtb16(Register rd, const Operand& operand) { Sxtb16(al, rd, operand); } + + void Sxth(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxth(cond, rd, operand); + } + void Sxth(Register rd, const Operand& operand) { Sxth(al, rd, operand); } + + void Teq(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + teq(cond, rn, operand); + } + void Teq(Register rn, const Operand& operand) { Teq(al, rn, operand); } + + void Tst(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // TST{<c>}{<q>} <Rn>, <Rm> ; T1 + operand.IsPlainRegister() && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + tst(cond, rn, operand); + } + void Tst(Register rn, const Operand& operand) { Tst(al, rn, operand); } + + void Uadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uadd16(cond, rd, rn, rm); + } + void
Uadd16(Register rd, Register rn, Register rm) { Uadd16(al, rd, rn, rm); } + + void Uadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uadd8(cond, rd, rn, rm); + } + void Uadd8(Register rd, Register rn, Register rm) { Uadd8(al, rd, rn, rm); } + + void Uasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uasx(cond, rd, rn, rm); + } + void Uasx(Register rd, Register rn, Register rm) { Uasx(al, rd, rn, rm); } + + void Ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ubfx(cond, rd, rn, lsb, width); + } + void Ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + Ubfx(al, rd, rn, lsb, width); + } + + void Udf(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + udf(cond, imm); + } + void Udf(uint32_t imm) { Udf(al, imm); } + + void Udiv(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + udiv(cond, rd, rn, rm); + } + void Udiv(Register rd, Register rn, Register rm) { Udiv(al, rd, rn, rm); } + + void Uhadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhadd16(cond, rd, rn, rm); + } + void Uhadd16(Register rd, Register rn, Register rm) { + Uhadd16(al, rd, rn, rm); + } + + void Uhadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhadd8(cond, rd, rn, rm); + } + void Uhadd8(Register rd, Register rn, Register rm) { Uhadd8(al, rd, rn, rm); } + + void Uhasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhasx(cond, rd, rn, rm); + } + void Uhasx(Register rd, Register rn, Register rm) { Uhasx(al, rd, rn, rm); } + + void Uhsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsax(cond, rd, rn, rm); + } + void Uhsax(Register rd, Register rn, Register rm) { Uhsax(al, rd, rn, rm); } + + void Uhsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsub16(cond, rd, rn, rm); + } + void Uhsub16(Register rd, Register rn, Register rm) { + Uhsub16(al, rd, rn, rm); + } + + void Uhsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsub8(cond, rd, rn, rm); + } + void Uhsub8(Register rd, Register rn, Register rm) { Uhsub8(al, rd, rn, rm); } + + void Umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umaal(cond, rdlo, rdhi, rn, rm); + } + void Umaal(Register rdlo, Register rdhi, Register rn, Register rm) { + Umaal(al, rdlo, rdhi, rn, rm); + } + + void Umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umlal(cond, rdlo, rdhi, rn, rm); + } + void Umlal(Register rdlo, Register rdhi, Register rn, Register rm) { + Umlal(al, rdlo, rdhi, rn, rm); + } + void Umlal(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Umlal(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Umlals(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Umlal(cond, rdlo, rdhi, rn, rm); + break; + } + } + void Umlal(FlagsUpdate flags, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + Umlal(flags, al, rdlo, rdhi, rn, rm); + } + + void Umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umlals(cond, rdlo, rdhi, rn, rm); + } + void Umlals(Register rdlo, Register rdhi, Register rn, Register rm) { + Umlals(al, rdlo, rdhi, rn, rm); + } + + void Umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umull(cond, rdlo, rdhi, rn, rm); + } + void Umull(Register rdlo, Register rdhi, Register rn, Register rm) { + Umull(al, rdlo, rdhi, rn, rm); + } + void Umull(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Umull(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Umulls(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Umull(cond, rdlo, rdhi, rn, rm); + break; + } + } + void Umull(FlagsUpdate flags, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + Umull(flags, al, rdlo, rdhi, rn, rm); + } + + void Umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umulls(cond, rdlo, rdhi, rn, rm); + } + void Umulls(Register rdlo, Register rdhi, Register rn, Register rm) { + Umulls(al, rdlo, rdhi, rn, rm); + } + + void Uqadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqadd16(cond, rd, rn, rm); + } + void Uqadd16(Register rd, Register rn, Register rm) { + Uqadd16(al, rd, rn, rm); + } + + void Uqadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqadd8(cond, rd, rn, rm); + } + void Uqadd8(Register rd, Register rn, Register rm) { Uqadd8(al, rd, rn, rm); } + + void Uqasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + uqasx(cond, rd, rn, rm); + } + void Uqasx(Register rd, Register rn, Register rm) { Uqasx(al, rd, rn, rm); } + + void Uqsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsax(cond, rd, rn, rm); + } + void Uqsax(Register rd, Register rn, Register rm) { Uqsax(al, rd, rn, rm); } + + void Uqsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsub16(cond, rd, rn, rm); + } + void Uqsub16(Register rd, Register rn, Register rm) { + Uqsub16(al, rd, rn, rm); + } + + void Uqsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsub8(cond, rd, rn, rm); + } + void Uqsub8(Register rd, Register rn, Register rm) { Uqsub8(al, rd, rn, rm); } + + void Usad8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usad8(cond, rd, rn, rm); + } + void Usad8(Register rd, Register rn, Register rm) { Usad8(al, rd, rn, rm); } + + void Usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usada8(cond, rd, rn, rm, ra); + } + void Usada8(Register rd, Register rn, Register rm, Register ra) { + Usada8(al, rd, rn, rm, ra); + } + + void Usat(Condition cond, Register rd, uint32_t imm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usat(cond, rd, imm, operand); + } + void Usat(Register rd, uint32_t imm, const Operand& operand) { + Usat(al, rd, imm, operand); + } + + void Usat16(Condition cond, Register rd, uint32_t imm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, 
&cond, guard); + usat16(cond, rd, imm, rn); + } + void Usat16(Register rd, uint32_t imm, Register rn) { + Usat16(al, rd, imm, rn); + } + + void Usax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usax(cond, rd, rn, rm); + } + void Usax(Register rd, Register rn, Register rm) { Usax(al, rd, rn, rm); } + + void Usub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usub16(cond, rd, rn, rm); + } + void Usub16(Register rd, Register rn, Register rm) { Usub16(al, rd, rn, rm); } + + void Usub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usub8(cond, rd, rn, rm); + } + void Usub8(Register rd, Register rn, Register rm) { Usub8(al, rd, rn, rm); } + + void Uxtab(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtab(cond, rd, rn, operand); + } + void Uxtab(Register rd, Register rn, const Operand& operand) { + Uxtab(al, rd, rn, operand); + } + + void Uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtab16(cond, rd, rn, operand); + } + void Uxtab16(Register rd, Register rn, const Operand& operand) { + Uxtab16(al, rd, rn, operand); + } + + void Uxtah(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtah(cond, rd, rn, operand); + } + void Uxtah(Register rd, Register rn, const Operand& operand) { + Uxtah(al, rd, rn, operand); + } + + void Uxtb(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + uxtb(cond, rd, operand); + } + void Uxtb(Register rd, const Operand& operand) { Uxtb(al, rd, operand); } + + void Uxtb16(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtb16(cond, rd, operand); + } + void Uxtb16(Register rd, const Operand& operand) { Uxtb16(al, rd, operand); } + + void Uxth(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxth(cond, rd, operand); + } + void Uxth(Register rd, const Operand& operand) { Uxth(al, rd, operand); } + + void Vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaba(cond, dt, rd, rn, rm); + } + void Vaba(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vaba(al, dt, rd, rn, rm); + } + + void Vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaba(cond, dt, rd, rn, rm); + } + void Vaba(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vaba(al, dt, rd, rn, rm); + } + + void Vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabal(cond, dt, rd, rn, rm); + } + void Vabal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vabal(al, dt, rd, rn, rm); + } + + void Vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabd(cond, dt, rd, rn, rm); + } + void Vabd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vabd(al, dt, rd, rn, rm); + } + + void Vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + vabd(cond, dt, rd, rn, rm); + } + void Vabd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vabd(al, dt, rd, rn, rm); + } + + void Vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabdl(cond, dt, rd, rn, rm); + } + void Vabdl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vabdl(al, dt, rd, rn, rm); + } + + void Vabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, DRegister rd, DRegister rm) { Vabs(al, dt, rd, rm); } + + void Vabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, QRegister rd, QRegister rm) { Vabs(al, dt, rd, rm); } + + void Vabs(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, SRegister rd, SRegister rm) { Vabs(al, dt, rd, rm); } + + void Vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacge(cond, dt, rd, rn, rm); + } + void Vacge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacge(al, dt, rd, rn, rm); + } + + void Vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacge(cond, dt, rd, rn, rm); + } + void Vacge(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vacge(al, dt, rd, rn, rm); + } + + void Vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacgt(cond, dt, rd, rn, 
rm); + } + void Vacgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacgt(al, dt, rd, rn, rm); + } + + void Vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacgt(cond, dt, rd, rn, rm); + } + void Vacgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vacgt(al, dt, rd, rn, rm); + } + + void Vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacle(cond, dt, rd, rn, rm); + } + void Vacle(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacle(al, dt, rd, rn, rm); + } + + void Vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacle(cond, dt, rd, rn, rm); + } + void Vacle(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vacle(al, dt, rd, rn, rm); + } + + void Vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaclt(cond, dt, rd, rn, rm); + } + void Vaclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vaclt(al, dt, rd, rn, rm); + } + + void Vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaclt(cond, dt, rd, rn, rm); + } + void Vaclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vaclt(al, dt, rd, rn, rm); + } + + void Vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vadd(cond, dt, rd, rn, rm); + } + void Vadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vadd(al, dt, rd, rn, rm); + } + + void Vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
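  // Usage sketch for the NEON arithmetic wrappers above (illustrative only,
  // assuming a vixl::aarch32::MacroAssembler named masm): each operation takes
  // an explicit DataType selecting the lane arrangement, with separate
  // D-register (64-bit) and Q-register (128-bit) overloads.
  //
  //   masm.Vadd(F32, q0, q1, q2);  // four single-precision lane-wise adds
  //   masm.Vadd(I16, d0, d1, d2);  // four 16-bit integer lane-wise adds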
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vadd(cond, dt, rd, rn, rm);
+  }
+  void Vadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vadd(al, dt, rd, rn, rm);
+  }
+
+  void Vadd(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vadd(cond, dt, rd, rn, rm);
+  }
+  void Vadd(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vadd(al, dt, rd, rn, rm);
+  }
+
+  void Vaddhn(
+      Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vaddhn(cond, dt, rd, rn, rm);
+  }
+  void Vaddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) {
+    Vaddhn(al, dt, rd, rn, rm);
+  }
+
+  void Vaddl(
+      Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vaddl(cond, dt, rd, rn, rm);
+  }
+  void Vaddl(DataType dt, QRegister rd, DRegister rn, DRegister rm) {
+    Vaddl(al, dt, rd, rn, rm);
+  }
+
+  void Vaddw(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vaddw(cond, dt, rd, rn, rm);
+  }
+  void Vaddw(DataType dt, QRegister rd, QRegister rn, DRegister rm) {
+    Vaddw(al, dt, rd, rn, rm);
+  }
+
+  void Vand(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rn,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vand(cond, dt, rd, rn, operand);
+  }
+  void Vand(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) {
+    Vand(al, dt, rd, rn, operand);
+  }
+
+  void Vand(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rn,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vand(cond, dt, rd, rn, operand);
+  }
+  void Vand(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) {
+    Vand(al, dt, rd, rn, operand);
+  }
+
+  void Vbic(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rn,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbic(cond, dt, rd, rn, operand);
+  }
+  void Vbic(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) {
+    Vbic(al, dt, rd, rn, operand);
+  }
+
+  void Vbic(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rn,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbic(cond, dt, rd, rn, operand);
+  }
+  void Vbic(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) {
+    Vbic(al, dt, rd, rn, operand);
+  }
+
+  void Vbif(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbif(cond, dt, rd, rn, rm);
+  }
+  void Vbif(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vbif(al, dt, rd, rn, rm);
+  }
+  void Vbif(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
+    Vbif(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbif(DRegister rd, DRegister rn, DRegister rm) {
+    Vbif(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vbif(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbif(cond, dt, rd, rn, rm);
+  }
+  void Vbif(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vbif(al, dt, rd, rn, rm);
+  }
+  void Vbif(Condition cond, QRegister rd, QRegister rn, QRegister rm) {
+    Vbif(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbif(QRegister rd, QRegister rn, QRegister rm) {
+    Vbif(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
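+  // [Editorial note, not part of upstream VIXL] Vbif above and Vbit/Vbsl
+  // below (bitwise insert-if-false/insert-if-true/select) operate on raw
+  // bits, so the DataType is optional: the overloads without one pass
+  // kDataTypeValueNone and emit the untyped form of the instruction.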
+  void Vbit(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbit(cond, dt, rd, rn, rm);
+  }
+  void Vbit(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vbit(al, dt, rd, rn, rm);
+  }
+  void Vbit(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
+    Vbit(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbit(DRegister rd, DRegister rn, DRegister rm) {
+    Vbit(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vbit(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbit(cond, dt, rd, rn, rm);
+  }
+  void Vbit(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vbit(al, dt, rd, rn, rm);
+  }
+  void Vbit(Condition cond, QRegister rd, QRegister rn, QRegister rm) {
+    Vbit(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbit(QRegister rd, QRegister rn, QRegister rm) {
+    Vbit(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vbsl(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbsl(cond, dt, rd, rn, rm);
+  }
+  void Vbsl(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vbsl(al, dt, rd, rn, rm);
+  }
+  void Vbsl(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
+    Vbsl(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbsl(DRegister rd, DRegister rn, DRegister rm) {
+    Vbsl(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vbsl(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vbsl(cond, dt, rd, rn, rm);
+  }
+  void Vbsl(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vbsl(al, dt, rd, rn, rm);
+  }
+  void Vbsl(Condition cond, QRegister rd, QRegister rn, QRegister rm) {
+    Vbsl(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Vbsl(QRegister rd, QRegister rn, QRegister rm) {
+    Vbsl(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vceq(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vceq(cond, dt, rd, rm, operand);
+  }
+  void Vceq(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
+    Vceq(al, dt, rd, rm, operand);
+  }
+
+  void Vceq(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vceq(cond, dt, rd, rm, operand);
+  }
+  void Vceq(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
+    Vceq(al, dt, rd, rm, operand);
+  }
+
+  void Vceq(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vceq(cond, dt, rd, rn, rm);
+  }
+  void Vceq(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vceq(al, dt, rd, rn, rm);
+  }
+
+  void Vceq(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vceq(cond, dt, rd, rn, rm);
+  }
+  void Vceq(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vceq(al, dt, rd, rn, rm);
+  }
+
+  void Vcge(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcge(cond, dt, rd, rm, operand);
+  }
+  void Vcge(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
+    Vcge(al, dt, rd, rm, operand);
+  }
+
+  void Vcge(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcge(cond, dt, rd, rm, operand);
+  }
+  void Vcge(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
+    Vcge(al, dt, rd, rm, operand);
+  }
+
+  void Vcge(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcge(cond, dt, rd, rn, rm);
+  }
+  void Vcge(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vcge(al, dt, rd, rn, rm);
+  }
+
+  void Vcge(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcge(cond, dt, rd, rn, rm);
+  }
+  void Vcge(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vcge(al, dt, rd, rn, rm);
+  }
+
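+  // [Editorial note, not part of upstream VIXL] Each NEON comparison (Vceq,
+  // Vcge, Vcgt, Vcle, Vclt) comes in two shapes: a register/operand form
+  // whose DOperand/QOperand is typically the immediate #0, and a
+  // register/register form that compares two vectors element-wise.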
+  void Vcgt(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcgt(cond, dt, rd, rm, operand);
+  }
+  void Vcgt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
+    Vcgt(al, dt, rd, rm, operand);
+  }
+
+  void Vcgt(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcgt(cond, dt, rd, rm, operand);
+  }
+  void Vcgt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
+    Vcgt(al, dt, rd, rm, operand);
+  }
+
+  void Vcgt(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcgt(cond, dt, rd, rn, rm);
+  }
+  void Vcgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vcgt(al, dt, rd, rn, rm);
+  }
+
+  void Vcgt(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcgt(cond, dt, rd, rn, rm);
+  }
+  void Vcgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vcgt(al, dt, rd, rn, rm);
+  }
+
+  void Vcle(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcle(cond, dt, rd, rm, operand);
+  }
+  void Vcle(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
+    Vcle(al, dt, rd, rm, operand);
+  }
+
+  void Vcle(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcle(cond, dt, rd, rm, operand);
+  }
+  void Vcle(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
+    Vcle(al, dt, rd, rm, operand);
+  }
+
+  void Vcle(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcle(cond, dt, rd, rn, rm);
+  }
+  void Vcle(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vcle(al, dt, rd, rn, rm);
+  }
+
+  void Vcle(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcle(cond, dt, rd, rn, rm);
+  }
+  void Vcle(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vcle(al, dt, rd, rn, rm);
+  }
+
+  void Vcls(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcls(cond, dt, rd, rm);
+  }
+  void Vcls(DataType dt, DRegister rd, DRegister rm) { Vcls(al, dt, rd, rm); }
+
+  void Vcls(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcls(cond, dt, rd, rm);
+  }
+  void Vcls(DataType dt, QRegister rd, QRegister rm) { Vcls(al, dt, rd, rm); }
+
+  void Vclt(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclt(cond, dt, rd, rm, operand);
+  }
+  void Vclt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
+    Vclt(al, dt, rd, rm, operand);
+  }
+
+  void Vclt(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclt(cond, dt, rd, rm, operand);
+  }
+  void Vclt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
+    Vclt(al, dt, rd, rm, operand);
+  }
+
+  void Vclt(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclt(cond, dt, rd, rn, rm);
+  }
+  void Vclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vclt(al, dt, rd, rn, rm);
+  }
+
+  void Vclt(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclt(cond, dt, rd, rn, rm);
+  }
+  void Vclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vclt(al, dt, rd, rn, rm);
+  }
+
+  void Vclz(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclz(cond, dt, rd, rm);
+  }
+  void Vclz(DataType dt, DRegister rd, DRegister rm) { Vclz(al, dt, rd, rm); }
+
+  void Vclz(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vclz(cond, dt, rd, rm);
+  }
+  void Vclz(DataType dt, QRegister rd, QRegister rm) { Vclz(al, dt, rd, rm); }
+
+  void Vcmp(Condition cond,
+            DataType dt,
+            SRegister rd,
+            const SOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcmp(cond, dt, rd, operand);
+  }
+  void Vcmp(DataType dt, SRegister rd, const SOperand& operand) {
+    Vcmp(al, dt, rd, operand);
+  }
+
+  void Vcmp(Condition cond,
+            DataType dt,
+            DRegister rd,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcmp(cond, dt, rd, operand);
+  }
+  void Vcmp(DataType dt, DRegister rd, const DOperand& operand) {
+    Vcmp(al, dt, rd, operand);
+  }
+
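+  // [Editorial note, not part of upstream VIXL] Vcmpe below is the
+  // signalling variant of Vcmp: it raises an Invalid Operation floating-point
+  // exception for any NaN operand, whereas Vcmp only does so for signalling
+  // NaNs. Both update the FPSCR flags rather than the APSR.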
+  void Vcmpe(Condition cond,
+             DataType dt,
+             SRegister rd,
+             const SOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcmpe(cond, dt, rd, operand);
+  }
+  void Vcmpe(DataType dt, SRegister rd, const SOperand& operand) {
+    Vcmpe(al, dt, rd, operand);
+  }
+
+  void Vcmpe(Condition cond,
+             DataType dt,
+             DRegister rd,
+             const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcmpe(cond, dt, rd, operand);
+  }
+  void Vcmpe(DataType dt, DRegister rd, const DOperand& operand) {
+    Vcmpe(al, dt, rd, operand);
+  }
+
+  void Vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcnt(cond, dt, rd, rm);
+  }
+  void Vcnt(DataType dt, DRegister rd, DRegister rm) { Vcnt(al, dt, rd, rm); }
+
+  void Vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcnt(cond, dt, rd, rm);
+  }
+  void Vcnt(DataType dt, QRegister rd, QRegister rm) { Vcnt(al, dt, rd, rm); }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(Condition cond,
+            DataType dt1,
+            DataType dt2,
+            DRegister rd,
+            DRegister rm,
+            int32_t fbits) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm, fbits);
+  }
+  void Vcvt(
+      DataType dt1, DataType dt2, DRegister rd, DRegister rm, int32_t fbits) {
+    Vcvt(al, dt1, dt2, rd, rm, fbits);
+  }
+
+  void Vcvt(Condition cond,
+            DataType dt1,
+            DataType dt2,
+            QRegister rd,
+            QRegister rm,
+            int32_t fbits) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm, fbits);
+  }
+  void Vcvt(
+      DataType dt1, DataType dt2, QRegister rd, QRegister rm, int32_t fbits) {
+    Vcvt(al, dt1, dt2, rd, rm, fbits);
+  }
+
+  void Vcvt(Condition cond,
+            DataType dt1,
+            DataType dt2,
+            SRegister rd,
+            SRegister rm,
+            int32_t fbits) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm, fbits);
+  }
+  void Vcvt(
+      DataType dt1, DataType dt2, SRegister rd, SRegister rm, int32_t fbits) {
+    Vcvt(al, dt1, dt2, rd, rm, fbits);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, DRegister rd, QRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, QRegister rd, DRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvt(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    Vcvt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvta(dt1, dt2, rd, rm);
+  }
+
+  void Vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvta(dt1, dt2, rd, rm);
+  }
+
+  void Vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvta(dt1, dt2, rd, rm);
+  }
+
+  void Vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvta(dt1, dt2, rd, rm);
+  }
+
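+  // [Editorial note, not part of upstream VIXL] Vcvta above (and
+  // Vcvtm/Vcvtn/Vcvtp below) take no Condition and open no ITScope: the
+  // ARMv8 convert-to-integer instructions with an explicit rounding mode
+  // cannot be made conditional. Vcvtb/Vcvtt next convert the bottom/top half
+  // of a register to or from half precision and stay conditional like the
+  // other VFP instructions.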
+  void Vcvtb(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtb(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtb(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    Vcvtb(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtb(
+      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtb(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtb(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    Vcvtb(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtb(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtb(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtb(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    Vcvtb(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtm(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtm(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtm(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtm(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtn(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtn(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtn(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtn(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtp(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtp(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtp(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    vcvtp(dt1, dt2, rd, rm);
+  }
+
+  void Vcvtr(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtr(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtr(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    Vcvtr(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtr(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtr(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtr(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    Vcvtr(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtt(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
+    Vcvtt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtt(
+      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
+    Vcvtt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vcvtt(
+      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vcvtt(cond, dt1, dt2, rd, rm);
+  }
+  void Vcvtt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
+    Vcvtt(al, dt1, dt2, rd, rm);
+  }
+
+  void Vdiv(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdiv(cond, dt, rd, rn, rm);
+  }
+  void Vdiv(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vdiv(al, dt, rd, rn, rm);
+  }
+
+  void Vdiv(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdiv(cond, dt, rd, rn, rm);
+  }
+  void Vdiv(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vdiv(al, dt, rd, rn, rm);
+  }
+
+  void Vdup(Condition cond, DataType dt, QRegister rd, Register rt) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdup(cond, dt, rd, rt);
+  }
+  void Vdup(DataType dt, QRegister rd, Register rt) { Vdup(al, dt, rd, rt); }
+
+  void Vdup(Condition cond, DataType dt, DRegister rd, Register rt) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdup(cond, dt, rd, rt);
+  }
+  void Vdup(DataType dt, DRegister rd, Register rt) { Vdup(al, dt, rd, rt); }
+
+  void Vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdup(cond, dt, rd, rm);
+  }
+  void Vdup(DataType dt, DRegister rd, DRegisterLane rm) {
+    Vdup(al, dt, rd, rm);
+  }
+
+  void Vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vdup(cond, dt, rd, rm);
+  }
+  void Vdup(DataType dt, QRegister rd, DRegisterLane rm) {
+    Vdup(al, dt, rd, rm);
+  }
+
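+  // [Editorial note, not part of upstream VIXL] Veor, like the bitwise
+  // selects earlier, is type-agnostic, so it also offers DataType-less
+  // overloads that pass kDataTypeValueNone. A common idiom is zeroing a
+  // vector register, e.g. `masm.Veor(q0, q0, q0);`.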
+  void Veor(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    veor(cond, dt, rd, rn, rm);
+  }
+  void Veor(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Veor(al, dt, rd, rn, rm);
+  }
+  void Veor(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
+    Veor(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Veor(DRegister rd, DRegister rn, DRegister rm) {
+    Veor(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Veor(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    veor(cond, dt, rd, rn, rm);
+  }
+  void Veor(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Veor(al, dt, rd, rn, rm);
+  }
+  void Veor(Condition cond, QRegister rd, QRegister rn, QRegister rm) {
+    Veor(cond, kDataTypeValueNone, rd, rn, rm);
+  }
+  void Veor(QRegister rd, QRegister rn, QRegister rm) {
+    Veor(al, kDataTypeValueNone, rd, rn, rm);
+  }
+
+  void Vext(Condition cond,
+            DataType dt,
+            DRegister rd,
+            DRegister rn,
+            DRegister rm,
+            const DOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vext(cond, dt, rd, rn, rm, operand);
+  }
+  void Vext(DataType dt,
+            DRegister rd,
+            DRegister rn,
+            DRegister rm,
+            const DOperand& operand) {
+    Vext(al, dt, rd, rn, rm, operand);
+  }
+
+  void Vext(Condition cond,
+            DataType dt,
+            QRegister rd,
+            QRegister rn,
+            QRegister rm,
+            const QOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vext(cond, dt, rd, rn, rm, operand);
+  }
+  void Vext(DataType dt,
+            QRegister rd,
+            QRegister rn,
+            QRegister rm,
+            const QOperand& operand) {
+    Vext(al, dt, rd, rn, rm, operand);
+  }
+
+  void Vfma(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfma(cond, dt, rd, rn, rm);
+  }
+  void Vfma(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vfma(al, dt, rd, rn, rm);
+  }
+
+  void Vfma(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfma(cond, dt, rd, rn, rm);
+  }
+  void Vfma(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vfma(al, dt, rd, rn, rm);
+  }
+
+  void Vfma(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfma(cond, dt, rd, rn, rm);
+  }
+  void Vfma(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vfma(al, dt, rd, rn, rm);
+  }
+
+  void Vfms(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfms(cond, dt, rd, rn, rm);
+  }
+  void Vfms(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vfms(al, dt, rd, rn, rm);
+  }
+
+  void Vfms(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfms(cond, dt, rd, rn, rm);
+  }
+  void Vfms(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vfms(al, dt, rd, rn, rm);
+  }
+
+  void Vfms(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfms(cond, dt, rd, rn, rm);
+  }
+  void Vfms(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vfms(al, dt, rd, rn, rm);
+  }
+
+  void Vfnma(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfnma(cond, dt, rd, rn, rm);
+  }
+  void Vfnma(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vfnma(al, dt, rd, rn, rm);
+  }
+
+  void Vfnma(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfnma(cond, dt, rd, rn, rm);
+  }
+  void Vfnma(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vfnma(al, dt, rd, rn, rm);
+  }
+
+  void Vfnms(
+      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfnms(cond, dt, rd, rn, rm);
+  }
+  void Vfnms(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+    Vfnms(al, dt, rd, rn, rm);
+  }
+
+  void Vfnms(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vfnms(cond, dt, rd, rn, rm);
+  }
+  void Vfnms(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vfnms(al, dt, rd, rn, rm);
+  }
+
+  void Vhadd(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vhadd(cond, dt, rd, rn, rm);
+  }
+  void Vhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vhadd(al, dt, rd, rn, rm);
+  }
+
+  void Vhadd(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vhadd(cond, dt, rd, rn, rm);
+  }
+  void Vhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vhadd(al, dt, rd, rn, rm);
+  }
+
+  void Vhsub(
+      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vhsub(cond, dt, rd, rn, rm);
+  }
+  void Vhsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+    Vhsub(al, dt, rd, rn, rm);
+  }
+
+  void Vhsub(
+      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rm));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vhsub(cond, dt, rd, rn, rm);
+  }
+  void Vhsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+    Vhsub(al, dt, rd, rn, rm);
+  }
+
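+  // [Editorial note, not part of upstream VIXL] The Vld1..Vld4 wrappers
+  // below load one to four interleaved element structures into a
+  // NeonRegisterList; the AlignedMemOperand variants let the caller state an
+  // address alignment hint that is encoded into the instruction.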
+  void Vld1(Condition cond,
+            DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vld1(cond, dt, nreglist, operand);
+  }
+  void Vld1(DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    Vld1(al, dt, nreglist, operand);
+  }
+
+  void Vld2(Condition cond,
+            DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vld2(cond, dt, nreglist, operand);
+  }
+  void Vld2(DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    Vld2(al, dt, nreglist, operand);
+  }
+
+  void Vld3(Condition cond,
+            DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vld3(cond, dt, nreglist, operand);
+  }
+  void Vld3(DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    Vld3(al, dt, nreglist, operand);
+  }
+
+  void Vld3(Condition cond,
+            DataType dt,
+            const NeonRegisterList& nreglist,
+            const MemOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vld3(cond, dt, nreglist, operand);
+  }
+  void Vld3(DataType dt,
+            const NeonRegisterList& nreglist,
+            const MemOperand& operand) {
+    Vld3(al, dt, nreglist, operand);
+  }
+
+  void Vld4(Condition cond,
+            DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(operand));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vld4(cond, dt, nreglist, operand);
+  }
+  void Vld4(DataType dt,
+            const NeonRegisterList& nreglist,
+            const AlignedMemOperand& operand) {
+    Vld4(al, dt, nreglist, operand);
+  }
+
+  void Vldm(Condition cond,
+            DataType dt,
+            Register rn,
+            WriteBack write_back,
+            DRegisterList dreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vldm(cond, dt, rn, write_back, dreglist);
+  }
+  void Vldm(DataType dt,
+            Register rn,
+            WriteBack write_back,
+            DRegisterList dreglist) {
+    Vldm(al, dt, rn, write_back, dreglist);
+  }
+  void Vldm(Condition cond,
+            Register rn,
+            WriteBack write_back,
+            DRegisterList dreglist) {
+    Vldm(cond, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+  void Vldm(Register rn, WriteBack write_back, DRegisterList dreglist) {
+    Vldm(al, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+
+  void Vldm(Condition cond,
+            DataType dt,
+            Register rn,
+            WriteBack write_back,
+            SRegisterList sreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vldm(cond, dt, rn, write_back, sreglist);
+  }
+  void Vldm(DataType dt,
+            Register rn,
+            WriteBack write_back,
+            SRegisterList sreglist) {
+    Vldm(al, dt, rn, write_back, sreglist);
+  }
+  void Vldm(Condition cond,
+            Register rn,
+            WriteBack write_back,
+            SRegisterList sreglist) {
+    Vldm(cond, kDataTypeValueNone, rn, write_back, sreglist);
+  }
+  void Vldm(Register rn, WriteBack write_back, SRegisterList sreglist) {
+    Vldm(al, kDataTypeValueNone, rn, write_back, sreglist);
+  }
+
+  void Vldmdb(Condition cond,
+              DataType dt,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vldmdb(cond, dt, rn, write_back, dreglist);
+  }
+  void Vldmdb(DataType dt,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    Vldmdb(al, dt, rn, write_back, dreglist);
+  }
+  void Vldmdb(Condition cond,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    Vldmdb(cond, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+  void Vldmdb(Register rn, WriteBack write_back, DRegisterList dreglist) {
+    Vldmdb(al, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+
+  void Vldmdb(Condition cond,
+              DataType dt,
+              Register rn,
+              WriteBack write_back,
+              SRegisterList sreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vldmdb(cond, dt, rn, write_back, sreglist);
+  }
+  void Vldmdb(DataType dt,
+              Register rn,
+              WriteBack write_back,
+              SRegisterList sreglist) {
+    Vldmdb(al, dt, rn, write_back, sreglist);
+  }
+  void Vldmdb(Condition cond,
+              Register rn,
+              WriteBack write_back,
+              SRegisterList sreglist) {
+    Vldmdb(cond, kDataTypeValueNone, rn, write_back, sreglist);
+  }
+  void Vldmdb(Register rn, WriteBack write_back, SRegisterList sreglist) {
+    Vldmdb(al, kDataTypeValueNone, rn, write_back, sreglist);
+  }
+
+  void Vldmia(Condition cond,
+              DataType dt,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
+    vldmia(cond, dt, rn, write_back, dreglist);
+  }
+  void Vldmia(DataType dt,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    Vldmia(al, dt, rn, write_back, dreglist);
+  }
+  void Vldmia(Condition cond,
+              Register rn,
+              WriteBack write_back,
+              DRegisterList dreglist) {
+    Vldmia(cond, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+  void Vldmia(Register rn, WriteBack write_back, DRegisterList dreglist) {
+    Vldmia(al, kDataTypeValueNone, rn, write_back, dreglist);
+  }
+
+  void Vldmia(Condition cond,
+              DataType dt,
+              Register rn,
+              WriteBack write_back,
+              SRegisterList sreglist) {
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
+    VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist));
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(OutsideITBlock());
+    MacroEmissionCheckScope guard(this);
+    ITScope it_scope(this, &cond, guard);
guard); + vldmia(cond, dt, rn, write_back, sreglist); + } + void Vldmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmia(al, dt, rn, write_back, sreglist); + } + void Vldmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vldmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vldmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + + void Vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, operand); + } + void Vldr(DataType dt, DRegister rd, const MemOperand& operand) { + Vldr(al, dt, rd, operand); + } + void Vldr(Condition cond, DRegister rd, const MemOperand& operand) { + Vldr(cond, Untyped64, rd, operand); + } + void Vldr(DRegister rd, const MemOperand& operand) { + Vldr(al, Untyped64, rd, operand); + } + + + void Vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, operand); + } + void Vldr(DataType dt, SRegister rd, const MemOperand& operand) { + Vldr(al, dt, rd, operand); + } + void Vldr(Condition cond, SRegister rd, const MemOperand& operand) { + Vldr(cond, Untyped32, rd, operand); + } + void Vldr(SRegister rd, const MemOperand& operand) { + Vldr(al, Untyped32, rd, operand); + } + + void Vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmax(cond, dt, rd, rn, rm); + } + void Vmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmax(al, dt, rd, rn, rm); + } + + void Vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmax(cond, dt, rd, rn, rm); + } + void Vmax(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmax(al, dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
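// Note (added): VMAXNM/VMINNM are ARMv8 instructions that cannot be made conditional, so these helpers take no Condition parameter and create no ITScope. +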
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmin(cond, dt, rd, rn, rm); + } + void Vmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmin(al, dt, rd, rn, rm); + } + + void Vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmin(cond, dt, rd, rn, rm); + } + void Vmin(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmin(al, dt, rd, rn, rm); + } + + void Vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
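// Note (added): by-lane form - every element of rn is multiplied by the selected lane of rm before accumulating into rd. +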
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlal(cond, dt, rd, rn, rm); + } + void Vmlal(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + Vmlal(al, dt, rd, rn, rm); + } + + void Vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlal(cond, dt, rd, rn, rm); + } + void Vmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmlal(al, dt, rd, rn, rm); + } + + void Vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, DRegister rd, DRegister rn, 
DRegisterLane rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlsl(cond, dt, rd, rn, rm); + } + void Vmlsl(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + Vmlsl(al, dt, rd, rn, rm); + } + + void Vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlsl(cond, dt, rd, rn, rm); + } + void Vmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmlsl(al, dt, rd, rn, rm); + } + + void Vmov(Condition cond, Register rt, SRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); 
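+ // Note (added): transfers the raw 32 bits of rn into the core register rt; no conversion is performed.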
+ MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rn); + } + void Vmov(Register rt, SRegister rn) { Vmov(al, rt, rn); } + + void Vmov(Condition cond, SRegister rn, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rn, rt); + } + void Vmov(SRegister rn, Register rt) { Vmov(al, rn, rt); } + + void Vmov(Condition cond, Register rt, Register rt2, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rt2, rm); + } + void Vmov(Register rt, Register rt2, DRegister rm) { Vmov(al, rt, rt2, rm); } + + void Vmov(Condition cond, DRegister rm, Register rt, Register rt2) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rm, rt, rt2); + } + void Vmov(DRegister rm, Register rt, Register rt2) { Vmov(al, rm, rt, rt2); } + + void Vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm1)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rt2, rm, rm1); + } + void Vmov(Register rt, Register rt2, SRegister rm, SRegister rm1) { + Vmov(al, rt, rt2, rm, rm1); + } + + void Vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm1)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rm, rm1, rt, rt2); + } + void Vmov(SRegister rm, SRegister rm1, Register rt, Register rt2) { + Vmov(al, rm, rm1, rt, rt2); + } + + void Vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, rt); + } + void Vmov(DataType dt, DRegisterLane rd, Register rt) { + Vmov(al, dt, rd, rt); + } + void Vmov(Condition cond, DRegisterLane rd, Register rt) { + Vmov(cond, kDataTypeValueNone, rd, rt); + } + void Vmov(DRegisterLane rd, Register rt) { + Vmov(al, kDataTypeValueNone, rd, rt); + } + + void Vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + 
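// Note (added): the D operand may hold an immediate or a source register; only a limited set of immediates is directly encodable in VMOV. +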
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, DRegister rd, const DOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, QRegister rd, const QOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, SRegister rd, const SOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rt, rn); + } + void Vmov(DataType dt, Register rt, DRegisterLane rn) { + Vmov(al, dt, rt, rn); + } + void Vmov(Condition cond, Register rt, DRegisterLane rn) { + Vmov(cond, kDataTypeValueNone, rt, rn); + } + void Vmov(Register rt, DRegisterLane rn) { + Vmov(al, kDataTypeValueNone, rt, rn); + } + + void Vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmovl(cond, dt, rd, rm); + } + void Vmovl(DataType dt, QRegister rd, DRegister rm) { Vmovl(al, dt, rd, rm); } + + void Vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmovn(cond, dt, rd, rm); + } + void Vmovn(DataType dt, DRegister rd, QRegister rm) { Vmovn(al, dt, rd, rm); } + + void Vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmrs(cond, rt, spec_reg); + } + void Vmrs(RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg) { + Vmrs(al, rt, spec_reg); + } + + void Vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
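// Note (added): VMSR writes the core register rt to a special FP register such as FPSCR; it is the counterpart of Vmrs above. +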
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmsr(cond, spec_reg, rt); + } + void Vmsr(SpecialFPRegister spec_reg, Register rt) { Vmsr(al, spec_reg, rt); } + + void Vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, dm, index); + } + void Vmul( + DataType dt, DRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vmul(al, dt, rd, rn, dm, index); + } + + void Vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, dm, index); + } + void Vmul( + DataType dt, QRegister rd, QRegister rn, DRegister dm, unsigned index) { + Vmul(al, dt, rd, rn, dm, index); + } + + void Vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmull(cond, dt, rd, rn, dm, index); + } + void Vmull( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + 
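// Note (added): as with every unconditional overload in this header, the call simply forwards with the condition defaulted to al. +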
Vmull(al, dt, rd, rn, dm, index); + } + + void Vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmull(cond, dt, rd, rn, rm); + } + void Vmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmull(al, dt, rd, rn, rm); + } + + void Vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmvn(cond, dt, rd, operand); + } + void Vmvn(DataType dt, DRegister rd, const DOperand& operand) { + Vmvn(al, dt, rd, operand); + } + + void Vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmvn(cond, dt, rd, operand); + } + void Vmvn(DataType dt, QRegister rd, const QOperand& operand) { + Vmvn(al, dt, rd, operand); + } + + void Vneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, DRegister rd, DRegister rm) { Vneg(al, dt, rd, rm); } + + void Vneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, QRegister rd, QRegister rm) { Vneg(al, dt, rd, rm); } + + void Vneg(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, SRegister rd, SRegister rm) { Vneg(al, dt, rd, rm); } + + void Vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmla(cond, dt, rd, rn, rm); + } + void Vnmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vnmla(al, dt, rd, rn, rm); + } + + void Vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + 
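// Note (added): VNMLA computes rd = -(rd + rn * rm); VNMLS below computes rd = -rd + rn * rm. +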
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmla(cond, dt, rd, rn, rm); + } + void Vnmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmla(al, dt, rd, rn, rm); + } + + void Vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmls(cond, dt, rd, rn, rm); + } + void Vnmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vnmls(al, dt, rd, rn, rm); + } + + void Vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmls(cond, dt, rd, rn, rm); + } + void Vnmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmls(al, dt, rd, rn, rm); + } + + void Vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmul(cond, dt, rd, rn, rm); + } + void Vnmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vnmul(al, dt, rd, rn, rm); + } + + void Vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmul(cond, dt, rd, rn, rm); + } + void Vnmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmul(al, dt, rd, rn, rm); + } + + void Vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorn(cond, dt, rd, rn, operand); + } + void Vorn(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vorn(al, dt, rd, rn, operand); + } + + void Vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
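// Note (added): VORN is a bitwise OR with the complement of the second operand: rd = rn | ~operand. +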
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorn(cond, dt, rd, rn, operand); + } + void Vorn(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vorn(al, dt, rd, rn, operand); + } + + void Vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorr(cond, dt, rd, rn, operand); + } + void Vorr(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vorr(al, dt, rd, rn, operand); + } + void Vorr(Condition cond, + DRegister rd, + DRegister rn, + const DOperand& operand) { + Vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + void Vorr(DRegister rd, DRegister rn, const DOperand& operand) { + Vorr(al, kDataTypeValueNone, rd, rn, operand); + } + + void Vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorr(cond, dt, rd, rn, operand); + } + void Vorr(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vorr(al, dt, rd, rn, operand); + } + void Vorr(Condition cond, + QRegister rd, + QRegister rn, + const QOperand& operand) { + Vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + void Vorr(QRegister rd, QRegister rn, const QOperand& operand) { + Vorr(al, kDataTypeValueNone, rd, rn, operand); + } + + void Vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadal(cond, dt, rd, rm); + } + void Vpadal(DataType dt, DRegister rd, DRegister rm) { + Vpadal(al, dt, rd, rm); + } + + void Vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadal(cond, dt, rd, rm); + } + void Vpadal(DataType dt, QRegister rd, QRegister rm) { + Vpadal(al, dt, rd, rm); + } + + void Vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadd(cond, dt, rd, rn, rm); + } + void Vpadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vpadd(al, dt, rd, rn, rm); + } + + void Vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
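// Note (added): VPADDL adds adjacent element pairs and widens the results to twice the element size. +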
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpaddl(cond, dt, rd, rm); + } + void Vpaddl(DataType dt, DRegister rd, DRegister rm) { + Vpaddl(al, dt, rd, rm); + } + + void Vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpaddl(cond, dt, rd, rm); + } + void Vpaddl(DataType dt, QRegister rd, QRegister rm) { + Vpaddl(al, dt, rd, rm); + } + + void Vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpmax(cond, dt, rd, rn, rm); + } + void Vpmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vpmax(al, dt, rd, rn, rm); + } + + void Vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpmin(cond, dt, rd, rn, rm); + } + void Vpmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vpmin(al, dt, rd, rn, rm); + } + + void Vpop(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpop(cond, dt, dreglist); + } + void Vpop(DataType dt, DRegisterList dreglist) { Vpop(al, dt, dreglist); } + void Vpop(Condition cond, DRegisterList dreglist) { + Vpop(cond, kDataTypeValueNone, dreglist); + } + void Vpop(DRegisterList dreglist) { Vpop(al, kDataTypeValueNone, dreglist); } + + void Vpop(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpop(cond, dt, sreglist); + } + void Vpop(DataType dt, SRegisterList sreglist) { Vpop(al, dt, sreglist); } + void Vpop(Condition cond, SRegisterList sreglist) { + Vpop(cond, kDataTypeValueNone, sreglist); + } + void Vpop(SRegisterList sreglist) { Vpop(al, kDataTypeValueNone, sreglist); } + + void Vpush(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpush(cond, dt, dreglist); + } + void Vpush(DataType dt, DRegisterList dreglist) { Vpush(al, dt, dreglist); } + void Vpush(Condition cond, DRegisterList dreglist) { + Vpush(cond, kDataTypeValueNone, dreglist); + } + void Vpush(DRegisterList dreglist) { + Vpush(al, kDataTypeValueNone, dreglist); + } + + void 
Vpush(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpush(cond, dt, sreglist); + } + void Vpush(DataType dt, SRegisterList sreglist) { Vpush(al, dt, sreglist); } + void Vpush(Condition cond, SRegisterList sreglist) { + Vpush(cond, kDataTypeValueNone, sreglist); + } + void Vpush(SRegisterList sreglist) { + Vpush(al, kDataTypeValueNone, sreglist); + } + + void Vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqabs(cond, dt, rd, rm); + } + void Vqabs(DataType dt, DRegister rd, DRegister rm) { Vqabs(al, dt, rd, rm); } + + void Vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqabs(cond, dt, rd, rm); + } + void Vqabs(DataType dt, QRegister rd, QRegister rm) { Vqabs(al, dt, rd, rm); } + + void Vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqadd(cond, dt, rd, rn, rm); + } + void Vqadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqadd(al, dt, rd, rn, rm); + } + + void Vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqadd(cond, dt, rd, rn, rm); + } + void Vqadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqadd(al, dt, rd, rn, rm); + } + + void Vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlal(cond, dt, rd, rn, rm); + } + void Vqdmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmlal(al, dt, rd, rn, rm); + } + + void Vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlal(cond, dt, rd, 
rn, dm, index); + } + void Vqdmlal( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vqdmlal(al, dt, rd, rn, dm, index); + } + + void Vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlsl(cond, dt, rd, rn, rm); + } + void Vqdmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmlsl(al, dt, rd, rn, rm); + } + + void Vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlsl(cond, dt, rd, rn, dm, index); + } + void Vqdmlsl( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vqdmlsl(al, dt, rd, rn, dm, index); + } + + void Vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmull( + 
Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmull(cond, dt, rd, rn, rm); + } + void Vqdmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmull(al, dt, rd, rn, rm); + } + + void Vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmull(cond, dt, rd, rn, rm); + } + void Vqdmull(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + Vqdmull(al, dt, rd, rn, rm); + } + + void Vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqmovn(cond, dt, rd, rm); + } + void Vqmovn(DataType dt, DRegister rd, QRegister rm) { + Vqmovn(al, dt, rd, rm); + } + + void Vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqmovun(cond, dt, rd, rm); + } + void Vqmovun(DataType dt, DRegister rd, QRegister rm) { + Vqmovun(al, dt, rd, rm); + } + + void Vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqneg(cond, dt, rd, rm); + } + void Vqneg(DataType dt, DRegister rd, DRegister rm) { Vqneg(al, dt, rd, rm); } + + void Vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqneg(cond, dt, rd, rm); + } + void Vqneg(DataType dt, QRegister rd, QRegister rm) { Vqneg(al, dt, rd, rm); } + + void Vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
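// Note (added): VQRDMULH is a saturating rounding doubling multiply that keeps the high half of each product. +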
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshl(cond, dt, rd, rm, rn); + } + void Vqrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + Vqrshl(al, dt, rd, rm, rn); + } + + void Vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshl(cond, dt, rd, rm, rn); + } + void Vqrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + Vqrshl(al, dt, rd, rm, rn); + } + + void Vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshrn(cond, dt, rd, rm, operand); + } + void Vqrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqrshrn(al, dt, rd, rm, operand); + } + + void Vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
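// Note (added): VQRSHRUN is a rounding shift right by immediate that narrows signed elements with unsigned saturation. +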
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshrun(cond, dt, rd, rm, operand); + } + void Vqrshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqrshrun(al, dt, rd, rm, operand); + } + + void Vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshl(cond, dt, rd, rm, operand); + } + void Vqshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vqshl(al, dt, rd, rm, operand); + } + + void Vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshl(cond, dt, rd, rm, operand); + } + void Vqshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vqshl(al, dt, rd, rm, operand); + } + + void Vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshlu(cond, dt, rd, rm, operand); + } + void Vqshlu(DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + Vqshlu(al, dt, rd, rm, operand); + } + + void Vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshlu(cond, dt, rd, rm, operand); + } + void Vqshlu(DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + Vqshlu(al, dt, rd, rm, operand); + } + + void Vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshrn(cond, dt, rd, rm, operand); + } + void Vqshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqshrn(al, dt, rd, rm, operand); + } + + void Vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
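// Note (added): VQSHRUN is the non-rounding counterpart of VQRSHRUN above. +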
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshrun(cond, dt, rd, rm, operand); + } + void Vqshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqshrun(al, dt, rd, rm, operand); + } + + void Vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqsub(cond, dt, rd, rn, rm); + } + void Vqsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqsub(al, dt, rd, rn, rm); + } + + void Vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqsub(cond, dt, rd, rn, rm); + } + void Vqsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqsub(al, dt, rd, rn, rm); + } + + void Vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vraddhn(cond, dt, rd, rn, rm); + } + void Vraddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vraddhn(al, dt, rd, rn, rm); + } + + void Vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecpe(cond, dt, rd, rm); + } + void Vrecpe(DataType dt, DRegister rd, DRegister rm) { + Vrecpe(al, dt, rd, rm); + } + + void Vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecpe(cond, dt, rd, rm); + } + void Vrecpe(DataType dt, QRegister rd, QRegister rm) { + Vrecpe(al, dt, rd, rm); + } + + void Vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecps(cond, dt, rd, rn, rm); + } + void Vrecps(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrecps(al, dt, rd, rn, rm); + } + + void Vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister 
rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecps(cond, dt, rd, rn, rm); + } + void Vrecps(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrecps(al, dt, rd, rn, rm); + } + + void Vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev16(cond, dt, rd, rm); + } + void Vrev16(DataType dt, DRegister rd, DRegister rm) { + Vrev16(al, dt, rd, rm); + } + + void Vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev16(cond, dt, rd, rm); + } + void Vrev16(DataType dt, QRegister rd, QRegister rm) { + Vrev16(al, dt, rd, rm); + } + + void Vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev32(cond, dt, rd, rm); + } + void Vrev32(DataType dt, DRegister rd, DRegister rm) { + Vrev32(al, dt, rd, rm); + } + + void Vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev32(cond, dt, rd, rm); + } + void Vrev32(DataType dt, QRegister rd, QRegister rm) { + Vrev32(al, dt, rd, rm); + } + + void Vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev64(cond, dt, rd, rm); + } + void Vrev64(DataType dt, DRegister rd, DRegister rm) { + Vrev64(al, dt, rd, rm); + } + + void Vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev64(cond, dt, rd, rm); + } + void Vrev64(DataType dt, QRegister rd, QRegister rm) { + Vrev64(al, dt, rd, rm); + } + + void Vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + vrhadd(cond, dt, rd, rn, rm); + } + void Vrhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrhadd(al, dt, rd, rn, rm); + } + + void Vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrhadd(cond, dt, rd, rn, rm); + } + void Vrhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrhadd(al, dt, rd, rn, rm); + } + + void Vrinta(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrinta(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrinta(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrintm(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintm(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintm(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintn(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintn(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintn(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintp(DataType dt, DRegister rd, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintp(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintp(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintr(cond, dt, rd, rm); + } + void Vrintr(DataType dt, SRegister rd, SRegister rm) { + Vrintr(al, dt, rd, rm); + } + + void Vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintr(cond, dt, rd, rm); + } + void Vrintr(DataType dt, DRegister rd, DRegister rm) { + Vrintr(al, dt, rd, rm); + } + + void Vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintx(cond, dt, rd, rm); + } + void Vrintx(DataType dt, DRegister rd, DRegister rm) { + Vrintx(al, dt, rd, rm); + } + + void Vrintx(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintx(dt, rd, rm); + } + + void Vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintx(cond, dt, rd, rm); + } + void Vrintx(DataType dt, SRegister rd, SRegister rm) { + Vrintx(al, dt, rd, rm); + } + + void Vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintz(cond, dt, rd, rm); + } + void Vrintz(DataType dt, DRegister rd, DRegister rm) { + Vrintz(al, dt, rd, rm); + } + + void Vrintz(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintz(dt, rd, rm); + } + + void Vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintz(cond, dt, rd, rm); + } + void Vrintz(DataType dt, SRegister rd, SRegister rm) { + Vrintz(al, dt, rd, rm); + } + + void Vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshl(cond, dt, rd, rm, rn); + } + void Vrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + Vrshl(al, dt, rd, rm, rn); + } + + void Vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshl(cond, dt, rd, rm, rn); + } + void Vrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + Vrshl(al, dt, rd, rm, rn); + } + + void Vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshr(cond, dt, rd, rm, operand); + } + void Vrshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vrshr(al, dt, rd, rm, operand); + } + + void Vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshr(cond, dt, rd, rm, operand); + } + void Vrshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vrshr(al, dt, rd, rm, operand); + } + + void Vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshrn(cond, dt, rd, rm, operand); + } + void Vrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vrshrn(al, dt, rd, rm, operand); + } + + void Vrsqrte(Condition cond, DataType dt, DRegister rd, 
DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrte(cond, dt, rd, rm); + } + void Vrsqrte(DataType dt, DRegister rd, DRegister rm) { + Vrsqrte(al, dt, rd, rm); + } + + void Vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrte(cond, dt, rd, rm); + } + void Vrsqrte(DataType dt, QRegister rd, QRegister rm) { + Vrsqrte(al, dt, rd, rm); + } + + void Vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrts(cond, dt, rd, rn, rm); + } + void Vrsqrts(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrsqrts(al, dt, rd, rn, rm); + } + + void Vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrts(cond, dt, rd, rn, rm); + } + void Vrsqrts(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrsqrts(al, dt, rd, rn, rm); + } + + void Vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsra(cond, dt, rd, rm, operand); + } + void Vrsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vrsra(al, dt, rd, rm, operand); + } + + void Vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsra(cond, dt, rd, rm, operand); + } + void Vrsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vrsra(al, dt, rd, rm, operand); + } + + void Vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsubhn(cond, dt, rd, 
rn, rm); + } + void Vrsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vrsubhn(al, dt, rd, rn, rm); + } + + void Vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vseleq(dt, rd, rn, rm); + } + + void Vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vseleq(dt, rd, rn, rm); + } + + void Vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselge(dt, rd, rn, rm); + } + + void Vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselge(dt, rd, rn, rm); + } + + void Vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselgt(dt, rd, rn, rm); + } + + void Vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselgt(dt, rd, rn, rm); + } + + void Vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselvs(dt, rd, rn, rm); + } + + void Vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselvs(dt, rd, rn, rm); + } + + void Vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
vshl(cond, dt, rd, rm, operand); + } + void Vshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vshl(al, dt, rd, rm, operand); + } + + void Vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshl(cond, dt, rd, rm, operand); + } + void Vshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vshl(al, dt, rd, rm, operand); + } + + void Vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshll(cond, dt, rd, rm, operand); + } + void Vshll(DataType dt, QRegister rd, DRegister rm, const DOperand& operand) { + Vshll(al, dt, rd, rm, operand); + } + + void Vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshr(cond, dt, rd, rm, operand); + } + void Vshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vshr(al, dt, rd, rm, operand); + } + + void Vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshr(cond, dt, rd, rm, operand); + } + void Vshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vshr(al, dt, rd, rm, operand); + } + + void Vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshrn(cond, dt, rd, rm, operand); + } + void Vshrn(DataType dt, DRegister rd, QRegister rm, const QOperand& operand) { + Vshrn(al, dt, rd, rm, operand); + } + + void Vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsli(cond, dt, rd, rm, operand); + } + void Vsli(DataType dt, 
DRegister rd, DRegister rm, const DOperand& operand) { + Vsli(al, dt, rd, rm, operand); + } + + void Vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsli(cond, dt, rd, rm, operand); + } + void Vsli(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsli(al, dt, rd, rm, operand); + } + + void Vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsqrt(cond, dt, rd, rm); + } + void Vsqrt(DataType dt, SRegister rd, SRegister rm) { Vsqrt(al, dt, rd, rm); } + + void Vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsqrt(cond, dt, rd, rm); + } + void Vsqrt(DataType dt, DRegister rd, DRegister rm) { Vsqrt(al, dt, rd, rm); } + + void Vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsra(cond, dt, rd, rm, operand); + } + void Vsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vsra(al, dt, rd, rm, operand); + } + + void Vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsra(cond, dt, rd, rm, operand); + } + void Vsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsra(al, dt, rd, rm, operand); + } + + void Vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsri(cond, dt, rd, rm, operand); + } + void Vsri(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vsri(al, dt, rd, rm, operand); + } + + void Vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsri(cond, dt, rd, rm, operand); + } + void Vsri(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsri(al, dt, rd, rm, operand); + } + + void Vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst1(cond, dt, nreglist, operand); + } + void Vst1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst1(al, dt, nreglist, operand); + } + + void Vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst2(cond, dt, nreglist, operand); + } + void Vst2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst2(al, dt, nreglist, operand); + } + + void Vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst3(cond, dt, nreglist, operand); + } + void Vst3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst3(al, dt, nreglist, operand); + } + + void Vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst3(cond, dt, nreglist, operand); + } + void Vst3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + Vst3(al, dt, nreglist, operand); + } + + void Vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst4(cond, dt, nreglist, operand); + } + void Vst4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst4(al, dt, nreglist, operand); + } + + void Vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + vstm(cond, dt, rn, write_back, dreglist); + } + void Vstm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstm(al, dt, rn, write_back, dreglist); + } + void Vstm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstm(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstm(cond, dt, rn, write_back, sreglist); + } + void Vstm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstm(al, dt, rn, write_back, sreglist); + } + void Vstm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstm(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmdb(cond, dt, rn, write_back, dreglist); + } + void Vstmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmdb(al, dt, rn, write_back, dreglist); + } + void Vstmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmdb(cond, dt, rn, write_back, sreglist); + } + void Vstmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmdb(al, dt, rn, write_back, sreglist); + } + void Vstmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmia(cond, dt, rn, write_back, dreglist); + } + void 
Vstmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmia(al, dt, rn, write_back, dreglist); + } + void Vstmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmia(cond, dt, rn, write_back, sreglist); + } + void Vstmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmia(al, dt, rn, write_back, sreglist); + } + void Vstmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstr(cond, dt, rd, operand); + } + void Vstr(DataType dt, DRegister rd, const MemOperand& operand) { + Vstr(al, dt, rd, operand); + } + void Vstr(Condition cond, DRegister rd, const MemOperand& operand) { + Vstr(cond, Untyped64, rd, operand); + } + void Vstr(DRegister rd, const MemOperand& operand) { + Vstr(al, Untyped64, rd, operand); + } + + void Vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstr(cond, dt, rd, operand); + } + void Vstr(DataType dt, SRegister rd, const MemOperand& operand) { + Vstr(al, dt, rd, operand); + } + void Vstr(Condition cond, SRegister rd, const MemOperand& operand) { + Vstr(cond, Untyped32, rd, operand); + } + void Vstr(SRegister rd, const MemOperand& operand) { + Vstr(al, Untyped32, rd, operand); + } + + void Vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubhn(cond, dt, rd, rn, rm); + } + void Vsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vsubhn(al, dt, rd, rn, rm); + } + + void Vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubl(cond, dt, rd, rn, rm); + } + void Vsubl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vsubl(al, dt, rd, rn, rm); + } + + void Vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubw(cond, dt, rd, rn, rm); + } + void Vsubw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + Vsubw(al, dt, rd, rn, rm); + } + + void Vswp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vswp(cond, dt, rd, rm); + } + void Vswp(DataType dt, DRegister rd, DRegister rm) { Vswp(al, dt, rd, rm); } + void Vswp(Condition cond, DRegister rd, DRegister rm) { + Vswp(cond, kDataTypeValueNone, rd, rm); + } + void Vswp(DRegister rd, DRegister rm) { + Vswp(al, kDataTypeValueNone, rd, rm); + } + + void Vswp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vswp(cond, dt, rd, rm); + } + void Vswp(DataType dt, QRegister rd, QRegister rm) { Vswp(al, dt, rd, rm); } + void Vswp(Condition cond, QRegister rd, QRegister rm) { + 
Vswp(cond, kDataTypeValueNone, rd, rm); + } + void Vswp(QRegister rd, QRegister rm) { + Vswp(al, kDataTypeValueNone, rd, rm); + } + + void Vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtbl(cond, dt, rd, nreglist, rm); + } + void Vtbl(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + Vtbl(al, dt, rd, nreglist, rm); + } + + void Vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtbx(cond, dt, rd, nreglist, rm); + } + void Vtbx(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + Vtbx(al, dt, rd, nreglist, rm); + } + + void Vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtrn(cond, dt, rd, rm); + } + void Vtrn(DataType dt, DRegister rd, DRegister rm) { Vtrn(al, dt, rd, rm); } + + void Vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtrn(cond, dt, rd, rm); + } + void Vtrn(DataType dt, QRegister rd, QRegister rm) { Vtrn(al, dt, rd, rm); } + + void Vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtst(cond, dt, rd, rn, rm); + } + void Vtst(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vtst(al, dt, rd, rn, rm); + } + + void Vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtst(cond, dt, rd, rn, rm); + } + void Vtst(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vtst(al, dt, rd, rn, rm); + } + + void Vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vuzp(cond, dt, rd, rm); + } + void Vuzp(DataType dt, DRegister rd, DRegister rm) { Vuzp(al, dt, rd, rm); } + + void Vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vuzp(cond, dt, rd, rm); + } + void Vuzp(DataType dt, QRegister rd, QRegister rm) { Vuzp(al, dt, rd, rm); } + + void Vzip(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vzip(cond, dt, rd, rm); + } + void Vzip(DataType dt, DRegister rd, DRegister rm) { Vzip(al, dt, rd, rm); } + + void Vzip(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vzip(cond, dt, rd, rm); + } + void Vzip(DataType dt, QRegister rd, QRegister rm) { Vzip(al, dt, rd, rm); } + + void Yield(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + yield(cond); + } + void Yield() { Yield(al); } + void Vabs(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vabs(cond, F32, rd.S(), rm.S()); + } else { + Vabs(cond, F64, rd.D(), rm.D()); + } + } + void Vabs(VRegister rd, VRegister rm) { Vabs(al, rd, rm); } + void Vadd(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vadd(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vadd(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vadd(VRegister rd, VRegister rn, VRegister rm) { Vadd(al, rd, rn, rm); } + void Vcmp(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vcmp(cond, F32, rd.S(), rm.S()); + } else { + Vcmp(cond, F64, rd.D(), rm.D()); + } + } + void Vcmp(VRegister rd, VRegister rm) { Vcmp(al, rd, rm); } + void Vcmpe(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vcmpe(cond, F32, rd.S(), rm.S()); + } else { + Vcmpe(cond, F64, rd.D(), rm.D()); + } + } + void Vcmpe(VRegister rd, VRegister rm) { Vcmpe(al, rd, rm); } + void Vdiv(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vdiv(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vdiv(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vdiv(VRegister rd, VRegister rn, VRegister rm) { Vdiv(al, rd, rn, rm); } + void Vfma(Condition cond, VRegister 
rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfma(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfma(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfma(VRegister rd, VRegister rn, VRegister rm) { Vfma(al, rd, rn, rm); } + void Vfms(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfms(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfms(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfms(VRegister rd, VRegister rn, VRegister rm) { Vfms(al, rd, rn, rm); } + void Vfnma(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfnma(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfnma(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfnma(VRegister rd, VRegister rn, VRegister rm) { + Vfnma(al, rd, rn, rm); + } + void Vfnms(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfnms(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfnms(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfnms(VRegister rd, VRegister rn, VRegister rm) { + Vfnms(al, rd, rn, rm); + } + void Vmaxnm(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmaxnm(F32, rd.S(), rn.S(), rm.S()); + } else { + Vmaxnm(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vminnm(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vminnm(F32, rd.S(), rn.S(), rm.S()); + } else { + Vminnm(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmla(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmla(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vmla(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmla(VRegister rd, VRegister rn, VRegister rm) { Vmla(al, rd, rn, rm); } + void Vmls(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmls(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vmls(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmls(VRegister rd, VRegister rn, VRegister rm) { Vmls(al, rd, rn, rm); } + void Vmov(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmov(cond, F32, rd.S(), rm.S()); + } else { + Vmov(cond, F64, rd.D(), rm.D()); + } + } + void Vmov(VRegister rd, VRegister rm) { Vmov(al, rd, rm); } + void Vmul(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmul(cond, F32, rd.S(), rn.S(), 
rm.S()); + } else { + Vmul(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmul(VRegister rd, VRegister rn, VRegister rm) { Vmul(al, rd, rn, rm); } + void Vneg(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vneg(cond, F32, rd.S(), rm.S()); + } else { + Vneg(cond, F64, rd.D(), rm.D()); + } + } + void Vneg(VRegister rd, VRegister rm) { Vneg(al, rd, rm); } + void Vnmla(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmla(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmla(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmla(VRegister rd, VRegister rn, VRegister rm) { + Vnmla(al, rd, rn, rm); + } + void Vnmls(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmls(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmls(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmls(VRegister rd, VRegister rn, VRegister rm) { + Vnmls(al, rd, rn, rm); + } + void Vnmul(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmul(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmul(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmul(VRegister rd, VRegister rn, VRegister rm) { + Vnmul(al, rd, rn, rm); + } + void Vrinta(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrinta(F32, rd.S(), rm.S()); + } else { + Vrinta(F64, rd.D(), rm.D()); + } + } + void Vrintm(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintm(F32, rd.S(), rm.S()); + } else { + Vrintm(F64, rd.D(), rm.D()); + } + } + void Vrintn(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintn(F32, rd.S(), rm.S()); + } else { + Vrintn(F64, rd.D(), rm.D()); + } + } + void Vrintp(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintp(F32, rd.S(), rm.S()); + } else { + Vrintp(F64, rd.D(), rm.D()); + } + } + void Vrintr(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintr(cond, F32, rd.S(), rm.S()); + } else { + Vrintr(cond, F64, rd.D(), rm.D()); + } + } + void Vrintr(VRegister rd, VRegister rm) { Vrintr(al, rd, rm); } + void Vrintx(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintx(cond, F32, rd.S(), rm.S()); + } else { + Vrintx(cond, F64, rd.D(), rm.D()); + } + } + void Vrintx(VRegister rd, VRegister rm) { Vrintx(al, rd, rm); } + void Vrintz(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintz(cond, F32, rd.S(), rm.S()); + } else { + Vrintz(cond, F64, rd.D(), rm.D()); + } + } + void Vrintz(VRegister rd, VRegister rm) { Vrintz(al, rd, rm); } + 
void Vseleq(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vseleq(F32, rd.S(), rn.S(), rm.S()); + } else { + Vseleq(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselge(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselge(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselge(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselgt(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselgt(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselgt(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselvs(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselvs(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselvs(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vsqrt(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vsqrt(cond, F32, rd.S(), rm.S()); + } else { + Vsqrt(cond, F64, rd.D(), rm.D()); + } + } + void Vsqrt(VRegister rd, VRegister rm) { Vsqrt(al, rd, rm); } + void Vsub(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vsub(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vsub(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vsub(VRegister rd, VRegister rn, VRegister rm) { Vsub(al, rd, rn, rm); } + // End of generated code. + + virtual bool AllowUnpredictable() VIXL_OVERRIDE { + VIXL_ABORT_WITH_MSG("Unpredictable instruction.\n"); + return false; + } + virtual bool AllowStronglyDiscouraged() VIXL_OVERRIDE { + VIXL_ABORT_WITH_MSG( + "ARM strongly recommends to not use this instruction.\n"); + return false; + } + // Old syntax of vrint instructions. 
+ VIXL_DEPRECATED( + "void Vrinta(DataType dt, DRegister rd, DRegister rm)", + void Vrinta(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrinta(DataType dt, QRegister rd, QRegister rm)", + void Vrinta(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrinta(DataType dt, SRegister rd, SRegister rm)", + void Vrinta(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintm(DataType dt, DRegister rd, DRegister rm)", + void Vrintm(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintm(DataType dt, QRegister rd, QRegister rm)", + void Vrintm(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintm(DataType dt, SRegister rd, SRegister rm)", + void Vrintm(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintn(DataType dt, DRegister rd, DRegister rm)", + void Vrintn(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintn(DataType dt, QRegister rd, QRegister rm)", + void Vrintn(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintn(DataType dt, SRegister rd, SRegister rm)", + void Vrintn(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintp(DataType dt, DRegister rd, DRegister rm)", + void Vrintp(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintp(DataType dt, QRegister rd, QRegister rm)", + void Vrintp(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintp(DataType dt, SRegister rd, SRegister rm)", + void Vrintp(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintr(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintr(DataType dt, SRegister rd, SRegister rm)", + void Vrintr(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintr(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintr(DataType dt, DRegister rd, DRegister rm)", + void 
Vrintr(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintx(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintx(DataType dt, DRegister rd, DRegister rm)", + void Vrintx(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(DataType dt, QRegister rd, QRegister rm)", + void Vrintx(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintx(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintx(DataType dt, SRegister rd, SRegister rm)", + void Vrintx(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintz(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintz(DataType dt, DRegister rd, DRegister rm)", + void Vrintz(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(DataType dt, QRegister rd, QRegister rm)", + void Vrintz(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintz(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintz(DataType dt, SRegister rd, SRegister rm)", + void Vrintz(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(dt1, rd, rm); + } + + private: + bool NeedBranch(Condition* cond) { return !cond->Is(al) && IsUsingT32(); } + static const int kBranchSize = kMaxInstructionSizeInBytes; + + RegisterList available_; + VRegisterList available_vfp_; + UseScratchRegisterScope* current_scratch_scope_; + MacroAssemblerContext context_; + PoolManager pool_manager_; + bool generate_simulator_code_; + bool allow_macro_instructions_; + Label* pool_end_; + + friend class TestMacroAssembler; +}; + +// This scope utility allows scratch registers to be managed safely. The +// MacroAssembler's GetScratchRegisterList() is used as a pool of scratch +// registers. These registers can be allocated on demand, and will be returned +// at the end of the scope. +// +// When the scope ends, the MacroAssembler's lists will be restored to their +// original state, even if the lists were modified by some other means. +// +// Scopes must nest perfectly. That is, they must be destructed in reverse +// construction order. 
Otherwise, it is not clear how to handle cases where one +// scope acquires a register that was included in a now-closing scope. With +// perfect nesting, this cannot occur. +class UseScratchRegisterScope { + public: + // This constructor implicitly calls the `Open` function to initialise the + // scope, so it is ready to use immediately after it has been constructed. + explicit UseScratchRegisterScope(MacroAssembler* masm) + : masm_(NULL), parent_(NULL), old_available_(0), old_available_vfp_(0) { + Open(masm); + } + // This constructor allows deferred and optional initialisation of the scope. + // The user is required to explicitly call the `Open` function before using + // the scope. + UseScratchRegisterScope() + : masm_(NULL), parent_(NULL), old_available_(0), old_available_vfp_(0) {} + + // This function performs the actual initialisation work. + void Open(MacroAssembler* masm); + + // The destructor always implicitly calls the `Close` function. + ~UseScratchRegisterScope() { Close(); } + + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close(); + + bool IsAvailable(const Register& reg) const; + bool IsAvailable(const VRegister& reg) const; + + // Take a register from the temp list. It will be returned automatically when + // the scope ends. + Register Acquire(); + VRegister AcquireV(unsigned size_in_bits); + QRegister AcquireQ(); + DRegister AcquireD(); + SRegister AcquireS(); + + // Explicitly release an acquired (or excluded) register, putting it back in + // the temp list. + void Release(const Register& reg); + void Release(const VRegister& reg); + + // Make the specified registers available as scratch registers for the + // duration of this scope. + void Include(const RegisterList& list); + void Include(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg) { + Include(RegisterList(reg1, reg2, reg3, reg4)); + } + void Include(const VRegisterList& list); + void Include(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg) { + Include(VRegisterList(reg1, reg2, reg3, reg4)); + } + + // Make sure that the specified registers are not available in this scope. + // This can be used to prevent helper functions from using sensitive + // registers, for example. + void Exclude(const RegisterList& list); + void Exclude(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg) { + Exclude(RegisterList(reg1, reg2, reg3, reg4)); + } + void Exclude(const VRegisterList& list); + void Exclude(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg) { + Exclude(VRegisterList(reg1, reg2, reg3, reg4)); + } + + // A convenience helper to exclude any registers used by the operand. + void Exclude(const Operand& operand); + + // Prevent any scratch registers from being used in this scope. + void ExcludeAll(); + + private: + // The MacroAssembler maintains a list of available scratch registers, and + // also keeps track of the most recently-opened scope so that on destruction + // we can check that scopes do not outlive their parents. + MacroAssembler* masm_; + UseScratchRegisterScope* parent_; + + // The state of the available lists at the start of this scope. 
+ uint32_t old_available_; // kRRegister + uint64_t old_available_vfp_; // kVRegister + + VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } + VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } +}; + + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ diff --git a/dep/vixl/include/vixl/aarch32/operands-aarch32.h b/dep/vixl/include/vixl/aarch32/operands-aarch32.h new file mode 100644 index 000000000..1d18bfd31 --- /dev/null +++ b/dep/vixl/include/vixl/aarch32/operands-aarch32.h @@ -0,0 +1,927 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_ +#define VIXL_AARCH32_OPERANDS_AARCH32_H_ + +#include "aarch32/instructions-aarch32.h" + +namespace vixl { +namespace aarch32 { + +// Operand represents generic set of arguments to pass to an instruction. +// +// Usage: <instr> <Rd>, <Operand> +// +// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.) +// <Rd> is the destination register +// <Operand> is the rest of the arguments to the instruction +// +// <Operand> can be one of: +// +// #<imm> - an unsigned 32-bit immediate value +// <Rm>, <shift> <#amount> - immediate shifted register +// <Rm>, <shift> <Rs> - register shifted register +// +class Operand { + public: + // { #<imm> } + // where <imm> is uint32_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoReg), + shift_(LSL), + amount_(0), + rs_(NoReg) {} + Operand(int32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoReg), + shift_(LSL), + amount_(0), + rs_(NoReg) {} + + // rm + // where rm is the base register + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(Register rm) // NOLINT(runtime/explicit) + : imm_(0), + rm_(rm), + shift_(LSL), + amount_(0), + rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + } + + // rm, <shift> + // where rm is the base register, and + // <shift> is RRX + Operand(Register rm, Shift shift) + : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rm, <shift> #<amount> + // where rm is the base register, and + // <shift> is one of {LSL, LSR, ASR, ROR}, and + // <amount> is uint6_t. + Operand(Register rm, Shift shift, uint32_t amount) + : imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + VIXL_ASSERT(!shift_.IsRRX()); +#ifdef VIXL_DEBUG + switch (shift_.GetType()) { + case LSL: + VIXL_ASSERT(amount_ <= 31); + break; + case ROR: + VIXL_ASSERT(amount_ <= 31); + break; + case LSR: + case ASR: + VIXL_ASSERT(amount_ <= 32); + break; + case RRX: + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + + // rm, <shift> rs + // where rm is the base register, and + // <shift> is one of {LSL, LSR, ASR, ROR}, and + // rs is the shifted register + Operand(Register rm, Shift shift, Register rs) + : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) { + VIXL_ASSERT(rm_.IsValid() && rs_.IsValid()); + VIXL_ASSERT(!shift_.IsRRX()); + } + + // Factory methods creating operands from any integral or pointer type. The + // source must fit into 32 bits. + template <typename T> + static Operand From(T immediate) { +#if __cplusplus >= 201103L + VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value, + "An integral type is required to build an " + "immediate operand."); +#endif + // Allow both a signed or unsigned 32 bit integer to be passed, but store it + // as a uint32_t. The signedness information will be lost. We have to add a + // static_cast to make sure the compiler does not complain about implicit 64 + // to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit + // value, as long as it can be encoded in 32 bits. + VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate)); + return Operand(static_cast<uint32_t>(immediate)); + } + + template <typename T> + static Operand From(T* address) { + uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address); + VIXL_ASSERT(IsUint32(address_as_integral)); + return Operand(static_cast<uint32_t>(address_as_integral)); + } + + bool IsImmediate() const { return !rm_.IsValid(); } + + bool IsPlainRegister() const { + return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0); + } + + bool IsImmediateShiftedRegister() const { + return rm_.IsValid() && !rs_.IsValid(); + } + + bool IsRegisterShiftedRegister() const { + return rm_.IsValid() && rs_.IsValid(); + } + + uint32_t GetImmediate() const { + VIXL_ASSERT(IsImmediate()); + return imm_; + } + + int32_t GetSignedImmediate() const { + VIXL_ASSERT(IsImmediate()); + int32_t result; + memcpy(&result, &imm_, sizeof(result)); + return result; + } + + Register GetBaseRegister() const { + VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister()); + return rm_; + } + + Shift GetShift() const { + VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister()); + return shift_; + } + + uint32_t GetShiftAmount() const { + VIXL_ASSERT(IsImmediateShiftedRegister()); + return amount_; + } + + Register GetShiftRegister() const { + VIXL_ASSERT(IsRegisterShiftedRegister()); + return rs_; + } + + uint32_t GetTypeEncodingValue() const { + return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue(); + } + + private: +// Forbid implicitly creating operands around types that cannot be encoded +// into a uint32_t without loss. +#if __cplusplus >= 201103L + Operand(int64_t) = delete; // NOLINT(runtime/explicit) + Operand(uint64_t) = delete; // NOLINT(runtime/explicit) + Operand(float) = delete; // NOLINT(runtime/explicit) + Operand(double) = delete; // NOLINT(runtime/explicit) +#else + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) { // NOLINT(runtime/explicit) + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) { // NOLINT(runtime/explicit) + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) { // NOLINT + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) { // NOLINT + VIXL_UNREACHABLE(); + } +#endif + + uint32_t imm_; + Register rm_; + Shift shift_; + uint32_t amount_; + Register rs_; +}; + +std::ostream& operator<<(std::ostream& os, const Operand& operand); + +class NeonImmediate { + template <typename T> + struct DataTypeIdentity { + T data_type_; + }; + + public: + // { #<immediate> } + // where <immediate> is a 32-bit number. + // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I32) {} + NeonImmediate(int immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I32) {} + + // { #<immediate> } + // where <immediate> is a 64-bit number + // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I64) {} + NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I64) {} + + // { #<immediate> } + // where <immediate> is a non-zero floating point number which can be encoded + // as an 8 bit floating point (checked by the constructor). + // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(float immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(F32) {} + NeonImmediate(double immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(F64) {} + + NeonImmediate(const NeonImmediate& src) + : imm_(src.imm_), immediate_type_(src.immediate_type_) {} + + template <typename T> + T GetImmediate() const { + return GetImmediate(DataTypeIdentity<T>()); + } + + template <typename T> + T GetImmediate(const DataTypeIdentity<T>&) const { + VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t)); + VIXL_ASSERT(CanConvert<T>()); + if (immediate_type_.Is(I64)) + return static_cast<T>(imm_.u64_ & static_cast<T>(-1)); + if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0; + return static_cast<T>(imm_.u32_ & static_cast<T>(-1)); + } + + uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const { + VIXL_ASSERT(CanConvert<uint64_t>()); + if (immediate_type_.Is(I32)) return imm_.u32_; + if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0; + return imm_.u64_; + } + float GetImmediate(const DataTypeIdentity<float>&) const { + VIXL_ASSERT(CanConvert<float>()); + if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_); + return imm_.f_; + } + double GetImmediate(const DataTypeIdentity<double>&) const { + VIXL_ASSERT(CanConvert<double>()); + if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_); + return imm_.d_; + } + + bool IsInteger32() const { return immediate_type_.Is(I32); } + bool IsInteger64() const { return immediate_type_.Is(I64); } + bool IsInteger() const { return IsInteger32() | IsInteger64(); } + bool IsFloat() const { return immediate_type_.Is(F32); } + bool IsDouble() const { return immediate_type_.Is(F64); } + bool IsFloatZero() const { + if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f; + if (immediate_type_.Is(F64)) return imm_.d_ == 0.0; + return false; + } + + template <typename T> + bool CanConvert() const { + return CanConvert(DataTypeIdentity<T>()); + } + + template <typename T> + bool CanConvert(const DataTypeIdentity<T>&) const { + VIXL_ASSERT(sizeof(T) < sizeof(uint32_t)); + return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) || + (immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) || + (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) || + (immediate_type_.Is(F64) && (imm_.d_ == 0.0)); + } + bool CanConvert(const DataTypeIdentity<uint32_t>&) const { + return immediate_type_.Is(I32) || + (immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) || + (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) || + (immediate_type_.Is(F64) && (imm_.d_ == 0.0)); + } + bool CanConvert(const DataTypeIdentity<uint64_t>&) const { + return IsInteger() || CanConvert<uint32_t>(); + } + bool CanConvert(const DataTypeIdentity<float>&) const { + return IsFloat() || IsDouble(); + } + bool CanConvert(const DataTypeIdentity<double>&) const { + return IsFloat() || IsDouble(); + } + friend std::ostream& operator<<(std::ostream& os, + const NeonImmediate& operand); + + private: + union NeonImmediateType { + uint64_t u64_; + double d_; + uint32_t u32_; + float f_; + NeonImmediateType(uint64_t u) : u64_(u) {} + NeonImmediateType(int64_t u) : u64_(u) {} + NeonImmediateType(uint32_t u) : u32_(u) {} + NeonImmediateType(int32_t u) : u32_(u) {} + NeonImmediateType(double d) : d_(d) {} + NeonImmediateType(float f) : f_(f) {} + NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {} + } imm_; + + DataType immediate_type_; +}; + +std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand); + +class NeonOperand { + public: + NeonOperand(int32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(int64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(float immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(double immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : imm_(imm), + rm_(NoDReg) {} + NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit) + : imm_(0), + rm_(rm) { + VIXL_ASSERT(rm_.IsValid()); + } + + bool IsImmediate() const { return !rm_.IsValid(); } + bool IsRegister() const { return rm_.IsValid(); } + bool IsFloatZero() const { + VIXL_ASSERT(IsImmediate()); + return imm_.IsFloatZero(); + } + + const NeonImmediate& GetNeonImmediate() const { return imm_; } + + VRegister GetRegister() const { + VIXL_ASSERT(IsRegister()); + return rm_; + } + + protected: + NeonImmediate imm_; + VRegister rm_; +}; + +std::ostream& operator<<(std::ostream& os, const NeonOperand& operand); + +// SOperand represents either an immediate or a SRegister. +class SOperand : public NeonOperand { + public: + // #<immediate> + // where <immediate> is a 32-bit int + // This is allowed to be an implicit constructor because SOperand is + // a wrapper class that doesn't normally perform any type conversion. + SOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + SOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + // #<immediate> + // where <immediate> is a 32-bit float + SOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + // where <immediate> is a 64-bit float + SOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + + // rm + // This is allowed to be an implicit constructor because SOperand is + // a wrapper class that doesn't normally perform any type conversion. + SOperand(SRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) {} + SRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister)); + return SRegister(rm_.GetCode()); + } +}; + +std::ostream& operator<<(std::ostream& os, const SOperand& operand); + +// DOperand represents either an immediate or a DRegister. +class DOperand : public NeonOperand { + public: + // #<imm> + // where <imm> is uint32_t. + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(int64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + // #<imm> + // where <imm> is a non-zero floating point number which can be encoded + // as an 8 bit floating point (checked by the constructor). + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + // rm + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(DRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) {} + + DRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister)); + return DRegister(rm_.GetCode()); + } +}; + +std::ostream& operator<<(std::ostream& os, const DOperand& operand); + +// QOperand represents either an immediate or a QRegister. +class QOperand : public NeonOperand { + public: + // #<imm> + // where <imm> is uint32_t. + // This is allowed to be an implicit constructor because QOperand is + // a wrapper class that doesn't normally perform any type conversion. + QOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(int64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + + // rm + // This is allowed to be an implicit constructor because QOperand is + // a wrapper class that doesn't normally perform any type conversion. + QOperand(QRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) { + VIXL_ASSERT(rm_.IsValid()); + } + + QRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister)); + return QRegister(rm_.GetCode()); + } +}; + +std::ostream& operator<<(std::ostream& os, const QOperand& operand); + +class ImmediateVFP : public EncodingValue { + template <typename T> + struct FloatType { + typedef T base_type; + }; + + public: + explicit ImmediateVFP(const NeonImmediate& neon_imm) { + if (neon_imm.IsFloat()) { + const float imm = neon_imm.GetImmediate<float>(); + if (VFP::IsImmFP32(imm)) { + SetEncodingValue(VFP::FP32ToImm8(imm)); + } + } else if (neon_imm.IsDouble()) { + const double imm = neon_imm.GetImmediate<double>(); + if (VFP::IsImmFP64(imm)) { + SetEncodingValue(VFP::FP64ToImm8(imm)); + } + } + } + + template <typename T> + static T Decode(uint32_t v) { + return Decode(v, FloatType<T>()); + } + + static float Decode(uint32_t imm8, const FloatType<float>&) { + return VFP::Imm8ToFP32(imm8); + } + + static double Decode(uint32_t imm8, const FloatType<double>&) { + return VFP::Imm8ToFP64(imm8); + } +}; + + +class ImmediateVbic : public EncodingValueAndImmediate { + public: + ImmediateVbic(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVand : public ImmediateVbic { + public: + ImmediateVand(DataType dt, const NeonImmediate neon_imm) + : ImmediateVbic(dt, neon_imm) { + if (IsValid()) { + SetEncodedImmediate(~GetEncodedImmediate() & 0xff); + } + } +}; + +class ImmediateVmov : public EncodingValueAndImmediate { + public: + ImmediateVmov(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVmvn : public EncodingValueAndImmediate { + public: + ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVorr : public EncodingValueAndImmediate { + public: + ImmediateVorr(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVorn : public ImmediateVorr { + public: + ImmediateVorn(DataType dt, const NeonImmediate& neon_imm) + : ImmediateVorr(dt, neon_imm) { + if (IsValid()) { + SetEncodedImmediate(~GetEncodedImmediate() & 0xff); + } + } +}; + +// MemOperand represents the addressing mode of a load or store instruction. +// +// Usage: <instr> <Rt>, <MemOperand> +// +// where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.), +// <Rt> is general purpose register to be transferred, +// <MemOperand> is the rest of the arguments to the instruction +// +// <MemOperand> can be in one of 3 addressing modes: +// +// [ <Rn>, <offset> ] == offset addressing +// [ <Rn>, <offset> ]! == pre-indexed addressing +// [ <Rn> ], <offset> == post-indexed addressing +// +// where <offset> can be one of: +// - an immediate constant, such as <imm8>, <imm12> +// - an index register <Rm> +// - a shifted index register <Rm>, <shift> #<amount> +// +// The index register may have an associated {+/-} sign, +// which if omitted, defaults to + . +// +// We have two constructors for the offset: +// +// One with a signed value offset parameter. The value of sign_ is +// "sign_of(constructor's offset parameter)" and the value of offset_ is +// "constructor's offset parameter". +// +// The other with a sign and a positive value offset parameters. The value of +// sign_ is "constructor's sign parameter" and the value of offset_ is +// "constructor's sign parameter * constructor's offset parameter". +// +// The value of offset_ reflects the effective offset. For an offset_ of 0, +// sign_ can be positive or negative. Otherwise, sign_ always agrees with +// the sign of offset_. +class MemOperand { + public: + // rn + // where rn is the general purpose base register only + explicit MemOperand(Register rn, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode | kMemOperandRegisterOnly) { + VIXL_ASSERT(rn_.IsValid()); + } + + // rn, #<offset> + // where rn is the general purpose base register, + // <offset> is a 32-bit offset to add to rn + // + // Note: if rn is PC, then this form is equivalent to a "label" + // Note: the second constructor allows minus zero (-0). + MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset) + : rn_(rn), + offset_(offset), + sign_((offset < 0) ? minus : plus), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid()); + } + MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset) + : rn_(rn), + offset_(sign.IsPlus() ? offset : -offset), + sign_(sign), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid()); + // With this constructor, the sign must only be specified by "sign". + VIXL_ASSERT(offset >= 0); + } + + // rn, {+/-}rm + // where rn is the general purpose base register, + // {+/-} is the sign of the index register, + // rm is the general purpose index register, + MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + } + + // rn, rm + // where rn is the general purpose base register, + // rm is the general purpose index register, + MemOperand(Register rn, Register rm, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + } + + // rn, {+/-}rm, <shift> + // where rn is the general purpose base register, + // {+/-} is the sign of the index register, + // rm is the general purpose index register, + // <shift> is RRX, applied to value from rm + MemOperand(Register rn, + Sign sign, + Register rm, + Shift shift, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(shift), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rn, rm, <shift> + // where rn is the general purpose base register, + // rm is the general purpose index register, + // <shift> is RRX, applied to value from rm + MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(shift), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rn, {+/-}rm, <shift> #<amount> + // where rn is the general purpose base register, + // {+/-} is the sign of the index register, + // rm is the general purpose index register, + // <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm + // <amount> is optional size to apply to value from rm + MemOperand(Register rn, + Sign sign, + Register rm, + Shift shift, + uint32_t shift_amount, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(shift), + shift_amount_(shift_amount), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + CheckShift(); + } + + // rn, rm, <shift> #<amount> + // where rn is the general purpose base register, + // rm is the general purpose index register, + // <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm + // <amount> is optional size to apply to value from rm + MemOperand(Register rn, + Register rm, + Shift shift, + uint32_t shift_amount, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(shift), + shift_amount_(shift_amount), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + CheckShift(); + } + + Register GetBaseRegister() const { return rn_; } + int32_t GetOffsetImmediate() const { return offset_; } + bool IsOffsetImmediateWithinRange(int min, + int max, + int multiple_of = 1) const { + return (offset_ >= min) && (offset_ <= max) && + ((offset_ % multiple_of) == 0); + } + Sign GetSign() const { return sign_; } + Register GetOffsetRegister() const { return rm_; } + Shift GetShift() const { return shift_; } + unsigned GetShiftAmount() const { return shift_amount_; } + AddrMode GetAddrMode() const { + return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask); + } + bool IsRegisterOnly() const { + return (addrmode_ & kMemOperandRegisterOnly) != 0; + } + + bool IsImmediate() const { return !rm_.IsValid(); } + 
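// NOTE (illustrative only, not part of the upstream VIXL header): the + // constructors above map onto the addressing modes described in the class + // comment. Assuming core registers r0 and r1 are in scope: + // + // MemOperand(r0, 4); // [r0, #4], offset addressing + // MemOperand(r0, 4, PreIndex); // [r0, #4]!, pre-indexed + // MemOperand(r0, 4, PostIndex); // [r0], #4, post-indexed + // MemOperand(r0, plus, r1, LSL, 2); // [r0, +r1, lsl #2], offset addressing + // + 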
bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); } + bool IsPlainRegister() const { + return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0); + } + bool IsShiftedRegister() const { return rm_.IsValid(); } + bool IsImmediateOffset() const { + return (GetAddrMode() == Offset) && !rm_.IsValid(); + } + bool IsImmediateZeroOffset() const { + return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0); + } + bool IsRegisterOffset() const { + return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() && + (shift_amount_ == 0); + } + bool IsShiftedRegisterOffset() const { + return (GetAddrMode() == Offset) && rm_.IsValid(); + } + uint32_t GetTypeEncodingValue() const { + return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue(); + } + bool IsOffset() const { return GetAddrMode() == Offset; } + bool IsPreIndex() const { return GetAddrMode() == PreIndex; } + bool IsPostIndex() const { return GetAddrMode() == PostIndex; } + bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); } + + private: + static const int kMemOperandRegisterOnly = 0x1000; + static const int kMemOperandAddrModeMask = 0xfff; + void CheckShift() { +#ifdef VIXL_DEBUG + // Disallow any zero shift other than RRX #0 and LSL #0. + if ((shift_amount_ == 0) && shift_.IsRRX()) return; + if ((shift_amount_ == 0) && !shift_.IsLSL()) { + VIXL_ABORT_WITH_MSG( + "A shift by 0 is only accepted in " + "the case of lsl and will be treated as " + "no shift.\n"); + } + switch (shift_.GetType()) { + case LSL: + VIXL_ASSERT(shift_amount_ <= 31); + break; + case ROR: + VIXL_ASSERT(shift_amount_ <= 31); + break; + case LSR: + case ASR: + VIXL_ASSERT(shift_amount_ <= 32); + break; + case RRX: + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + Register rn_; + int32_t offset_; + Sign sign_; + Register rm_; + Shift shift_; + uint32_t shift_amount_; + uint32_t addrmode_; +}; + +std::ostream& operator<<(std::ostream& os, const MemOperand& operand); + +class AlignedMemOperand : public MemOperand { + public: + AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset) + : MemOperand(rn, addrmode), align_(align) { + VIXL_ASSERT(addrmode != PreIndex); + } + + AlignedMemOperand(Register rn, + Alignment align, + Register rm, + AddrMode addrmode) + : MemOperand(rn, rm, addrmode), align_(align) { + VIXL_ASSERT(addrmode != PreIndex); + } + + Alignment GetAlignment() const { return align_; } + + private: + Alignment align_; +}; + +std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand); + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_ diff --git a/dep/vixl/include/vixl/aarch64/abi-aarch64.h b/dep/vixl/include/vixl/aarch64/abi-aarch64.h new file mode 100644 index 000000000..a00580241 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/abi-aarch64.h @@ -0,0 +1,167 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The ABI features are only supported with C++11 or later. +#if __cplusplus >= 201103L +// This should not be defined manually. +#define VIXL_HAS_ABI_SUPPORT +#elif defined(VIXL_HAS_ABI_SUPPORT) +#error "The ABI support requires C++11 or later." +#endif + +#ifdef VIXL_HAS_ABI_SUPPORT + +#ifndef VIXL_AARCH64_ABI_AARCH64_H_ +#define VIXL_AARCH64_ABI_AARCH64_H_ + +#include +#include + +#include "../globals-vixl.h" + +#include "instructions-aarch64.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Class describing the AArch64 procedure call standard, as defined in "ARM +// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)", +// release 1.0 (AAPCS below). +// +// The stages in the comments match the description in that document. +// +// Stage B does not apply to arguments handled by this class. +class ABI { + public: + explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) { + // Stage A - Initialization + Reset(); + } + + void Reset() { + NGRN_ = 0; + NSRN_ = 0; + stack_offset_ = 0; + } + + int GetStackSpaceRequired() { return stack_offset_; } + + // The logic is described in section 5.5 of the AAPCS. + template + GenericOperand GetReturnGenericOperand() const { + ABI abi(stack_pointer_); + GenericOperand result = abi.GetNextParameterGenericOperand(); + VIXL_ASSERT(result.IsCPURegister()); + return result; + } + + // The logic is described in section 5.4.2 of the AAPCS. + // The `GenericOperand` returned describes the location reserved for the + // argument from the point of view of the callee. + template + GenericOperand GetNextParameterGenericOperand() { + const bool is_floating_point_type = std::is_floating_point::value; + const bool is_integral_type = + std::is_integral::value || std::is_enum::value; + const bool is_pointer_type = std::is_pointer::value; + int type_alignment = std::alignment_of::value; + + // We only support basic types. + VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type); + + // To ensure we get the correct type of operand when simulating on a 32-bit + // host, force the size of pointer types to the native AArch64 pointer size. + unsigned size = is_pointer_type ? 8 : sizeof(T); + // The size of the 'operand' reserved for the argument. 
+ unsigned operand_size = AlignUp(size, kWRegSizeInBytes); + if (size > 8) { + VIXL_UNIMPLEMENTED(); + return GenericOperand(); + } + + // Stage C.1 + if (is_floating_point_type && (NSRN_ < 8)) { + return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte)); + } + // Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above. + // Stages C.5 and C.6 + if (is_floating_point_type) { + VIXL_STATIC_ASSERT( + !is_floating_point_type || + (std::is_same::value || std::is_same::value)); + int offset = stack_offset_; + stack_offset_ += 8; + return GenericOperand(MemOperand(stack_pointer_, offset), operand_size); + } + // Stage C.7 + if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) { + return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte)); + } + // Stage C.8 + if (type_alignment == 16) { + NGRN_ = AlignUp(NGRN_, 2); + } + // Stage C.9 + if (is_integral_type && (size == 16) && (NGRN_ < 7)) { + VIXL_UNIMPLEMENTED(); + return GenericOperand(); + } + // Stage C.10: Unsupported. Caught by the assertions above. + // Stage C.11 + NGRN_ = 8; + // Stage C.12 + stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8)); + // Stage C.13: Unsupported. Caught by the assertions above. + // Stage C.14 + VIXL_ASSERT(size <= 8u); + size = std::max(size, 8u); + int offset = stack_offset_; + stack_offset_ += size; + return GenericOperand(MemOperand(stack_pointer_, offset), operand_size); + } + + private: + Register stack_pointer_; + // Next General-purpose Register Number. + int NGRN_; + // Next SIMD and Floating-point Register Number. + int NSRN_; + // The acronym "NSAA" used in the standard refers to the "Next Stacked + // Argument Address". Here we deal with offsets from the stack pointer. + int stack_offset_; +}; + +template <> +inline GenericOperand ABI::GetReturnGenericOperand() const { + return GenericOperand(); +} +} +} // namespace vixl::aarch64 + +#endif // VIXL_AARCH64_ABI_AARCH64_H_ + +#endif // VIXL_HAS_ABI_SUPPORT diff --git a/dep/vixl/include/vixl/aarch64/assembler-aarch64.h b/dep/vixl/include/vixl/aarch64/assembler-aarch64.h new file mode 100644 index 000000000..7d9546657 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/assembler-aarch64.h @@ -0,0 +1,4434 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_ASSEMBLER_AARCH64_H_ +#define VIXL_AARCH64_ASSEMBLER_AARCH64_H_ + +#include "../assembler-base-vixl.h" +#include "../code-generation-scopes-vixl.h" +#include "../cpu-features.h" +#include "../globals-vixl.h" +#include "../invalset-vixl.h" +#include "../utils-vixl.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +class LabelTestHelper; // Forward declaration. + + +class Label { + public: + Label() : location_(kLocationUnbound) {} + ~Label() { + // All links to a label must have been resolved before it is destructed. + VIXL_ASSERT(!IsLinked()); + } + + bool IsBound() const { return location_ >= 0; } + bool IsLinked() const { return !links_.empty(); } + + ptrdiff_t GetLocation() const { return location_; } + VIXL_DEPRECATED("GetLocation", ptrdiff_t location() const) { + return GetLocation(); + } + + static const int kNPreallocatedLinks = 4; + static const ptrdiff_t kInvalidLinkKey = PTRDIFF_MAX; + static const size_t kReclaimFrom = 512; + static const size_t kReclaimFactor = 2; + + typedef InvalSet + LinksSetBase; + typedef InvalSetIterator LabelLinksIteratorBase; + + private: + class LinksSet : public LinksSetBase { + public: + LinksSet() : LinksSetBase() {} + }; + + // Allows iterating over the links of a label. The behaviour is undefined if + // the list of links is modified in any way while iterating. + class LabelLinksIterator : public LabelLinksIteratorBase { + public: + explicit LabelLinksIterator(Label* label) + : LabelLinksIteratorBase(&label->links_) {} + + // TODO: Remove these and use the STL-like interface instead. + using LabelLinksIteratorBase::Advance; + using LabelLinksIteratorBase::Current; + }; + + void Bind(ptrdiff_t location) { + // Labels can only be bound once. + VIXL_ASSERT(!IsBound()); + location_ = location; + } + + void AddLink(ptrdiff_t instruction) { + // If a label is bound, the assembler already has the information it needs + // to write the instruction, so there is no need to add it to links_. + VIXL_ASSERT(!IsBound()); + links_.insert(instruction); + } + + void DeleteLink(ptrdiff_t instruction) { links_.erase(instruction); } + + void ClearAllLinks() { links_.clear(); } + + // TODO: The comment below considers average case complexity for our + // usual use-cases. The elements of interest are: + // - Branches to a label are emitted in order: branch instructions to a label + // are generated at an offset in the code generation buffer greater than any + // other branch to that same label already generated. As an example, this can + // be broken when an instruction is patched to become a branch. Note that the + // code will still work, but the complexity considerations below may locally + // not apply any more. + // - Veneers are generated in order: for multiple branches of the same type + // branching to the same unbound label going out of range, veneers are + // generated in growing order of the branch instruction offset from the start + // of the buffer. 
+ // + // When creating a veneer for a branch going out of range, the link for this + // branch needs to be removed from this `links_`. Since all branches are + // tracked in one underlying InvalSet, the complexity for this deletion is the + // same as for finding the element, ie. O(n), where n is the number of links + // in the set. + // This could be reduced to O(1) by using the same trick as used when tracking + // branch information for veneers: split the container to use one set per type + // of branch. With that setup, when a veneer is created and the link needs to + // be deleted, if the two points above hold, it must be the minimum element of + // the set for its type of branch, and that minimum element will be accessible + // in O(1). + + // The offsets of the instructions that have linked to this label. + LinksSet links_; + // The label location. + ptrdiff_t location_; + + static const ptrdiff_t kLocationUnbound = -1; + +// It is not safe to copy labels, so disable the copy constructor and operator +// by declaring them private (without an implementation). +#if __cplusplus >= 201103L + Label(const Label&) = delete; + void operator=(const Label&) = delete; +#else + Label(const Label&); + void operator=(const Label&); +#endif + + // The Assembler class is responsible for binding and linking labels, since + // the stored offsets need to be consistent with the Assembler's buffer. + friend class Assembler; + // The MacroAssembler and VeneerPool handle resolution of branches to distant + // targets. + friend class MacroAssembler; + friend class VeneerPool; +}; + + +class Assembler; +class LiteralPool; + +// A literal is a 32-bit or 64-bit piece of data stored in the instruction +// stream and loaded through a pc relative load. The same literal can be +// referred to by multiple instructions but a literal can only reside at one +// place in memory. A literal can be used by a load before or after being +// placed in memory. +// +// Internally an offset of 0 is associated with a literal which has been +// neither used nor placed. Then two possibilities arise: +// 1) the label is placed, the offset (stored as offset + 1) is used to +// resolve any subsequent load using the label. +// 2) the label is not placed and offset is the offset of the last load using +// the literal (stored as -offset -1). If multiple loads refer to this +// literal then the last load holds the offset of the preceding load and +// all loads form a chain. Once the offset is placed all the loads in the +// chain are resolved and future loads fall back to possibility 1. +class RawLiteral { + public: + enum DeletionPolicy { + kDeletedOnPlacementByPool, + kDeletedOnPoolDestruction, + kManuallyDeleted + }; + + RawLiteral(size_t size, + LiteralPool* literal_pool, + DeletionPolicy deletion_policy = kManuallyDeleted); + + // The literal pool only sees and deletes `RawLiteral*` pointers, but they are + // actually pointing to `Literal` objects. 
+ virtual ~RawLiteral() {} + + size_t GetSize() const { + VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes); + VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes); + VIXL_ASSERT((size_ == kXRegSizeInBytes) || (size_ == kWRegSizeInBytes) || + (size_ == kQRegSizeInBytes)); + return size_; + } + VIXL_DEPRECATED("GetSize", size_t size()) { return GetSize(); } + + uint64_t GetRawValue128Low64() const { + VIXL_ASSERT(size_ == kQRegSizeInBytes); + return low64_; + } + VIXL_DEPRECATED("GetRawValue128Low64", uint64_t raw_value128_low64()) { + return GetRawValue128Low64(); + } + + uint64_t GetRawValue128High64() const { + VIXL_ASSERT(size_ == kQRegSizeInBytes); + return high64_; + } + VIXL_DEPRECATED("GetRawValue128High64", uint64_t raw_value128_high64()) { + return GetRawValue128High64(); + } + + uint64_t GetRawValue64() const { + VIXL_ASSERT(size_ == kXRegSizeInBytes); + VIXL_ASSERT(high64_ == 0); + return low64_; + } + VIXL_DEPRECATED("GetRawValue64", uint64_t raw_value64()) { + return GetRawValue64(); + } + + uint32_t GetRawValue32() const { + VIXL_ASSERT(size_ == kWRegSizeInBytes); + VIXL_ASSERT(high64_ == 0); + VIXL_ASSERT(IsUint32(low64_) || IsInt32(low64_)); + return static_cast(low64_); + } + VIXL_DEPRECATED("GetRawValue32", uint32_t raw_value32()) { + return GetRawValue32(); + } + + bool IsUsed() const { return offset_ < 0; } + bool IsPlaced() const { return offset_ > 0; } + + LiteralPool* GetLiteralPool() const { return literal_pool_; } + + ptrdiff_t GetOffset() const { + VIXL_ASSERT(IsPlaced()); + return offset_ - 1; + } + VIXL_DEPRECATED("GetOffset", ptrdiff_t offset()) { return GetOffset(); } + + protected: + void SetOffset(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = offset + 1; + } + VIXL_DEPRECATED("SetOffset", void set_offset(ptrdiff_t offset)) { + SetOffset(offset); + } + + ptrdiff_t GetLastUse() const { + VIXL_ASSERT(IsUsed()); + return -offset_ - 1; + } + VIXL_DEPRECATED("GetLastUse", ptrdiff_t last_use()) { return GetLastUse(); } + + void SetLastUse(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = -offset - 1; + } + VIXL_DEPRECATED("SetLastUse", void set_last_use(ptrdiff_t offset)) { + SetLastUse(offset); + } + + size_t size_; + ptrdiff_t offset_; + uint64_t low64_; + uint64_t high64_; + + private: + LiteralPool* literal_pool_; + DeletionPolicy deletion_policy_; + + friend class Assembler; + friend class LiteralPool; +}; + + +template +class Literal : public RawLiteral { + public: + explicit Literal(T value, + LiteralPool* literal_pool = NULL, + RawLiteral::DeletionPolicy ownership = kManuallyDeleted) + : RawLiteral(sizeof(value), literal_pool, ownership) { + VIXL_STATIC_ASSERT(sizeof(value) <= kXRegSizeInBytes); + UpdateValue(value); + } + + Literal(T high64, + T low64, + LiteralPool* literal_pool = NULL, + RawLiteral::DeletionPolicy ownership = kManuallyDeleted) + : RawLiteral(kQRegSizeInBytes, literal_pool, ownership) { + VIXL_STATIC_ASSERT(sizeof(low64) == (kQRegSizeInBytes / 2)); + UpdateValue(high64, low64); + } + + virtual ~Literal() {} + + // Update the value of this literal, if necessary by rewriting the value in + // the pool. + // If the literal has already been placed in a literal pool, the address of + // the start of the code buffer must be provided, as the literal only knows it + // offset from there. This also allows patching the value after the code has + // been moved in memory. 
+ void UpdateValue(T new_value, uint8_t* code_buffer = NULL) { + VIXL_ASSERT(sizeof(new_value) == size_); + memcpy(&low64_, &new_value, sizeof(new_value)); + if (IsPlaced()) { + VIXL_ASSERT(code_buffer != NULL); + RewriteValueInCode(code_buffer); + } + } + + void UpdateValue(T high64, T low64, uint8_t* code_buffer = NULL) { + VIXL_ASSERT(sizeof(low64) == size_ / 2); + memcpy(&low64_, &low64, sizeof(low64)); + memcpy(&high64_, &high64, sizeof(high64)); + if (IsPlaced()) { + VIXL_ASSERT(code_buffer != NULL); + RewriteValueInCode(code_buffer); + } + } + + void UpdateValue(T new_value, const Assembler* assembler); + void UpdateValue(T high64, T low64, const Assembler* assembler); + + private: + void RewriteValueInCode(uint8_t* code_buffer) { + VIXL_ASSERT(IsPlaced()); + VIXL_STATIC_ASSERT(sizeof(T) <= kXRegSizeInBytes); + switch (GetSize()) { + case kSRegSizeInBytes: + *reinterpret_cast(code_buffer + GetOffset()) = + GetRawValue32(); + break; + case kDRegSizeInBytes: + *reinterpret_cast(code_buffer + GetOffset()) = + GetRawValue64(); + break; + default: + VIXL_ASSERT(GetSize() == kQRegSizeInBytes); + uint64_t* base_address = + reinterpret_cast(code_buffer + GetOffset()); + *base_address = GetRawValue128Low64(); + *(base_address + 1) = GetRawValue128High64(); + } + } +}; + + +// Control whether or not position-independent code should be emitted. +enum PositionIndependentCodeOption { + // All code generated will be position-independent; all branches and + // references to labels generated with the Label class will use PC-relative + // addressing. + PositionIndependentCode, + + // Allow VIXL to generate code that refers to absolute addresses. With this + // option, it will not be possible to copy the code buffer and run it from a + // different address; code must be generated in its final location. + PositionDependentCode, + + // Allow VIXL to assume that the bottom 12 bits of the address will be + // constant, but that the top 48 bits may change. This allows `adrp` to + // function in systems which copy code between pages, but otherwise maintain + // 4KB page alignment. + PageOffsetDependentCode +}; + + +// Control how scaled- and unscaled-offset loads and stores are generated. +enum LoadStoreScalingOption { + // Prefer scaled-immediate-offset instructions, but emit unscaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferScaledOffset, + + // Prefer unscaled-immediate-offset instructions, but emit scaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferUnscaledOffset, + + // Require scaled-immediate-offset instructions. + RequireScaledOffset, + + // Require unscaled-immediate-offset instructions. + RequireUnscaledOffset +}; + + +// Assembler. +class Assembler : public vixl::internal::AssemblerBase { + public: + explicit Assembler( + PositionIndependentCodeOption pic = PositionIndependentCode) + : pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + explicit Assembler( + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode) + : AssemblerBase(capacity), + pic_(pic), + cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + Assembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode) + : AssemblerBase(buffer, capacity), + pic_(pic), + cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + + // Upon destruction, the code will assert that one of the following is true: + // * The Assembler object has not been used. 
+  // * Nothing has been emitted since the last Reset() call.
+  // * Nothing has been emitted since the last FinalizeCode() call.
+  ~Assembler() {}
+
+  // System functions.
+
+  // Start generating code from the beginning of the buffer, discarding any
+  // code and data that has already been emitted into the buffer.
+  void Reset();
+
+  // Label.
+  // Bind a label to the current PC.
+  void bind(Label* label);
+
+  // Bind a label to a specified offset from the start of the buffer.
+  void BindToOffset(Label* label, ptrdiff_t offset);
+
+  // Place a literal at the current PC.
+  void place(RawLiteral* literal);
+
+  VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) {
+    return GetCursorOffset();
+  }
+
+  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
+                  ptrdiff_t GetBufferEndOffset() const) {
+    return static_cast<ptrdiff_t>(GetBuffer().GetCapacity());
+  }
+  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
+                  ptrdiff_t BufferEndOffset() const) {
+    return GetBuffer().GetCapacity();
+  }
+
+  // Return the address of a bound label.
+  template <typename T>
+  T GetLabelAddress(const Label* label) const {
+    VIXL_ASSERT(label->IsBound());
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    return GetBuffer().GetOffsetAddress<T>(label->GetLocation());
+  }
+
+  Instruction* GetInstructionAt(ptrdiff_t instruction_offset) {
+    return GetBuffer()->GetOffsetAddress<Instruction*>(instruction_offset);
+  }
+  VIXL_DEPRECATED("GetInstructionAt",
+                  Instruction* InstructionAt(ptrdiff_t instruction_offset)) {
+    return GetInstructionAt(instruction_offset);
+  }
+
+  ptrdiff_t GetInstructionOffset(Instruction* instruction) {
+    VIXL_STATIC_ASSERT(sizeof(*instruction) == 1);
+    ptrdiff_t offset =
+        instruction - GetBuffer()->GetStartAddress<Instruction*>();
+    VIXL_ASSERT((0 <= offset) &&
+                (offset < static_cast<ptrdiff_t>(GetBuffer()->GetCapacity())));
+    return offset;
+  }
+  VIXL_DEPRECATED("GetInstructionOffset",
+                  ptrdiff_t InstructionOffset(Instruction* instruction)) {
+    return GetInstructionOffset(instruction);
+  }
+
+  // Instruction set functions.
+
+  // Branch / Jump instructions.
+  // Branch to register.
+  void br(const Register& xn);
+
+  // Branch with link to register.
+  void blr(const Register& xn);
+
+  // Branch to register with return hint.
+  void ret(const Register& xn = lr);
+
+  // Branch to register, with pointer authentication. Using key A and a
+  // modifier of zero [Armv8.3].
+  void braaz(const Register& xn);
+
+  // Branch to register, with pointer authentication. Using key B and a
+  // modifier of zero [Armv8.3].
+  void brabz(const Register& xn);
+
+  // Branch with link to register, with pointer authentication. Using key A
+  // and a modifier of zero [Armv8.3].
+  void blraaz(const Register& xn);
+
+  // Branch with link to register, with pointer authentication. Using key B
+  // and a modifier of zero [Armv8.3].
+  void blrabz(const Register& xn);
+
+  // Return from subroutine, with pointer authentication. Using key A
+  // [Armv8.3].
+  void retaa();
+
+  // Return from subroutine, with pointer authentication. Using key B
+  // [Armv8.3].
+  void retab();
+
+  // Branch to register, with pointer authentication. Using key A [Armv8.3].
+  void braa(const Register& xn, const Register& xm);
+
+  // Branch to register, with pointer authentication. Using key B [Armv8.3].
+  void brab(const Register& xn, const Register& xm);
+
+  // Branch with link to register, with pointer authentication. Using key A
+  // [Armv8.3].
+  void blraa(const Register& xn, const Register& xm);
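+  // As an illustrative sketch (not part of the original header), the
+  // pointer-authentication return instructions are typically paired around a
+  // function body, assuming a hypothetical Assembler `assm`:
+  //
+  //   assm.paciasp();  // Sign LR, using SP as the modifier.
+  //   ...              // Function body.
+  //   assm.retaa();    // Authenticate LR with key A, then return.
+
+  // Branch with link to register, with pointer authentication. Using key B
+  // [Armv8.3].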
+ void blrab(const Register& xn, const Register& xm); + + // Unconditional branch to label. + void b(Label* label); + + // Conditional branch to label. + void b(Label* label, Condition cond); + + // Unconditional branch to PC offset. + void b(int64_t imm26); + + // Conditional branch to PC offset. + void b(int64_t imm19, Condition cond); + + // Branch with link to label. + void bl(Label* label); + + // Branch with link to PC offset. + void bl(int64_t imm26); + + // Compare and branch to label if zero. + void cbz(const Register& rt, Label* label); + + // Compare and branch to PC offset if zero. + void cbz(const Register& rt, int64_t imm19); + + // Compare and branch to label if not zero. + void cbnz(const Register& rt, Label* label); + + // Compare and branch to PC offset if not zero. + void cbnz(const Register& rt, int64_t imm19); + + // Table lookup from one register. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup from two registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup from three registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup from four registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Table lookup extension from one register. + void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup extension from two registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup extension from three registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup extension from four registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Test bit and branch to label if zero. + void tbz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if zero. + void tbz(const Register& rt, unsigned bit_pos, int64_t imm14); + + // Test bit and branch to label if not zero. + void tbnz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if not zero. + void tbnz(const Register& rt, unsigned bit_pos, int64_t imm14); + + // Address calculation instructions. + // Calculate a PC-relative address. Unlike for branches the offset in adr is + // unscaled (i.e. the result can be unaligned). + + // Calculate the address of a label. + void adr(const Register& xd, Label* label); + + // Calculate the address of a PC offset. + void adr(const Register& xd, int64_t imm21); + + // Calculate the page address of a label. + void adrp(const Register& xd, Label* label); + + // Calculate the page address of a PC offset. + void adrp(const Register& xd, int64_t imm21); + + // Data Processing instructions. + // Add. + void add(const Register& rd, const Register& rn, const Operand& operand); + + // Add and update status flags. + void adds(const Register& rd, const Register& rn, const Operand& operand); + + // Compare negative. + void cmn(const Register& rn, const Operand& operand); + + // Subtract. 
+  void sub(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Subtract and update status flags.
+  void subs(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Compare.
+  void cmp(const Register& rn, const Operand& operand);
+
+  // Negate.
+  void neg(const Register& rd, const Operand& operand);
+
+  // Negate and update status flags.
+  void negs(const Register& rd, const Operand& operand);
+
+  // Add with carry bit.
+  void adc(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Add with carry bit and update status flags.
+  void adcs(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Subtract with carry bit.
+  void sbc(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Subtract with carry bit and update status flags.
+  void sbcs(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Negate with carry bit.
+  void ngc(const Register& rd, const Operand& operand);
+
+  // Negate with carry bit and update status flags.
+  void ngcs(const Register& rd, const Operand& operand);
+
+  // Logical instructions.
+  // Bitwise and (A & B).
+  void and_(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise and (A & B) and update status flags.
+  void ands(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bit test and set flags.
+  void tst(const Register& rn, const Operand& operand);
+
+  // Bit clear (A & ~B).
+  void bic(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bit clear (A & ~B) and update status flags.
+  void bics(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or (A | B).
+  void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or not (A | ~B).
+  void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise eor/xor (A ^ B).
+  void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise xnor (A ^ ~B).
+  void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Logical shift left by variable.
+  void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Logical shift right by variable.
+  void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Arithmetic shift right by variable.
+  void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Rotate right by variable.
+  void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Bitfield instructions.
+  // Bitfield move.
+  void bfm(const Register& rd,
+           const Register& rn,
+           unsigned immr,
+           unsigned imms);
+
+  // Signed bitfield move.
+  void sbfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Unsigned bitfield move.
+  void ubfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Bfm aliases.
+  // Bitfield insert.
+  void bfi(const Register& rd,
+           const Register& rn,
+           unsigned lsb,
+           unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    bfm(rd,
+        rn,
+        (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+        width - 1);
+  }
+
+  // Bitfield extract and insert low.
+  void bfxil(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    bfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Bitfield clear [Armv8.2].
+  void bfc(const Register& rd, unsigned lsb, unsigned width) {
+    bfi(rd, AppropriateZeroRegFor(rd), lsb, width);
+  }
+
+  // Sbfm aliases.
+  // Arithmetic shift right.
+  void asr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
+    sbfm(rd, rn, shift, rd.GetSizeInBits() - 1);
+  }
+
+  // Signed bitfield insert with zero at right.
+  void sbfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    sbfm(rd,
+         rn,
+         (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+         width - 1);
+  }
+
+  // Signed bitfield extract.
+  void sbfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    sbfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Signed extend byte.
+  void sxtb(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 7); }
+
+  // Signed extend halfword.
+  void sxth(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 15); }
+
+  // Signed extend word.
+  void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); }
+
+  // Ubfm aliases.
+  // Logical shift left.
+  void lsl(const Register& rd, const Register& rn, unsigned shift) {
+    unsigned reg_size = rd.GetSizeInBits();
+    VIXL_ASSERT(shift < reg_size);
+    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+  }
+
+  // Logical shift right.
+  void lsr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
+    ubfm(rd, rn, shift, rd.GetSizeInBits() - 1);
+  }
+
+  // Unsigned bitfield insert with zero at right.
+  void ubfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    ubfm(rd,
+         rn,
+         (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+         width - 1);
+  }
+
+  // Unsigned bitfield extract.
+  void ubfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    ubfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Unsigned extend byte.
+  void uxtb(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 7); }
+
+  // Unsigned extend halfword.
+  void uxth(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 15); }
+
+  // Unsigned extend word.
+  void uxtw(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 31); }
+
+  // Extract.
+  void extr(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            unsigned lsb);
+
+  // Conditional select: rd = cond ? rn : rm.
+  void csel(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            Condition cond);
+
+  // Conditional select increment: rd = cond ? rn : rm + 1.
+  void csinc(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select inversion: rd = cond ? rn : ~rm.
+  void csinv(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select negation: rd = cond ? rn : -rm.
+  void csneg(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional set: rd = cond ? 1 : 0.
+  void cset(const Register& rd, Condition cond);
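+  // Illustrative sketch (hypothetical Assembler `assm`, not part of this
+  // header): a signed maximum computed with a compare and the conditional
+  // select/set aliases above.
+  //
+  //   assm.cmp(x0, Operand(x1));
+  //   assm.csel(x2, x0, x1, gt);  // x2 = (x0 > x1) ? x0 : x1
+  //   assm.cset(x3, gt);          // x3 = (x0 > x1) ? 1 : 0
+
+  // Conditional set mask: rd = cond ? -1 : 0.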
+ void csetm(const Register& rd, Condition cond); + + // Conditional increment: rd = cond ? rn + 1 : rn. + void cinc(const Register& rd, const Register& rn, Condition cond); + + // Conditional invert: rd = cond ? ~rn : rn. + void cinv(const Register& rd, const Register& rn, Condition cond); + + // Conditional negate: rd = cond ? -rn : rn. + void cneg(const Register& rd, const Register& rn, Condition cond); + + // Rotate right. + void ror(const Register& rd, const Register& rs, unsigned shift) { + extr(rd, rs, rs, shift); + } + + // Conditional comparison. + // Conditional compare negative. + void ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // Conditional compare. + void ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // CRC-32 checksum from byte. + void crc32b(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32 checksum from half-word. + void crc32h(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32 checksum from word. + void crc32w(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32 checksum from double word. + void crc32x(const Register& wd, const Register& wn, const Register& xm); + + // CRC-32 C checksum from byte. + void crc32cb(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32 C checksum from half-word. + void crc32ch(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32 C checksum from word. + void crc32cw(const Register& wd, const Register& wn, const Register& wm); + + // CRC-32C checksum from double word. + void crc32cx(const Register& wd, const Register& wn, const Register& xm); + + // Multiply. + void mul(const Register& rd, const Register& rn, const Register& rm); + + // Negated multiply. + void mneg(const Register& rd, const Register& rn, const Register& rm); + + // Signed long multiply: 32 x 32 -> 64-bit. + void smull(const Register& xd, const Register& wn, const Register& wm); + + // Signed multiply high: 64 x 64 -> 64-bit <127:64>. + void smulh(const Register& xd, const Register& xn, const Register& xm); + + // Multiply and accumulate. + void madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Multiply and subtract. + void msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void smaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa); + + // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void umaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa); + + // Unsigned long multiply: 32 x 32 -> 64-bit. + void umull(const Register& xd, const Register& wn, const Register& wm) { + umaddl(xd, wn, wm, xzr); + } + + // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>. + void umulh(const Register& xd, const Register& xn, const Register& xm); + + // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void smsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa); + + // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void umsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa); + + // Signed integer divide. + void sdiv(const Register& rd, const Register& rn, const Register& rm); + + // Unsigned integer divide. 
+ void udiv(const Register& rd, const Register& rn, const Register& rm); + + // Bit reverse. + void rbit(const Register& rd, const Register& rn); + + // Reverse bytes in 16-bit half words. + void rev16(const Register& rd, const Register& rn); + + // Reverse bytes in 32-bit words. + void rev32(const Register& xd, const Register& xn); + + // Reverse bytes in 64-bit general purpose register, an alias for rev + // [Armv8.2]. + void rev64(const Register& xd, const Register& xn) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits()); + rev(xd, xn); + } + + // Reverse bytes. + void rev(const Register& rd, const Register& rn); + + // Count leading zeroes. + void clz(const Register& rd, const Register& rn); + + // Count leading sign bits. + void cls(const Register& rd, const Register& rn); + + // Pointer Authentication Code for Instruction address, using key A [Armv8.3]. + void pacia(const Register& xd, const Register& rn); + + // Pointer Authentication Code for Instruction address, using key A and a + // modifier of zero [Armv8.3]. + void paciza(const Register& xd); + + // Pointer Authentication Code for Instruction address, using key A, with + // address in x17 and modifier in x16 [Armv8.3]. + void pacia1716(); + + // Pointer Authentication Code for Instruction address, using key A, with + // address in LR and modifier in SP [Armv8.3]. + void paciasp(); + + // Pointer Authentication Code for Instruction address, using key A, with + // address in LR and a modifier of zero [Armv8.3]. + void paciaz(); + + // Pointer Authentication Code for Instruction address, using key B [Armv8.3]. + void pacib(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Instruction address, using key B and a + // modifier of zero [Armv8.3]. + void pacizb(const Register& xd); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in x17 and modifier in x16 [Armv8.3]. + void pacib1716(); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in LR and modifier in SP [Armv8.3]. + void pacibsp(); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in LR and a modifier of zero [Armv8.3]. + void pacibz(); + + // Pointer Authentication Code for Data address, using key A [Armv8.3]. + void pacda(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Data address, using key A and a modifier of + // zero [Armv8.3]. + void pacdza(const Register& xd); + + // Pointer Authentication Code for Data address, using key A, with address in + // x17 and modifier in x16 [Armv8.3]. + void pacda1716(); + + // Pointer Authentication Code for Data address, using key A, with address in + // LR and modifier in SP [Armv8.3]. + void pacdasp(); + + // Pointer Authentication Code for Data address, using key A, with address in + // LR and a modifier of zero [Armv8.3]. + void pacdaz(); + + // Pointer Authentication Code for Data address, using key B [Armv8.3]. + void pacdb(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Data address, using key B and a modifier of + // zero [Armv8.3]. + void pacdzb(const Register& xd); + + // Pointer Authentication Code for Data address, using key B, with address in + // x17 and modifier in x16 [Armv8.3]. + void pacdb1716(); + + // Pointer Authentication Code for Data address, using key B, with address in + // LR and modifier in SP [Armv8.3]. 
+ void pacdbsp(); + + // Pointer Authentication Code for Data address, using key B, with address in + // LR and a modifier of zero [Armv8.3]. + void pacdbz(); + + // Pointer Authentication Code, using Generic key [Armv8.3]. + void pacga(const Register& xd, const Register& xn, const Register& xm); + + // Authenticate Instruction address, using key A [Armv8.3]. + void autia(const Register& xd, const Register& xn); + + // Authenticate Instruction address, using key A and a modifier of zero + // [Armv8.3]. + void autiza(const Register& xd); + + // Authenticate Instruction address, using key A, with address in x17 and + // modifier in x16 [Armv8.3]. + void autia1716(); + + // Authenticate Instruction address, using key A, with address in LR and + // modifier in SP [Armv8.3]. + void autiasp(); + + // Authenticate Instruction address, using key A, with address in LR and a + // modifier of zero [Armv8.3]. + void autiaz(); + + // Authenticate Instruction address, using key B [Armv8.3]. + void autib(const Register& xd, const Register& xn); + + // Authenticate Instruction address, using key B and a modifier of zero + // [Armv8.3]. + void autizb(const Register& xd); + + // Authenticate Instruction address, using key B, with address in x17 and + // modifier in x16 [Armv8.3]. + void autib1716(); + + // Authenticate Instruction address, using key B, with address in LR and + // modifier in SP [Armv8.3]. + void autibsp(); + + // Authenticate Instruction address, using key B, with address in LR and a + // modifier of zero [Armv8.3]. + void autibz(); + + // Authenticate Data address, using key A [Armv8.3]. + void autda(const Register& xd, const Register& xn); + + // Authenticate Data address, using key A and a modifier of zero [Armv8.3]. + void autdza(const Register& xd); + + // Authenticate Data address, using key A, with address in x17 and modifier in + // x16 [Armv8.3]. + void autda1716(); + + // Authenticate Data address, using key A, with address in LR and modifier in + // SP [Armv8.3]. + void autdasp(); + + // Authenticate Data address, using key A, with address in LR and a modifier + // of zero [Armv8.3]. + void autdaz(); + + // Authenticate Data address, using key B [Armv8.3]. + void autdb(const Register& xd, const Register& xn); + + // Authenticate Data address, using key B and a modifier of zero [Armv8.3]. + void autdzb(const Register& xd); + + // Authenticate Data address, using key B, with address in x17 and modifier in + // x16 [Armv8.3]. + void autdb1716(); + + // Authenticate Data address, using key B, with address in LR and modifier in + // SP [Armv8.3]. + void autdbsp(); + + // Authenticate Data address, using key B, with address in LR and a modifier + // of zero [Armv8.3]. + void autdbz(); + + // Strip Pointer Authentication Code of Data address [Armv8.3]. + void xpacd(const Register& xd); + + // Strip Pointer Authentication Code of Instruction address [Armv8.3]. + void xpaci(const Register& xd); + + // Strip Pointer Authentication Code of Instruction address in LR [Armv8.3]. + void xpaclri(); + + // Memory instructions. + // Load integer or FP register. + void ldr(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store integer or FP register. + void str(const CPURegister& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load word with sign extension. + void ldrsw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte. 
+ void ldrb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store byte. + void strb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte with sign extension. + void ldrsb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word. + void ldrh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store half-word. + void strh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word with sign extension. + void ldrsh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load integer or FP register (with unscaled offset). + void ldur(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store integer or FP register (with unscaled offset). + void stur(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load word with sign extension. + void ldursw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte (with unscaled offset). + void ldurb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store byte (with unscaled offset). + void sturb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte with sign extension (and unscaled offset). + void ldursb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word (with unscaled offset). + void ldurh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store half-word (with unscaled offset). + void sturh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word with sign extension (and unscaled offset). + void ldursh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load integer or FP register pair. + void ldp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair. + void stp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst); + + // Load word pair with sign extension. + void ldpsw(const Register& xt, const Register& xt2, const MemOperand& src); + + // Load integer or FP register pair, non-temporal. + void ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair, non-temporal. + void stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst); + + // Load integer or FP register from literal pool. + void ldr(const CPURegister& rt, RawLiteral* literal); + + // Load word with sign extension from literal pool. + void ldrsw(const Register& xt, RawLiteral* literal); + + // Load integer or FP register from pc + imm19 << 2. + void ldr(const CPURegister& rt, int64_t imm19); + + // Load word with sign extension from pc + imm19 << 2. + void ldrsw(const Register& xt, int64_t imm19); + + // Store exclusive byte. 
+ void stxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive half-word. + void stxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive register. + void stxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load exclusive byte. + void ldxrb(const Register& rt, const MemOperand& src); + + // Load exclusive half-word. + void ldxrh(const Register& rt, const MemOperand& src); + + // Load exclusive register. + void ldxr(const Register& rt, const MemOperand& src); + + // Store exclusive register pair. + void stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load exclusive register pair. + void ldxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release exclusive byte. + void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive half-word. + void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive register. + void stlxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load-acquire exclusive byte. + void ldaxrb(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive half-word. + void ldaxrh(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive register. + void ldaxr(const Register& rt, const MemOperand& src); + + // Store-release exclusive register pair. + void stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load-acquire exclusive register pair. + void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release byte. + void stlrb(const Register& rt, const MemOperand& dst); + + // Store-release half-word. + void stlrh(const Register& rt, const MemOperand& dst); + + // Store-release register. + void stlr(const Register& rt, const MemOperand& dst); + + // Load-acquire byte. + void ldarb(const Register& rt, const MemOperand& src); + + // Load-acquire half-word. + void ldarh(const Register& rt, const MemOperand& src); + + // Load-acquire register. + void ldar(const Register& rt, const MemOperand& src); + + // Store LORelease byte [Armv8.1]. + void stllrb(const Register& rt, const MemOperand& dst); + + // Store LORelease half-word [Armv8.1]. + void stllrh(const Register& rt, const MemOperand& dst); + + // Store LORelease register [Armv8.1]. + void stllr(const Register& rt, const MemOperand& dst); + + // Load LORelease byte [Armv8.1]. + void ldlarb(const Register& rt, const MemOperand& src); + + // Load LORelease half-word [Armv8.1]. + void ldlarh(const Register& rt, const MemOperand& src); + + // Load LORelease register [Armv8.1]. + void ldlar(const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void cas(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casa(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casl(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casal(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. 
+ void casb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void casab(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void caslb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void casalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void cash(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void casah(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void caslh(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void casalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void casp(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspa(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspl(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspal(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Atomic add on byte in memory [Armv8.1] + void ldaddb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Load-acquire semantics [Armv8.1] + void ldaddab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Store-release semantics [Armv8.1] + void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory [Armv8.1] + void ldaddh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1] + void ldaddah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Store-release semantics [Armv8.1] + void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory [Armv8.1] + void ldadd(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Load-acquire semantics + // [Armv8.1] + void ldadda(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Store-release semantics + // [Armv8.1] + void ldaddl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in 
memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldaddal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory [Armv8.1] + void ldclrb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1] + void ldclrab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1] + void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldclralb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory [Armv8.1] + void ldclrh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldclrah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldclralh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory [Armv8.1] + void ldclr(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldclra(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldclrl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldclral(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory [Armv8.1] + void ldeorb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldeorab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Store-release semantics + // [Armv8.1] + void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory [Armv8.1] + void ldeorh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldeorah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory [Armv8.1] + void 
ldeor(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldeora(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldeorl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldeoral(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory [Armv8.1] + void ldsetb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1] + void ldsetab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Store-release semantics [Armv8.1] + void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory [Armv8.1] + void ldseth(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1] + void ldsetah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory [Armv8.1] + void ldset(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Load-acquire semantics + // [Armv8.1] + void ldseta(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsetl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsetal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory [Armv8.1] + void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Store-release semantics + // [Armv8.1] + void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory [Armv8.1] + void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldsmaxah(const Register& rs, const Register& 
rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory [Armv8.1] + void ldsmax(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory [Armv8.1] + void ldsminb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldsminab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Store-release semantics + // [Armv8.1] + void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory [Armv8.1] + void ldsminh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldsminah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory [Armv8.1] + void ldsmin(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldsmina(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsminl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldsminal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory [Armv8.1] + void ldumaxb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldumaxab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, 
with Store-release semantics + // [Armv8.1] + void ldumaxlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldumaxalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory [Armv8.1] + void ldumaxh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldumaxah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldumaxlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldumaxalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory [Armv8.1] + void ldumax(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldumaxa(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldumaxl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldumaxal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory [Armv8.1] + void lduminb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void lduminab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Store-release semantics + // [Armv8.1] + void lduminlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void lduminalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory [Armv8.1] + void lduminh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void lduminah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Store-release semantics + // [Armv8.1] + void lduminlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void lduminalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory [Armv8.1] + void ldumin(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldumina(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + 
void lduminl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void lduminal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, without return. [Armv8.1] + void staddb(const Register& rs, const MemOperand& src); + + // Atomic add on byte in memory, with Store-release semantics and without + // return. [Armv8.1] + void staddlb(const Register& rs, const MemOperand& src); + + // Atomic add on halfword in memory, without return. [Armv8.1] + void staddh(const Register& rs, const MemOperand& src); + + // Atomic add on halfword in memory, with Store-release semantics and without + // return. [Armv8.1] + void staddlh(const Register& rs, const MemOperand& src); + + // Atomic add on word or doubleword in memory, without return. [Armv8.1] + void stadd(const Register& rs, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Store-release semantics + // and without return. [Armv8.1] + void staddl(const Register& rs, const MemOperand& src); + + // Atomic bit clear on byte in memory, without return. [Armv8.1] + void stclrb(const Register& rs, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stclrlb(const Register& rs, const MemOperand& src); + + // Atomic bit clear on halfword in memory, without return. [Armv8.1] + void stclrh(const Register& rs, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Store-release semantics and + // without return. [Armv8.1] + void stclrlh(const Register& rs, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, without return. [Armv8.1] + void stclr(const Register& rs, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stclrl(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, without return. [Armv8.1] + void steorb(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void steorlb(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, without return. [Armv8.1] + void steorh(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void steorlh(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, without return. + // [Armv8.1] + void steor(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void steorl(const Register& rs, const MemOperand& src); + + // Atomic bit set on byte in memory, without return. [Armv8.1] + void stsetb(const Register& rs, const MemOperand& src); + + // Atomic bit set on byte in memory, with Store-release semantics and without + // return. [Armv8.1] + void stsetlb(const Register& rs, const MemOperand& src); + + // Atomic bit set on halfword in memory, without return. [Armv8.1] + void stseth(const Register& rs, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Store-release semantics and + // without return. 
[Armv8.1]
+  void stsetlh(const Register& rs, const MemOperand& src);
+
+  // Atomic bit set on word or doubleword in memory, without return. [Armv8.1]
+  void stset(const Register& rs, const MemOperand& src);
+
+  // Atomic bit set on word or doubleword in memory, with Store-release
+  // semantics and without return. [Armv8.1]
+  void stsetl(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on byte in memory, without return. [Armv8.1]
+  void stsmaxb(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on byte in memory, with Store-release semantics and
+  // without return. [Armv8.1]
+  void stsmaxlb(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on halfword in memory, without return. [Armv8.1]
+  void stsmaxh(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on halfword in memory, with Store-release semantics
+  // and without return. [Armv8.1]
+  void stsmaxlh(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on word or doubleword in memory, without return.
+  // [Armv8.1]
+  void stsmax(const Register& rs, const MemOperand& src);
+
+  // Atomic signed maximum on word or doubleword in memory, with Store-release
+  // semantics and without return. [Armv8.1]
+  void stsmaxl(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on byte in memory, without return. [Armv8.1]
+  void stsminb(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on byte in memory, with Store-release semantics and
+  // without return. [Armv8.1]
+  void stsminlb(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on halfword in memory, without return. [Armv8.1]
+  void stsminh(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on halfword in memory, with Store-release semantics
+  // and without return. [Armv8.1]
+  void stsminlh(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on word or doubleword in memory, without return.
+  // [Armv8.1]
+  void stsmin(const Register& rs, const MemOperand& src);
+
+  // Atomic signed minimum on word or doubleword in memory, with Store-release
+  // semantics and without return. [Armv8.1]
+  void stsminl(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on byte in memory, without return. [Armv8.1]
+  void stumaxb(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on byte in memory, with Store-release semantics
+  // and without return. [Armv8.1]
+  void stumaxlb(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on halfword in memory, without return. [Armv8.1]
+  void stumaxh(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on halfword in memory, with Store-release
+  // semantics and without return. [Armv8.1]
+  void stumaxlh(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on word or doubleword in memory, without return.
+  // [Armv8.1]
+  void stumax(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned maximum on word or doubleword in memory, with
+  // Store-release semantics and without return. [Armv8.1]
+  void stumaxl(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned minimum on byte in memory, without return. [Armv8.1]
+  void stuminb(const Register& rs, const MemOperand& src);
+
+  // Atomic unsigned minimum on byte in memory, with Store-release semantics
+  // and without return.
[Armv8.1] + void stuminlb(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, without return. [Armv8.1] + void stuminh(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void stuminlh(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, without return. + // [Armv8.1] + void stumin(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stuminl(const Register& rs, const MemOperand& src); + + // Swap byte in memory [Armv8.1] + void swpb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Load-acquire semantics [Armv8.1] + void swpab(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Store-release semantics [Armv8.1] + void swplb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void swpalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory [Armv8.1] + void swph(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Load-acquire semantics [Armv8.1] + void swpah(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Store-release semantics [Armv8.1] + void swplh(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void swpalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory [Armv8.1] + void swp(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory, with Load-acquire semantics [Armv8.1] + void swpa(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory, with Store-release semantics [Armv8.1] + void swpl(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void swpal(const Register& rs, const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register byte [Armv8.3] + void ldaprb(const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register halfword [Armv8.3] + void ldaprh(const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register word or doubleword [Armv8.3] + void ldapr(const Register& rt, const MemOperand& src); + + // Prefetch memory. + void prfm(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // Prefetch memory (with unscaled offset). + void prfum(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Prefetch memory in the literal pool. + void prfm(PrefetchOperation op, RawLiteral* literal); + + // Prefetch from pc + imm19 << 2. + void prfm(PrefetchOperation op, int64_t imm19); + + // Move instructions. The default shift of -1 indicates that the move + // instruction will calculate an appropriate 16-bit immediate and left shift + // that is equal to the 64-bit immediate argument. 
If an explicit left shift
+  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
+  //
+  // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+  // most-significant.
+
+  // Move immediate and keep.
+  void movk(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVK);
+  }
+
+  // Move inverted immediate.
+  void movn(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVN);
+  }
+
+  // Move immediate.
+  void movz(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVZ);
+  }
+
+  // Misc instructions.
+  // Monitor debug-mode breakpoint.
+  void brk(int code);
+
+  // Halting debug-mode breakpoint.
+  void hlt(int code);
+
+  // Generate exception targeting EL1.
+  void svc(int code);
+
+  // Move register to register.
+  void mov(const Register& rd, const Register& rn);
+
+  // Move inverted operand to register.
+  void mvn(const Register& rd, const Operand& operand);
+
+  // System instructions.
+  // Move to register from system register.
+  void mrs(const Register& xt, SystemRegister sysreg);
+
+  // Move from register to system register.
+  void msr(SystemRegister sysreg, const Register& xt);
+
+  // System instruction.
+  void sys(int op1, int crn, int crm, int op2, const Register& xt = xzr);
+
+  // System instruction with pre-encoded op (op1:crn:crm:op2).
+  void sys(int op, const Register& xt = xzr);
+
+  // System data cache operation.
+  void dc(DataCacheOp op, const Register& rt);
+
+  // System instruction cache operation.
+  void ic(InstructionCacheOp op, const Register& rt);
+
+  // System hint (named type).
+  void hint(SystemHint code);
+
+  // System hint (numbered type).
+  void hint(int imm7);
+
+  // Clear exclusive monitor.
+  void clrex(int imm4 = 0xf);
+
+  // Data memory barrier.
+  void dmb(BarrierDomain domain, BarrierType type);
+
+  // Data synchronization barrier.
+  void dsb(BarrierDomain domain, BarrierType type);
+
+  // Instruction synchronization barrier.
+  void isb();
+
+  // Error synchronization barrier.
+  void esb();
+
+  // Conditional speculation dependency barrier.
+  void csdb();
+
+  // Alias for system instructions.
+  // No-op.
+  void nop() { hint(NOP); }
+
+  // FP and NEON instructions.
+  // Move double precision immediate to FP register.
+  void fmov(const VRegister& vd, double imm);
+
+  // Move single precision immediate to FP register.
+  void fmov(const VRegister& vd, float imm);
+
+  // Move half precision immediate to FP register [Armv8.2].
+  void fmov(const VRegister& vd, Float16 imm);
+
+  // Move FP register to register.
+  void fmov(const Register& rd, const VRegister& fn);
+
+  // Move register to FP register.
+  void fmov(const VRegister& vd, const Register& rn);
+
+  // Move FP register to FP register.
+  void fmov(const VRegister& vd, const VRegister& fn);
+
+  // Move 64-bit register to top half of 128-bit FP register.
+  void fmov(const VRegister& vd, int index, const Register& rn);
+
+  // Move top half of 128-bit FP register to 64-bit register.
+  void fmov(const Register& rd, const VRegister& vn, int index);
+
+  // FP add.
+  void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP subtract.
+  void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
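+  // A brief usage sketch of the move-wide shift rules described above,
+  // assuming an Assembler reachable as "masm" (an illustrative name):
+  //
+  //   masm.movz(x0, 0xabcd, 16);  // x0 = 0xabcd0000 (explicit shift of 16).
+  //   masm.movk(x0, 0x1234);      // Shift inferred as 0; other bits kept,
+  //                               // so x0 = 0xabcd1234.
+  //   masm.movn(w1, 0);           // w1 = ~0 = 0xffffffff.
+
+  // FP multiply.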
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add. + void fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract. + void fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-add and negate. + void fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract and negate. + void fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP multiply-negate scalar. + void fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal exponent scalar. + void frecpx(const VRegister& vd, const VRegister& vn); + + // FP divide. + void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum. + void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum. + void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum number. + void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum number. + void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP absolute. + void fabs(const VRegister& vd, const VRegister& vn); + + // FP negate. + void fneg(const VRegister& vd, const VRegister& vn); + + // FP square root. + void fsqrt(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to away. + void frinta(const VRegister& vd, const VRegister& vn); + + // FP round to integer, implicit rounding. + void frinti(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward minus infinity. + void frintm(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to even. + void frintn(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward plus infinity. + void frintp(const VRegister& vd, const VRegister& vn); + + // FP round to integer, exact, implicit rounding. + void frintx(const VRegister& vd, const VRegister& vn); + + // FP round to integer, towards zero. + void frintz(const VRegister& vd, const VRegister& vn); + + void FPCompareMacro(const VRegister& vn, double value, FPTrapFlags trap); + + void FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap); + + // FP compare registers. + void fcmp(const VRegister& vn, const VRegister& vm); + + // FP compare immediate. + void fcmp(const VRegister& vn, double value); + + void FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap); + + // FP conditional compare. + void fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP signaling compare registers. + void fcmpe(const VRegister& vn, const VRegister& vm); + + // FP signaling compare immediate. + void fcmpe(const VRegister& vn, double value); + + // FP conditional signaling compare. + void fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP conditional select. + void fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond); + + // Common FP Convert functions. 
+  void NEONFPConvertToInt(const Register& rd, const VRegister& vn, Instr op);
+  void NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, Instr op);
+  void NEONFP16ConvertToInt(const VRegister& vd, const VRegister& vn, Instr op);
+
+  // FP convert between precisions.
+  void fcvt(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to higher precision.
+  void fcvtl(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to higher precision (second part).
+  void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision.
+  void fcvtn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision (second part).
+  void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision, rounding to odd.
+  void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision, rounding to odd (second part).
+  void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to away.
+  void fcvtas(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to away.
+  void fcvtau(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to away.
+  void fcvtas(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to away.
+  void fcvtau(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, round towards -infinity.
+  void fcvtms(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards -infinity.
+  void fcvtmu(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, round towards -infinity.
+  void fcvtms(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards -infinity.
+  void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to even.
+  void fcvtns(const Register& rd, const VRegister& vn);
+
+  // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+  void fjcvtzs(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to even.
+  void fcvtnu(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to even.
+  void fcvtns(const VRegister& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to even.
+  void fcvtnu(const VRegister& rd, const VRegister& vn);
+
+  // FP convert to signed integer or fixed-point, round towards zero.
+  void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to unsigned integer or fixed-point, round towards zero.
+  void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to signed integer or fixed-point, round towards zero.
+  void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to unsigned integer or fixed-point, round towards zero.
+  void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to signed integer, round towards +infinity.
+  void fcvtps(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards +infinity.
+  void fcvtpu(const Register& rd, const VRegister& vn);
+
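+  // A note on the fcvt naming scheme above: the rounding mode is encoded in
+  // the suffix (a = ties to away, m = towards -infinity, n = ties to even,
+  // p = towards +infinity, z = towards zero), followed by s/u for a signed or
+  // unsigned result. For example (assuming an Assembler "masm"):
+  //
+  //   masm.fcvtms(x0, d0);  // x0 = floor(d0), as a signed 64-bit integer.
+  //   masm.fcvtzu(w1, s1);  // w1 = trunc(s1), as an unsigned 32-bit integer.
+
+  // FP convert to signed integer, round towards +infinity.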
+ void fcvtps(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const VRegister& vd, const VRegister& vn); + + // Convert signed integer or fixed point to FP. + void scvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert unsigned integer or fixed point to FP. + void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert signed integer or fixed-point to FP. + void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Convert unsigned integer or fixed-point to FP. + void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Unsigned absolute difference. + void uabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference. + void sabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate. + void uaba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate. + void saba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add. + void add(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract. + void sub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving add. + void uhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving add. + void shadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned rounding halving add. + void urhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed rounding halving add. + void srhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving sub. + void uhsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving sub. + void shsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating add. + void uqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating add. + void sqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating subtract. + void uqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating subtract. + void sqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pairwise. + void addp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pair of elements scalar. + void addp(const VRegister& vd, const VRegister& vn); + + // Multiply-add to accumulator. + void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply-subtract to accumulator. + void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply. + void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply by scalar element. + void mul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-add by scalar element. + void mla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-subtract by scalar element. + void mls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element. + void smlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element (second part). 
+  void smlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply-add by scalar element.
+  void umlal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply-add by scalar element (second part).
+  void umlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed long multiply-sub by scalar element.
+  void smlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Signed long multiply-sub by scalar element (second part).
+  void smlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply-sub by scalar element.
+  void umlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply-sub by scalar element (second part).
+  void umlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed long multiply by scalar element.
+  void smull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Signed long multiply by scalar element (second part).
+  void smull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply by scalar element.
+  void umull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply by scalar element (second part).
+  void umull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed saturating doubling long multiply by element.
+  void sqdmull(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating doubling long multiply by element (second part).
+  void sqdmull2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Signed saturating doubling long multiply-add by element.
+  void sqdmlal(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating doubling long multiply-add by element (second part).
+  void sqdmlal2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Signed saturating doubling long multiply-sub by element.
+  void sqdmlsl(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating doubling long multiply-sub by element (second part).
+  void sqdmlsl2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Compare equal.
+  void cmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare signed greater than or equal.
+  void cmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare signed greater than.
+  void cmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare unsigned higher.
+  void cmhi(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare unsigned higher or same.
+  void cmhs(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare bitwise test bits nonzero.
+  void cmtst(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Compare bitwise to zero.
+  void cmeq(const VRegister& vd, const VRegister& vn, int value);
+
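+  // Note that the "2" ("second part") variants read from the upper half of
+  // their source vectors, where the plain forms use the lower half. For the
+  // vector (non-element) forms, for example (hypothetical "masm"):
+  //
+  //   masm.smull(v0.V2D(), v1.V2S(), v2.V2S());   // Lanes 0 and 1 of v1/v2.
+  //   masm.smull2(v0.V2D(), v1.V4S(), v2.V4S());  // Lanes 2 and 3 of v1/v2.
+
+  // Compare signed greater than or equal to zero.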
+ void cmge(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed greater than zero. + void cmgt(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than or equal to zero. + void cmle(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than zero. + void cmlt(const VRegister& vd, const VRegister& vn, int value); + + // Signed shift left by register. + void sshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned shift left by register. + void ushl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating shift left by register. + void sqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating shift left by register. + void uqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed rounding shift left by register. + void srshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned rounding shift left by register. + void urshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding shift left by register. + void sqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating rounding shift left by register. + void uqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise and. + void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise or. + void orr(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise or immediate. + void orr(const VRegister& vd, const int imm8, const int left_shift = 0); + + // Move register to register. + void mov(const VRegister& vd, const VRegister& vn); + + // Bitwise orn. + void orn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise eor. + void eor(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bit clear immediate. + void bic(const VRegister& vd, const int imm8, const int left_shift = 0); + + // Bit clear. + void bic(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if false. + void bif(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if true. + void bit(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise select. + void bsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply. + void pmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Vector move immediate. + void movi(const VRegister& vd, + const uint64_t imm, + Shift shift = LSL, + const int shift_amount = 0); + + // Bitwise not. + void mvn(const VRegister& vd, const VRegister& vn); + + // Vector move inverted immediate. + void mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0); + + // Signed saturating accumulate of unsigned value. + void suqadd(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating accumulate of signed value. + void usqadd(const VRegister& vd, const VRegister& vn); + + // Absolute value. + void abs(const VRegister& vd, const VRegister& vn); + + // Signed saturating absolute value. + void sqabs(const VRegister& vd, const VRegister& vn); + + // Negate. + void neg(const VRegister& vd, const VRegister& vn); + + // Signed saturating negate. + void sqneg(const VRegister& vd, const VRegister& vn); + + // Bitwise not. 
+ void not_(const VRegister& vd, const VRegister& vn); + + // Extract narrow. + void xtn(const VRegister& vd, const VRegister& vn); + + // Extract narrow (second part). + void xtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow. + void sqxtn(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow (second part). + void sqxtn2(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow. + void uqxtn(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow (second part). + void uqxtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow. + void sqxtun(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow (second part). + void sqxtun2(const VRegister& vd, const VRegister& vn); + + // Extract vector from pair of vectors. + void ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index); + + // Duplicate vector element to vector or scalar. + void dup(const VRegister& vd, const VRegister& vn, int vn_index); + + // Move vector element to scalar. + void mov(const VRegister& vd, const VRegister& vn, int vn_index); + + // Duplicate general-purpose register to vector. + void dup(const VRegister& vd, const Register& rn); + + // Insert vector element from another vector element. + void ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Move vector element to another vector element. + void mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Insert vector element from general-purpose register. + void ins(const VRegister& vd, int vd_index, const Register& rn); + + // Move general-purpose register to a vector element. + void mov(const VRegister& vd, int vd_index, const Register& rn); + + // Unsigned move vector element to general-purpose register. + void umov(const Register& rd, const VRegister& vn, int vn_index); + + // Move vector element to general-purpose register. + void mov(const Register& rd, const VRegister& vn, int vn_index); + + // Signed move vector element to general-purpose register. + void smov(const Register& rd, const VRegister& vn, int vn_index); + + // One-element structure load to one register. + void ld1(const VRegister& vt, const MemOperand& src); + + // One-element structure load to two registers. + void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure load to three registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure load to four registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure load to one lane. + void ld1(const VRegister& vt, int lane, const MemOperand& src); + + // One-element single structure load to all lanes. + void ld1r(const VRegister& vt, const MemOperand& src); + + // Two-element structure load. + void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure load to one lane. + void ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Two-element single structure load to all lanes. + void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Three-element structure load. 
+ void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure load to one lane. + void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Three-element single structure load to all lanes. + void ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Four-element structure load. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure load to one lane. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Four-element single structure load to all lanes. + void ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Count leading sign bits. + void cls(const VRegister& vd, const VRegister& vn); + + // Count leading zero bits (vector). + void clz(const VRegister& vd, const VRegister& vn); + + // Population count per byte. + void cnt(const VRegister& vd, const VRegister& vn); + + // Reverse bit order. + void rbit(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 16-bit halfwords. + void rev16(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 32-bit words. + void rev32(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 64-bit doublewords. + void rev64(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal square root estimate. + void ursqrte(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal estimate. + void urecpe(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add. + void saddlp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add. + void uaddlp(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add and accumulate. + void sadalp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add and accumulate. + void uadalp(const VRegister& vd, const VRegister& vn); + + // Shift left by immediate. + void shl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left by immediate. + void sqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left unsigned by immediate. + void sqshlu(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift left by immediate. + void uqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate. + void sshll(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate (second part). + void sshll2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed extend long. + void sxtl(const VRegister& vd, const VRegister& vn); + + // Signed extend long (second part). + void sxtl2(const VRegister& vd, const VRegister& vn); + + // Unsigned shift left long by immediate. + void ushll(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift left long by immediate (second part). + void ushll2(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size. + void shll(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size (second part). 
+ void shll2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned extend long. + void uxtl(const VRegister& vd, const VRegister& vn); + + // Unsigned extend long (second part). + void uxtl2(const VRegister& vd, const VRegister& vn); + + // Shift left by immediate and insert. + void sli(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right by immediate and insert. + void sri(const VRegister& vd, const VRegister& vn, int shift); + + // Signed maximum. + void smax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed pairwise maximum. + void smaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add across vector. + void addv(const VRegister& vd, const VRegister& vn); + + // Signed add long across vector. + void saddlv(const VRegister& vd, const VRegister& vn); + + // Unsigned add long across vector. + void uaddlv(const VRegister& vd, const VRegister& vn); + + // FP maximum number across vector. + void fmaxnmv(const VRegister& vd, const VRegister& vn); + + // FP maximum across vector. + void fmaxv(const VRegister& vd, const VRegister& vn); + + // FP minimum number across vector. + void fminnmv(const VRegister& vd, const VRegister& vn); + + // FP minimum across vector. + void fminv(const VRegister& vd, const VRegister& vn); + + // Signed maximum across vector. + void smaxv(const VRegister& vd, const VRegister& vn); + + // Signed minimum. + void smin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum pairwise. + void sminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum across vector. + void sminv(const VRegister& vd, const VRegister& vn); + + // One-element structure store from one register. + void st1(const VRegister& vt, const MemOperand& src); + + // One-element structure store from two registers. + void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure store from three registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure store from four registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure store from one lane. + void st1(const VRegister& vt, int lane, const MemOperand& src); + + // Two-element structure store from two registers. + void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure store from two lanes. + void st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Three-element structure store from three registers. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure store from three lanes. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Four-element structure store from four registers. + void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure store from four lanes. + void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Unsigned add long. 
+ void uaddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add long (second part). + void uaddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide. + void uaddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide (second part). + void uaddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long. + void saddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long (second part). + void saddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide. + void saddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide (second part). + void saddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long. + void usubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long (second part). + void usubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide. + void usubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide (second part). + void usubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long. + void ssubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long (second part). + void ssubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide. + void ssubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide (second part). + void ssubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum. + void umax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise maximum. + void umaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum across vector. + void umaxv(const VRegister& vd, const VRegister& vn); + + // Unsigned minimum. + void umin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise minimum. + void uminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned minimum across vector. + void uminv(const VRegister& vd, const VRegister& vn); + + // Transpose vectors (primary). + void trn1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Transpose vectors (secondary). + void trn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (primary). + void uzp1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (secondary). + void uzp2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (primary). + void zip1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (secondary). + void zip2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed shift right by immediate. + void sshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate. + void ushr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate. + void srshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate. 
+ void urshr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift right by immediate and accumulate. + void ssra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate and accumulate. + void usra(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate and accumulate. + void srsra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate and accumulate. + void ursra(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate. + void shrn(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate (second part). + void shrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate. + void rshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate (second part). + void rshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate. + void uqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate (second part). + void uqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate. + void uqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate (second part). + void uqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate. + void sqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate (second part). + void sqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate. + void sqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate (second part). + void sqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate. + void sqshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate (second part). + void sqshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate. + void sqrshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate (second part). + void sqrshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // FP reciprocal step. + void frecps(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal estimate. + void frecpe(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root estimate. + void frsqrte(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root step. + void frsqrts(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long. + void sabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long (second part). + void sabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long. 
+ void uabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long (second part). + void uabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long. + void sabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long (second part). + void sabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long. + void uabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long (second part). + void uabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long. + void pmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long (second part). + void pmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add. + void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add (second part). + void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add. + void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add (second part). + void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub. + void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub (second part). + void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub. + void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub (second part). + void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply. + void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply (second part). + void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add. + void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add (second part). + void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract. + void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract (second part). + void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply. + void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply (second part). + void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling multiply returning high half. + void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply returning high half. + void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed dot product [Armv8.2]. + void sdot(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply accumulate returning high + // half [Armv8.1]. 
+  void sqrdmlah(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Unsigned dot product [Armv8.2].
+  void udot(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Signed saturating rounding doubling multiply subtract returning high half
+  // [Armv8.1].
+  void sqrdmlsh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Signed saturating doubling multiply element returning high half.
+  void sqdmulh(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating rounding doubling multiply element returning high half.
+  void sqrdmulh(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Signed dot product by element [Armv8.2].
+  void sdot(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // Signed saturating rounding doubling multiply accumulate element returning
+  // high half [Armv8.1].
+  void sqrdmlah(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Unsigned dot product by element [Armv8.2].
+  void udot(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // Signed saturating rounding doubling multiply subtract element returning
+  // high half [Armv8.1].
+  void sqrdmlsh(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Unsigned long multiply.
+  void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Unsigned long multiply (second part).
+  void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Add narrow returning high half.
+  void addhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Add narrow returning high half (second part).
+  void addhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Rounding add narrow returning high half.
+  void raddhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Rounding add narrow returning high half (second part).
+  void raddhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Subtract narrow returning high half.
+  void subhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Subtract narrow returning high half (second part).
+  void subhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Rounding subtract narrow returning high half.
+  void rsubhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Rounding subtract narrow returning high half (second part).
+  void rsubhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP vector multiply accumulate.
+  void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP vector multiply subtract.
+  void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP vector multiply extended.
+  void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP absolute greater than or equal.
+  void facge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP absolute greater than.
+  void facgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP multiply by element.
+  void fmul(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP fused multiply-add to accumulator by element.
+  void fmla(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP fused multiply-sub from accumulator by element.
+  void fmls(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP multiply extended by element.
+  void fmulx(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // FP compare equal.
+  void fcmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP greater than.
+  void fcmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP greater than or equal.
+  void fcmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP compare equal to zero.
+  void fcmeq(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP greater than zero.
+  void fcmgt(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP greater than or equal to zero.
+  void fcmge(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP less than or equal to zero.
+  void fcmle(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP less than zero.
+  void fcmlt(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP absolute difference.
+  void fabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise add vector.
+  void faddp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise add scalar.
+  void faddp(const VRegister& vd, const VRegister& vn);
+
+  // FP pairwise maximum vector.
+  void fmaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise maximum scalar.
+  void fmaxp(const VRegister& vd, const VRegister& vn);
+
+  // FP pairwise minimum vector.
+  void fminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise minimum scalar.
+  void fminp(const VRegister& vd, const VRegister& vn);
+
+  // FP pairwise maximum number vector.
+  void fmaxnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise maximum number scalar.
+  void fmaxnmp(const VRegister& vd, const VRegister& vn);
+
+  // FP pairwise minimum number vector.
+  void fminnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP pairwise minimum number scalar.
+  void fminnmp(const VRegister& vd, const VRegister& vn);
+
+  // v8.3 complex numbers - note that these are only partial/helper functions
+  // and must be used in series in order to perform full CN operations.
+  // FP complex multiply accumulate (by element) [Armv8.3].
+  void fcmla(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index,
+             int rot);
+
+  // FP complex multiply accumulate [Armv8.3].
+  void fcmla(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int rot);
+
+  // FP complex add [Armv8.3].
+  void fcadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int rot);
+
+  // Emit generic instructions.
+  // Emit raw instructions into the instruction stream.
+  void dci(Instr raw_inst) { Emit(raw_inst); }
+
+  // Emit 32 bits of data into the instruction stream.
+  void dc32(uint32_t data) { dc(data); }
+
+  // Emit 64 bits of data into the instruction stream.
+  void dc64(uint64_t data) { dc(data); }
+
+  // Emit data in the instruction stream.
+  template <typename T>
+  void dc(T data) {
+    VIXL_ASSERT(AllowAssembler());
+    GetBuffer()->Emit(data);
+  }
+
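+  // A short sketch of emitting raw data with the helpers above, e.g. to place
+  // a hand-built table directly in the code stream (values are illustrative):
+  //
+  //   masm.dci(0xd503201f);            // Raw 32-bit instruction word (a NOP).
+  //   masm.dc32(0xdeadbeef);           // Four bytes of data.
+  //   masm.dc64(0x0123456789abcdefu);  // Eight bytes of data.
+
+  // Copy a string into the instruction stream, including the terminating NULL
+  // character.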
The instruction pointer is then aligned correctly for
+  // subsequent instructions.
+  void EmitString(const char* string) {
+    VIXL_ASSERT(string != NULL);
+    VIXL_ASSERT(AllowAssembler());
+
+    GetBuffer()->EmitString(string);
+    GetBuffer()->Align();
+  }
+
+  // Code generation helpers.
+
+  // Register encoding.
+  static Instr Rd(CPURegister rd) {
+    VIXL_ASSERT(rd.GetCode() != kSPRegInternalCode);
+    return rd.GetCode() << Rd_offset;
+  }
+
+  static Instr Rn(CPURegister rn) {
+    VIXL_ASSERT(rn.GetCode() != kSPRegInternalCode);
+    return rn.GetCode() << Rn_offset;
+  }
+
+  static Instr Rm(CPURegister rm) {
+    VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
+    return rm.GetCode() << Rm_offset;
+  }
+
+  static Instr RmNot31(CPURegister rm) {
+    VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode);
+    VIXL_ASSERT(!rm.IsZero());
+    return Rm(rm);
+  }
+
+  static Instr Ra(CPURegister ra) {
+    VIXL_ASSERT(ra.GetCode() != kSPRegInternalCode);
+    return ra.GetCode() << Ra_offset;
+  }
+
+  static Instr Rt(CPURegister rt) {
+    VIXL_ASSERT(rt.GetCode() != kSPRegInternalCode);
+    return rt.GetCode() << Rt_offset;
+  }
+
+  static Instr Rt2(CPURegister rt2) {
+    VIXL_ASSERT(rt2.GetCode() != kSPRegInternalCode);
+    return rt2.GetCode() << Rt2_offset;
+  }
+
+  static Instr Rs(CPURegister rs) {
+    VIXL_ASSERT(rs.GetCode() != kSPRegInternalCode);
+    return rs.GetCode() << Rs_offset;
+  }
+
+  // These encoding functions allow the stack pointer to be encoded, and
+  // disallow the zero register.
+  static Instr RdSP(Register rd) {
+    VIXL_ASSERT(!rd.IsZero());
+    return (rd.GetCode() & kRegCodeMask) << Rd_offset;
+  }
+
+  static Instr RnSP(Register rn) {
+    VIXL_ASSERT(!rn.IsZero());
+    return (rn.GetCode() & kRegCodeMask) << Rn_offset;
+  }
+
+  static Instr RmSP(Register rm) {
+    VIXL_ASSERT(!rm.IsZero());
+    return (rm.GetCode() & kRegCodeMask) << Rm_offset;
+  }
+
+  // Flags encoding.
+  static Instr Flags(FlagsUpdate S) {
+    if (S == SetFlags) {
+      return 1 << FlagsUpdate_offset;
+    } else if (S == LeaveFlags) {
+      return 0 << FlagsUpdate_offset;
+    }
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
+  static Instr Cond(Condition cond) { return cond << Condition_offset; }
+
+  // PC-relative address encoding.
+  static Instr ImmPCRelAddress(int64_t imm21) {
+    VIXL_ASSERT(IsInt21(imm21));
+    Instr imm = static_cast<Instr>(TruncateToUint21(imm21));
+    Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+    Instr immlo = imm << ImmPCRelLo_offset;
+    return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+  }
+
+  // Branch encoding.
+  static Instr ImmUncondBranch(int64_t imm26) {
+    VIXL_ASSERT(IsInt26(imm26));
+    return TruncateToUint26(imm26) << ImmUncondBranch_offset;
+  }
+
+  static Instr ImmCondBranch(int64_t imm19) {
+    VIXL_ASSERT(IsInt19(imm19));
+    return TruncateToUint19(imm19) << ImmCondBranch_offset;
+  }
+
+  static Instr ImmCmpBranch(int64_t imm19) {
+    VIXL_ASSERT(IsInt19(imm19));
+    return TruncateToUint19(imm19) << ImmCmpBranch_offset;
+  }
+
+  static Instr ImmTestBranch(int64_t imm14) {
+    VIXL_ASSERT(IsInt14(imm14));
+    return TruncateToUint14(imm14) << ImmTestBranch_offset;
+  }
+
+  static Instr ImmTestBranchBit(unsigned bit_pos) {
+    VIXL_ASSERT(IsUint6(bit_pos));
+    // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+    unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+    unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+    b5 &= ImmTestBranchBit5_mask;
+    b40 &= ImmTestBranchBit40_mask;
+    return b5 | b40;
+  }
+
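+  // These field helpers are OR-combined into a 32-bit instruction word. As a
+  // schematic sketch (ADD_x_op stands in for a real opcode constant):
+  //
+  //   Instr add_x0_x1_x2 = ADD_x_op | SF(x0) | Rm(x2) | Rn(x1) | Rd(x0);
+  //   Emit(add_x0_x1_x2);
+
+  // Data Processing encoding.
+  static Instr SF(Register rd) {
+    return rd.Is64Bits() ?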
SixtyFourBits : ThirtyTwoBits; + } + + static Instr ImmAddSub(int imm) { + VIXL_ASSERT(IsImmAddSub(imm)); + if (IsUint12(imm)) { // No shift required. + imm <<= ImmAddSub_offset; + } else { + imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); + } + return imm; + } + + static Instr ImmS(unsigned imms, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) || + ((reg_size == kWRegSize) && IsUint5(imms))); + USE(reg_size); + return imms << ImmS_offset; + } + + static Instr ImmR(unsigned immr, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) || + ((reg_size == kWRegSize) && IsUint5(immr))); + USE(reg_size); + VIXL_ASSERT(IsUint6(immr)); + return immr << ImmR_offset; + } + + static Instr ImmSetBits(unsigned imms, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(IsUint6(imms)); + VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3)); + USE(reg_size); + return imms << ImmSetBits_offset; + } + + static Instr ImmRotate(unsigned immr, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) || + ((reg_size == kWRegSize) && IsUint5(immr))); + USE(reg_size); + return immr << ImmRotate_offset; + } + + static Instr ImmLLiteral(int64_t imm19) { + VIXL_ASSERT(IsInt19(imm19)); + return TruncateToUint19(imm19) << ImmLLiteral_offset; + } + + static Instr BitN(unsigned bitn, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0)); + USE(reg_size); + return bitn << BitN_offset; + } + + static Instr ShiftDP(Shift shift) { + VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); + return shift << ShiftDP_offset; + } + + static Instr ImmDPShift(unsigned amount) { + VIXL_ASSERT(IsUint6(amount)); + return amount << ImmDPShift_offset; + } + + static Instr ExtendMode(Extend extend) { return extend << ExtendMode_offset; } + + static Instr ImmExtendShift(unsigned left_shift) { + VIXL_ASSERT(left_shift <= 4); + return left_shift << ImmExtendShift_offset; + } + + static Instr ImmCondCmp(unsigned imm) { + VIXL_ASSERT(IsUint5(imm)); + return imm << ImmCondCmp_offset; + } + + static Instr Nzcv(StatusFlags nzcv) { + return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset; + } + + // MemOperand offset encoding. 
+  static Instr ImmLSUnsigned(int64_t imm12) {
+    VIXL_ASSERT(IsUint12(imm12));
+    return TruncateToUint12(imm12) << ImmLSUnsigned_offset;
+  }
+
+  static Instr ImmLS(int64_t imm9) {
+    VIXL_ASSERT(IsInt9(imm9));
+    return TruncateToUint9(imm9) << ImmLS_offset;
+  }
+
+  static Instr ImmLSPair(int64_t imm7, unsigned access_size) {
+    VIXL_ASSERT(IsMultiple(imm7, 1 << access_size));
+    int64_t scaled_imm7 = imm7 / (1 << access_size);
+    VIXL_ASSERT(IsInt7(scaled_imm7));
+    return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
+  }
+
+  static Instr ImmShiftLS(unsigned shift_amount) {
+    VIXL_ASSERT(IsUint1(shift_amount));
+    return shift_amount << ImmShiftLS_offset;
+  }
+
+  static Instr ImmPrefetchOperation(int imm5) {
+    VIXL_ASSERT(IsUint5(imm5));
+    return imm5 << ImmPrefetchOperation_offset;
+  }
+
+  static Instr ImmException(int imm16) {
+    VIXL_ASSERT(IsUint16(imm16));
+    return imm16 << ImmException_offset;
+  }
+
+  static Instr ImmSystemRegister(int imm16) {
+    VIXL_ASSERT(IsUint16(imm16));
+    return imm16 << ImmSystemRegister_offset;
+  }
+
+  static Instr ImmHint(int imm7) {
+    VIXL_ASSERT(IsUint7(imm7));
+    return imm7 << ImmHint_offset;
+  }
+
+  static Instr CRm(int imm4) {
+    VIXL_ASSERT(IsUint4(imm4));
+    return imm4 << CRm_offset;
+  }
+
+  static Instr CRn(int imm4) {
+    VIXL_ASSERT(IsUint4(imm4));
+    return imm4 << CRn_offset;
+  }
+
+  static Instr SysOp(int imm14) {
+    VIXL_ASSERT(IsUint14(imm14));
+    return imm14 << SysOp_offset;
+  }
+
+  static Instr ImmSysOp1(int imm3) {
+    VIXL_ASSERT(IsUint3(imm3));
+    return imm3 << SysOp1_offset;
+  }
+
+  static Instr ImmSysOp2(int imm3) {
+    VIXL_ASSERT(IsUint3(imm3));
+    return imm3 << SysOp2_offset;
+  }
+
+  static Instr ImmBarrierDomain(int imm2) {
+    VIXL_ASSERT(IsUint2(imm2));
+    return imm2 << ImmBarrierDomain_offset;
+  }
+
+  static Instr ImmBarrierType(int imm2) {
+    VIXL_ASSERT(IsUint2(imm2));
+    return imm2 << ImmBarrierType_offset;
+  }
+
+  // Move immediates encoding.
+  static Instr ImmMoveWide(uint64_t imm) {
+    VIXL_ASSERT(IsUint16(imm));
+    return static_cast<Instr>(imm << ImmMoveWide_offset);
+  }
+
+  static Instr ShiftMoveWide(int64_t shift) {
+    VIXL_ASSERT(IsUint2(shift));
+    return static_cast<Instr>(shift << ShiftMoveWide_offset);
+  }
+
+  // FP Immediates.
+  static Instr ImmFP16(Float16 imm);
+  static Instr ImmFP32(float imm);
+  static Instr ImmFP64(double imm);
+
+  // FP register type.
+  static Instr FPType(FPRegister fd) {
+    switch (fd.GetSizeInBits()) {
+      case 16:
+        return FP16;
+      case 32:
+        return FP32;
+      case 64:
+        return FP64;
+      default:
+        VIXL_UNREACHABLE();
+        return 0;
+    }
+  }
+
+  static Instr FPScale(unsigned scale) {
+    VIXL_ASSERT(IsUint6(scale));
+    return scale << FPScale_offset;
+  }
+
+  // Immediate field checking helpers.
+  static bool IsImmAddSub(int64_t immediate);
+  static bool IsImmConditionalCompare(int64_t immediate);
+  static bool IsImmFP16(Float16 imm);
+  static bool IsImmFP32(float imm);
+  static bool IsImmFP64(double imm);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n = NULL,
+                           unsigned* imm_s = NULL,
+                           unsigned* imm_r = NULL);
+  static bool IsImmLSPair(int64_t offset, unsigned access_size);
+  static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+  static bool IsImmLSUnscaled(int64_t offset);
+  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
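+  // For example, ImmLSPair() above encodes the scaled 7-bit offset used by
+  // load/store pair instructions: for "stp x0, x1, [sp, #32]", the byte
+  // offset 32 with access_size 3 (8-byte X registers) is encoded as
+  // 32 / (1 << 3) = 4 in the imm7 field.
+
+  // Instruction bits for vector format in data processing operations.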
+ static Instr VFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.GetLanes()) { + case 2: + return NEON_2S; + case 4: + return NEON_4H; + case 8: + return NEON_8B; + default: + return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.GetLanes()) { + case 2: + return NEON_2D; + case 4: + return NEON_4S; + case 8: + return NEON_8H; + case 16: + return NEON_16B; + default: + return 0xffffffff; + } + } + } + + // Instruction bits for vector format in floating point data processing + // operations. + static Instr FPFormat(VRegister vd) { + switch (vd.GetLanes()) { + case 1: + // Floating point scalar formats. + switch (vd.GetSizeInBits()) { + case 16: + return FP16; + case 32: + return FP32; + case 64: + return FP64; + default: + VIXL_UNREACHABLE(); + } + break; + case 2: + // Two lane floating point vector formats. + switch (vd.GetSizeInBits()) { + case 64: + return NEON_FP_2S; + case 128: + return NEON_FP_2D; + default: + VIXL_UNREACHABLE(); + } + break; + case 4: + // Four lane floating point vector formats. + switch (vd.GetSizeInBits()) { + case 64: + return NEON_FP_4H; + case 128: + return NEON_FP_4S; + default: + VIXL_UNREACHABLE(); + } + break; + case 8: + // Eight lane floating point vector format. + VIXL_ASSERT(vd.Is128Bits()); + return NEON_FP_8H; + default: + VIXL_UNREACHABLE(); + return 0; + } + VIXL_UNREACHABLE(); + return 0; + } + + // Instruction bits for vector format in load and store operations. + static Instr LSVFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.GetLanes()) { + case 1: + return LS_NEON_1D; + case 2: + return LS_NEON_2S; + case 4: + return LS_NEON_4H; + case 8: + return LS_NEON_8B; + default: + return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.GetLanes()) { + case 2: + return LS_NEON_2D; + case 4: + return LS_NEON_4S; + case 8: + return LS_NEON_8H; + case 16: + return LS_NEON_16B; + default: + return 0xffffffff; + } + } + } + + // Instruction bits for scalar format in data processing operations. + static Instr SFormat(VRegister vd) { + VIXL_ASSERT(vd.GetLanes() == 1); + switch (vd.GetSizeInBytes()) { + case 1: + return NEON_B; + case 2: + return NEON_H; + case 4: + return NEON_S; + case 8: + return NEON_D; + default: + return 0xffffffff; + } + } + + static Instr ImmNEONHLM(int index, int num_bits) { + int h, l, m; + if (num_bits == 3) { + VIXL_ASSERT(IsUint3(index)); + h = (index >> 2) & 1; + l = (index >> 1) & 1; + m = (index >> 0) & 1; + } else if (num_bits == 2) { + VIXL_ASSERT(IsUint2(index)); + h = (index >> 1) & 1; + l = (index >> 0) & 1; + m = 0; + } else { + VIXL_ASSERT(IsUint1(index) && (num_bits == 1)); + h = (index >> 0) & 1; + l = 0; + m = 0; + } + return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset); + } + + static Instr ImmRotFcadd(int rot) { + VIXL_ASSERT(rot == 90 || rot == 270); + return (((rot == 270) ? 
1 : 0) << ImmRotFcadd_offset);
+  }
+
+  static Instr ImmRotFcmlaSca(int rot) {
+    VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270);
+    return (rot / 90) << ImmRotFcmlaSca_offset;
+  }
+
+  static Instr ImmRotFcmlaVec(int rot) {
+    VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270);
+    return (rot / 90) << ImmRotFcmlaVec_offset;
+  }
+
+  static Instr ImmNEONExt(int imm4) {
+    VIXL_ASSERT(IsUint4(imm4));
+    return imm4 << ImmNEONExt_offset;
+  }
+
+  static Instr ImmNEON5(Instr format, int index) {
+    VIXL_ASSERT(IsUint4(index));
+    int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+    int imm5 = (index << (s + 1)) | (1 << s);
+    return imm5 << ImmNEON5_offset;
+  }
+
+  static Instr ImmNEON4(Instr format, int index) {
+    VIXL_ASSERT(IsUint4(index));
+    int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+    int imm4 = index << s;
+    return imm4 << ImmNEON4_offset;
+  }
+
+  static Instr ImmNEONabcdefgh(int imm8) {
+    VIXL_ASSERT(IsUint8(imm8));
+    Instr instr;
+    instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+    instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+    return instr;
+  }
+
+  static Instr NEONCmode(int cmode) {
+    VIXL_ASSERT(IsUint4(cmode));
+    return cmode << NEONCmode_offset;
+  }
+
+  static Instr NEONModImmOp(int op) {
+    VIXL_ASSERT(IsUint1(op));
+    return op << NEONModImmOp_offset;
+  }
+
+  // Size of the code generated from `label` to the current position.
+  size_t GetSizeOfCodeGeneratedSince(Label* label) const {
+    VIXL_ASSERT(label->IsBound());
+    return GetBuffer().GetOffsetFrom(label->GetLocation());
+  }
+  VIXL_DEPRECATED("GetSizeOfCodeGeneratedSince",
+                  size_t SizeOfCodeGeneratedSince(Label* label) const) {
+    return GetSizeOfCodeGeneratedSince(label);
+  }
+
+  VIXL_DEPRECATED("GetBuffer().GetCapacity()",
+                  size_t GetBufferCapacity() const) {
+    return GetBuffer().GetCapacity();
+  }
+  VIXL_DEPRECATED("GetBuffer().GetCapacity()", size_t BufferCapacity() const) {
+    return GetBuffer().GetCapacity();
+  }
+
+  VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()",
+                  size_t GetRemainingBufferSpace() const) {
+    return GetBuffer().GetRemainingBytes();
+  }
+  VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()",
+                  size_t RemainingBufferSpace() const) {
+    return GetBuffer().GetRemainingBytes();
+  }
+
+  PositionIndependentCodeOption GetPic() const { return pic_; }
+  VIXL_DEPRECATED("GetPic", PositionIndependentCodeOption pic() const) {
+    return GetPic();
+  }
+
+  CPUFeatures* GetCPUFeatures() { return &cpu_features_; }
+
+  void SetCPUFeatures(const CPUFeatures& cpu_features) {
+    cpu_features_ = cpu_features;
+  }
+
+  bool AllowPageOffsetDependentCode() const {
+    return (GetPic() == PageOffsetDependentCode) ||
+           (GetPic() == PositionDependentCode);
+  }
+
+  static Register AppropriateZeroRegFor(const CPURegister& reg) {
+    return reg.Is64Bits() ?
Register(xzr) : Register(wzr); + } + + protected: + void LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option = PreferScaledOffset); + + void LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + void LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op); + void LoadStoreStruct1(const VRegister& vt, + int reg_count, + const MemOperand& addr); + void LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op); + + void Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 + // reports a bogus uninitialised warning then. + void Logical(const Register& rd, + const Register& rn, + const Operand operand, + LogicalOp op); + void LogicalImmediate(const Register& rd, + const Register& rn, + unsigned n, + unsigned imm_s, + unsigned imm_r, + LogicalOp op); + + void ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + + // Functions for emulating operands not directly supported by the instruction + // set. + void EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned amount); + void EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift); + + void AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + void NEONTable(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONTableOp op); + + // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified + // registers. Only simple loads are supported; sign- and zero-extension (such + // as in LDPSW_x or LDRB_w) are not supported. + static LoadStoreOp LoadOpFor(const CPURegister& rt); + static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStoreOp StoreOpFor(const CPURegister& rt); + static LoadStorePairOp StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); + + // Convenience pass-through for CPU feature checks. + bool CPUHas(CPUFeatures::Feature feature0, + CPUFeatures::Feature feature1 = CPUFeatures::kNone, + CPUFeatures::Feature feature2 = CPUFeatures::kNone, + CPUFeatures::Feature feature3 = CPUFeatures::kNone) const { + return cpu_features_.Has(feature0, feature1, feature2, feature3); + } + + // Determine whether the target CPU has the specified registers, based on the + // currently-enabled CPU features. Presence of a register does not imply + // support for arbitrary operations on it. For example, CPUs with FP have H + // registers, but most half-precision operations require the FPHalf feature. 
+ // + // These are used to check CPU features in loads and stores that have the same + // entry point for both integer and FP registers. + bool CPUHas(const CPURegister& rt) const; + bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const; + + private: + static uint32_t FP16ToImm8(Float16 imm); + static uint32_t FP32ToImm8(float imm); + static uint32_t FP64ToImm8(double imm); + + // Instruction helpers. + void MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op); + void DataProcShiftedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op); + void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op); + void ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op); + void DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op); + void DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op); + void FPDataProcessing1Source(const VRegister& fd, + const VRegister& fn, + FPDataProcessing1SourceOp op); + void FPDataProcessing3Source(const VRegister& fd, + const VRegister& fn, + const VRegister& fm, + const VRegister& fa, + FPDataProcessing3SourceOp op); + void NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op); + void NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op, + Instr op_half); + void NEONModifiedImmShiftLsl(const VRegister& vd, + const int imm8, + const int left_shift, + NEONModifiedImmediateOp op); + void NEONModifiedImmShiftMsl(const VRegister& vd, + const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op); + void NEONFP2Same(const VRegister& vd, const VRegister& vn, Instr vop); + void NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop); + void NEON3SameFP16(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op); + void NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op); + void NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value = 0.0); + void NEONFP2RegMiscFP16(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscFP16Op vop, + double value = 0.0); + void NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value = 0); + void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, Instr op); + void NEONFP2RegMiscFP16(const VRegister& vd, const VRegister& vn, Instr op); + void NEONAddlp(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp op); + void NEONPerm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONPermOp op); + void NEONFPByElement(const VRegister& vd, + const VRegister& vn, + const 
VRegister& vm,
+                       int vm_index,
+                       NEONByIndexedElementOp op,
+                       NEONByIndexedElementOp op_half);
+  void NEONByElement(const VRegister& vd,
+                     const VRegister& vn,
+                     const VRegister& vm,
+                     int vm_index,
+                     NEONByIndexedElementOp op);
+  void NEONByElementL(const VRegister& vd,
+                      const VRegister& vn,
+                      const VRegister& vm,
+                      int vm_index,
+                      NEONByIndexedElementOp op);
+  void NEONShiftImmediate(const VRegister& vd,
+                          const VRegister& vn,
+                          NEONShiftImmediateOp op,
+                          int immh_immb);
+  void NEONShiftLeftImmediate(const VRegister& vd,
+                              const VRegister& vn,
+                              int shift,
+                              NEONShiftImmediateOp op);
+  void NEONShiftRightImmediate(const VRegister& vd,
+                               const VRegister& vn,
+                               int shift,
+                               NEONShiftImmediateOp op);
+  void NEONShiftImmediateL(const VRegister& vd,
+                           const VRegister& vn,
+                           int shift,
+                           NEONShiftImmediateOp op);
+  void NEONShiftImmediateN(const VRegister& vd,
+                           const VRegister& vn,
+                           int shift,
+                           NEONShiftImmediateOp op);
+  void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop);
+
+  Instr LoadStoreStructAddrModeField(const MemOperand& addr);
+
+  // Encode the specified MemOperand for the specified access size and scaling
+  // preference.
+  Instr LoadStoreMemOperand(const MemOperand& addr,
+                            unsigned access_size,
+                            LoadStoreScalingOption option);
+
+  // Link the current (not-yet-emitted) instruction to the specified label,
+  // then return an offset to be encoded in the instruction. If the label is
+  // not yet bound, an offset of 0 is returned.
+  ptrdiff_t LinkAndGetByteOffsetTo(Label* label);
+  ptrdiff_t LinkAndGetInstructionOffsetTo(Label* label);
+  ptrdiff_t LinkAndGetPageOffsetTo(Label* label);
+
+  // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+  template <int element_shift>
+  ptrdiff_t LinkAndGetOffsetTo(Label* label);
+
+  // Literal load offsets are in words (32-bit).
+  ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal);
+
+  // Emit the instruction in buffer_.
+  void Emit(Instr instruction) {
+    VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+    VIXL_ASSERT(AllowAssembler());
+    GetBuffer()->Emit32(instruction);
+  }
+
+  PositionIndependentCodeOption pic_;
+
+  CPUFeatures cpu_features_;
+};
+
+
+template <typename T>
+void Literal<T>::UpdateValue(T new_value, const Assembler* assembler) {
+  return UpdateValue(new_value,
+                     assembler->GetBuffer().GetStartAddress<uint8_t*>());
+}
+
+
+template <typename T>
+void Literal<T>::UpdateValue(T high64, T low64, const Assembler* assembler) {
+  return UpdateValue(high64,
+                     low64,
+                     assembler->GetBuffer().GetStartAddress<uint8_t*>());
+}
+
+
+}  // namespace aarch64
+
+// Required InvalSet template specialisations.
+// TODO: These template specialisations should not live in this file. Move
+// Label out of the aarch64 namespace in order to share its implementation
+// later.
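+// In these specialisations, each stored element (a raw label link offset)
+// acts as its own key, so GetKey() returns the element unchanged and SetKey()
+// overwrites it in place.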
+#define INVAL_SET_TEMPLATE_PARAMETERS                                \
+  ptrdiff_t, aarch64::Label::kNPreallocatedLinks, ptrdiff_t,         \
+      aarch64::Label::kInvalidLinkKey, aarch64::Label::kReclaimFrom, \
+      aarch64::Label::kReclaimFactor
+template <>
+inline ptrdiff_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
+    const ptrdiff_t& element) {
+  return element;
+}
+template <>
+inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(ptrdiff_t* element,
+                                                            ptrdiff_t key) {
+  *element = key;
+}
+#undef INVAL_SET_TEMPLATE_PARAMETERS
+
+}  // namespace vixl
+
+#endif  // VIXL_AARCH64_ASSEMBLER_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/constants-aarch64.h b/dep/vixl/include/vixl/aarch64/constants-aarch64.h
new file mode 100644
index 000000000..de659f070
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/constants-aarch64.h
@@ -0,0 +1,2544 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH64_CONSTANTS_AARCH64_H_
+#define VIXL_AARCH64_CONSTANTS_AARCH64_H_
+
+#include "../globals-vixl.h"
+
+namespace vixl {
+namespace aarch64 {
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+
+// clang-format off
+#define AARCH64_REGISTER_CODE_LIST(R)                 \
+  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)      \
+  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)     \
+  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)     \
+  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_)                   \
+/* Register fields */                                 \
+V_(Rd, 4, 0, ExtractBits)    /* Destination register. */ \
+V_(Rn, 9, 5, ExtractBits)    /* First source register. */ \
+V_(Rm, 20, 16, ExtractBits)  /* Second source register. */ \
+V_(Ra, 14, 10, ExtractBits)  /* Third source register. */ \
+V_(Rt, 4, 0, ExtractBits)    /* Load/store register. */ \
+V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. */ \
+V_(Rs, 20, 16, ExtractBits)  /* Exclusive access status. */ \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, ExtractBits) \
+V_(FlagsUpdate, 29, 29, ExtractBits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, ExtractSignedBits) \
+V_(ImmPCRelLo, 30, 29, ExtractBits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, ExtractBits) \
+V_(ImmDPShift, 15, 10, ExtractBits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, ExtractBits) \
+V_(ShiftAddSub, 23, 22, ExtractBits) \
+ \
+/* Add/subtract extend */ \
+V_(ImmExtendShift, 12, 10, ExtractBits) \
+V_(ExtendMode, 15, 13, ExtractBits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, ExtractBits) \
+V_(ShiftMoveWide, 22, 21, ExtractBits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, ExtractBits) \
+V_(ImmRotate, 21, 16, ExtractBits) \
+V_(ImmSetBits, 15, 10, ExtractBits) \
+V_(ImmR, 21, 16, ExtractBits) \
+V_(ImmS, 15, 10, ExtractBits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, ExtractSignedBits) \
+V_(ImmTestBranchBit40, 23, 19, ExtractBits) \
+V_(ImmTestBranchBit5, 31, 31, ExtractBits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, ExtractBits) \
+V_(ConditionBranch, 3, 0, ExtractBits) \
+V_(Nzcv, 3, 0, ExtractBits) \
+V_(ImmCondCmp, 20, 16, ExtractBits) \
+V_(ImmCondBranch, 23, 5, ExtractSignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, ExtractBits) \
+V_(ImmFP, 20, 13, ExtractBits) \
+V_(FPScale, 15, 10, ExtractBits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, ExtractSignedBits) \
+V_(ImmLSUnsigned, 21, 10, ExtractBits) \
+V_(ImmLSPair, 21, 15, ExtractSignedBits) \
+V_(ImmShiftLS, 12, 12, ExtractBits) \
+V_(LSOpc, 23, 22, ExtractBits) \
+V_(LSVector, 26, 26, ExtractBits) \
+V_(LSSize, 31, 30, ExtractBits) \
+V_(ImmPrefetchOperation, 4, 0, ExtractBits) \
+V_(PrefetchHint, 4, 3, ExtractBits) \
+V_(PrefetchTarget, 2, 1, ExtractBits) \
+V_(PrefetchStream, 0, 0, ExtractBits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, ExtractSignedBits) \
+V_(ImmCmpBranch, 23, 5, ExtractSignedBits) \
+V_(ImmLLiteral, 23, 5, ExtractSignedBits) \
+V_(ImmException, 20, 5, ExtractBits) \
+V_(ImmHint, 11, 5, ExtractBits) \
+V_(ImmBarrierDomain, 11, 10, ExtractBits) \
+V_(ImmBarrierType, 9, 8, ExtractBits) \
+ \
+/* System (MRS, MSR, SYS) */ \
+V_(ImmSystemRegister, 20, 5, ExtractBits) \
+V_(SysO0, 19, 19, ExtractBits) \
+V_(SysOp, 18, 5, ExtractBits) \
+V_(SysOp0, 20, 19, ExtractBits) \
+V_(SysOp1, 18, 16, ExtractBits) \
+V_(SysOp2, 7, 5, ExtractBits) \
+V_(CRn, 15, 12, ExtractBits) \
+V_(CRm, 11, 8, ExtractBits) \
+ \
+/* Load-/store-exclusive */ \
+V_(LdStXLoad, 22, 22, ExtractBits) \
+V_(LdStXNotExclusive, 23, 23, ExtractBits) \
+V_(LdStXAcquireRelease, 15, 15, ExtractBits) \
+V_(LdStXSizeLog2, 31, 30, ExtractBits) \
+V_(LdStXPair, 21, 21, ExtractBits) \
+ \
+/* NEON generic fields */ \
+V_(NEONQ, 30, 30, ExtractBits) \
+V_(NEONSize, 23, 22, ExtractBits) \
+V_(NEONLSSize, 11, 10, ExtractBits) \
+V_(NEONS, 12, 12, ExtractBits) \
+V_(NEONL, 21, 21, ExtractBits) \
+V_(NEONM, 20, 20, ExtractBits) \
+V_(NEONH, 11, 11, ExtractBits) \
+V_(ImmNEONExt, 14, 11, ExtractBits) \
+V_(ImmNEON5, 20, 16, ExtractBits) \
+V_(ImmNEON4, 14, 11, ExtractBits) \
+ \
+/* NEON extra fields */ \
+V_(ImmRotFcadd, 12, 12, ExtractBits) \
+V_(ImmRotFcmlaVec, 12, 11, ExtractBits) \
+V_(ImmRotFcmlaSca, 14, 13, ExtractBits) \
+ \
+/* NEON Modified Immediate fields */ \
+V_(ImmNEONabc, 18, 16, ExtractBits) \
+V_(ImmNEONdefgh, 9, 5, ExtractBits) \
+V_(NEONModImmOp, 29, 29, ExtractBits) \
+V_(NEONCmode, 15, 12, ExtractBits) \
+ \
+/* NEON Shift Immediate fields */ \
+V_(ImmNEONImmhImmb, 22, 16, ExtractBits) \
+V_(ImmNEONImmh, 22, 19, ExtractBits) \
+V_(ImmNEONImmb, 18, 16, ExtractBits)
+// clang-format on
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+  /* NZCV */                                \
+  V_(Flags, 31, 28, ExtractBits)            \
+  V_(N, 31, 31, ExtractBits)                \
+  V_(Z, 30, 30, ExtractBits)                \
+  V_(C, 29, 29, ExtractBits)                \
+  V_(V, 28, 28, ExtractBits)                \
+  M_(NZCV, Flags_mask)                      \
+  /* FPCR */                                \
+  V_(AHP, 26, 26, ExtractBits)              \
+  V_(DN, 25, 25, ExtractBits)               \
+  V_(FZ, 24, 24, ExtractBits)               \
+  V_(RMode, 23, 22, ExtractBits)            \
+  M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+// Field offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+  const int Name##_offset = LowBit;                      \
+  const int Name##_width = HighBit - LowBit + 1;         \
+  const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+// formed from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Disable `clang-format` for the `enum`s below. We care about the manual
+// formatting that `clang-format` would destroy.
+// clang-format off
+
+// Condition codes.
+enum Condition {
+  eq = 0,   // Z set             Equal.
+  ne = 1,   // Z clear           Not equal.
+  cs = 2,   // C set             Carry set.
+  cc = 3,   // C clear           Carry clear.
+  mi = 4,   // N set             Negative.
+  pl = 5,   // N clear           Positive or zero.
+  vs = 6,   // V set             Overflow.
+  vc = 7,   // V clear           No overflow.
+  hi = 8,   // C set, Z clear    Unsigned higher.
+  ls = 9,   // C clear or Z set  Unsigned lower or same.
+  ge = 10,  // N == V            Greater or equal.
+  lt = 11,  // N != V            Less than.
+  gt = 12,  // Z clear, N == V   Greater than.
+  le = 13,  // Z set or N != V   Less than or equal.
+  al = 14,  //                   Always.
+  nv = 15,  // Behaves as always/al.
+
+  // Aliases.
+  hs = cs,  // C set             Unsigned higher or same.
+  lo = cc   // C clear           Unsigned lower.
+};
+
+inline Condition InvertCondition(Condition cond) {
+  // Conditions al and nv behave identically, as "always true". They can't be
+  // inverted, because there is no "always false" condition.
+  VIXL_ASSERT((cond != al) && (cond != nv));
+  return static_cast<Condition>(cond ^ 1);
+}
+
+enum FPTrapFlags {
+  EnableTrap = 1,
+  DisableTrap = 0
+};
+
+enum FlagsUpdate {
+  SetFlags = 1,
+  LeaveFlags = 0
+};
+
+enum StatusFlags {
+  NoFlag = 0,
+
+  // Derive the flag combinations from the system register bit descriptions.
+  NFlag = N_mask,
+  ZFlag = Z_mask,
+  CFlag = C_mask,
+  VFlag = V_mask,
+  NZFlag = NFlag | ZFlag,
+  NCFlag = NFlag | CFlag,
+  NVFlag = NFlag | VFlag,
+  ZCFlag = ZFlag | CFlag,
+  ZVFlag = ZFlag | VFlag,
+  CVFlag = CFlag | VFlag,
+  NZCFlag = NFlag | ZFlag | CFlag,
+  NZVFlag = NFlag | ZFlag | VFlag,
+  NCVFlag = NFlag | CFlag | VFlag,
+  ZCVFlag = ZFlag | CFlag | VFlag,
+  NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+  // Floating-point comparison results.
+  FPEqualFlag = ZCFlag,
+  FPLessThanFlag = NFlag,
+  FPGreaterThanFlag = CFlag,
+  FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+  NO_SHIFT = -1,
+  LSL = 0x0,
+  LSR = 0x1,
+  ASR = 0x2,
+  ROR = 0x3,
+  MSL = 0x4
+};
+
+enum Extend {
+  NO_EXTEND = -1,
+  UXTB = 0,
+  UXTH = 1,
+  UXTW = 2,
+  UXTX = 3,
+  SXTB = 4,
+  SXTH = 5,
+  SXTW = 6,
+  SXTX = 7
+};
+
+enum SystemHint {
+  NOP = 0,
+  YIELD = 1,
+  WFE = 2,
+  WFI = 3,
+  SEV = 4,
+  SEVL = 5,
+  ESB = 16,
+  CSDB = 20
+};
+
+enum BarrierDomain {
+  OuterShareable = 0,
+  NonShareable = 1,
+  InnerShareable = 2,
+  FullSystem = 3
+};
+
+enum BarrierType {
+  BarrierOther = 0,
+  BarrierReads = 1,
+  BarrierWrites = 2,
+  BarrierAll = 3
+};
+
+enum PrefetchOperation {
+  PLDL1KEEP = 0x00,
+  PLDL1STRM = 0x01,
+  PLDL2KEEP = 0x02,
+  PLDL2STRM = 0x03,
+  PLDL3KEEP = 0x04,
+  PLDL3STRM = 0x05,
+
+  PLIL1KEEP = 0x08,
+  PLIL1STRM = 0x09,
+  PLIL2KEEP = 0x0a,
+  PLIL2STRM = 0x0b,
+  PLIL3KEEP = 0x0c,
+  PLIL3STRM = 0x0d,
+
+  PSTL1KEEP = 0x10,
+  PSTL1STRM = 0x11,
+  PSTL2KEEP = 0x12,
+  PSTL2STRM = 0x13,
+  PSTL3KEEP = 0x14,
+  PSTL3STRM = 0x15
+};
+
+template <int op0, int op1, int crn, int crm, int op2>
+class SystemRegisterEncoder {
+ public:
+  static const uint32_t value =
+      ((op0 << SysO0_offset) |
+       (op1 << SysOp1_offset) |
+       (crn << CRn_offset) |
+       (crm << CRm_offset) |
+       (op2 << SysOp2_offset)) >> ImmSystemRegister_offset;
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+  NZCV = SystemRegisterEncoder<3, 3, 4, 2, 0>::value,
+  FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value
+};
+
+template <int op1, int crn, int crm, int op2>
+class CacheOpEncoder {
+ public:
+  static const uint32_t value =
+      ((op1 << SysOp1_offset) |
+       (crn << CRn_offset) |
+       (crm << CRm_offset) |
+       (op2 << SysOp2_offset)) >> SysOp_offset;
+};
+
+enum InstructionCacheOp {
+  IVAU = CacheOpEncoder<3, 7, 5, 1>::value
+};
+
+enum DataCacheOp {
+  CVAC = CacheOpEncoder<3, 7, 10, 1>::value,
+  CVAU = CacheOpEncoder<3, 7, 11, 1>::value,
+  CIVAC = CacheOpEncoder<3, 7, 14, 1>::value,
+  ZVA = CacheOpEncoder<3, 7, 4, 1>::value
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask:  The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+//   case ADR:  Format("adr 'Xd, 'AddrPCRelByte"); break;
+//   case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+//   default:   printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
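+// Bit 31 (sf) selects the register width for most integer instructions, so
+// or-ing SixtyFourBits onto a W-form opcode yields the X form; for example,
+// (ADD_w_imm | SixtyFourBits) == ADD_x_imm in the add/sub enums below.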
+enum GenericInstrField { + SixtyFourBits = 0x80000000, + ThirtyTwoBits = 0x00000000, + + FPTypeMask = 0x00C00000, + FP16 = 0x00C00000, + FP32 = 0x00000000, + FP64 = 0x00400000 +}; + +enum NEONFormatField { + NEONFormatFieldMask = 0x40C00000, + NEON_Q = 0x40000000, + NEON_8B = 0x00000000, + NEON_16B = NEON_8B | NEON_Q, + NEON_4H = 0x00400000, + NEON_8H = NEON_4H | NEON_Q, + NEON_2S = 0x00800000, + NEON_4S = NEON_2S | NEON_Q, + NEON_1D = 0x00C00000, + NEON_2D = 0x00C00000 | NEON_Q +}; + +enum NEONFPFormatField { + NEONFPFormatFieldMask = 0x40400000, + NEON_FP_4H = FP16, + NEON_FP_2S = FP32, + NEON_FP_8H = FP16 | NEON_Q, + NEON_FP_4S = FP32 | NEON_Q, + NEON_FP_2D = FP64 | NEON_Q +}; + +enum NEONLSFormatField { + NEONLSFormatFieldMask = 0x40000C00, + LS_NEON_8B = 0x00000000, + LS_NEON_16B = LS_NEON_8B | NEON_Q, + LS_NEON_4H = 0x00000400, + LS_NEON_8H = LS_NEON_4H | NEON_Q, + LS_NEON_2S = 0x00000800, + LS_NEON_4S = LS_NEON_2S | NEON_Q, + LS_NEON_1D = 0x00000C00, + LS_NEON_2D = LS_NEON_1D | NEON_Q +}; + +enum NEONScalarFormatField { + NEONScalarFormatFieldMask = 0x00C00000, + NEONScalar = 0x10000000, + NEON_B = 0x00000000, + NEON_H = 0x00400000, + NEON_S = 0x00800000, + NEON_D = 0x00C00000 +}; + +// PC relative addressing. +enum PCRelAddressingOp { + PCRelAddressingFixed = 0x10000000, + PCRelAddressingFMask = 0x1F000000, + PCRelAddressingMask = 0x9F000000, + ADR = PCRelAddressingFixed | 0x00000000, + ADRP = PCRelAddressingFixed | 0x80000000 +}; + +// Add/sub (immediate, shifted and extended.) +const int kSFOffset = 31; +enum AddSubOp { + AddSubOpMask = 0x60000000, + AddSubSetFlagsBit = 0x20000000, + ADD = 0x00000000, + ADDS = ADD | AddSubSetFlagsBit, + SUB = 0x40000000, + SUBS = SUB | AddSubSetFlagsBit +}; + +#define ADD_SUB_OP_LIST(V) \ + V(ADD), \ + V(ADDS), \ + V(SUB), \ + V(SUBS) + +enum AddSubImmediateOp { + AddSubImmediateFixed = 0x11000000, + AddSubImmediateFMask = 0x1F000000, + AddSubImmediateMask = 0xFF000000, + #define ADD_SUB_IMMEDIATE(A) \ + A##_w_imm = AddSubImmediateFixed | A, \ + A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE) + #undef ADD_SUB_IMMEDIATE +}; + +enum AddSubShiftedOp { + AddSubShiftedFixed = 0x0B000000, + AddSubShiftedFMask = 0x1F200000, + AddSubShiftedMask = 0xFF200000, + #define ADD_SUB_SHIFTED(A) \ + A##_w_shift = AddSubShiftedFixed | A, \ + A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_SHIFTED) + #undef ADD_SUB_SHIFTED +}; + +enum AddSubExtendedOp { + AddSubExtendedFixed = 0x0B200000, + AddSubExtendedFMask = 0x1F200000, + AddSubExtendedMask = 0xFFE00000, + #define ADD_SUB_EXTENDED(A) \ + A##_w_ext = AddSubExtendedFixed | A, \ + A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_EXTENDED) + #undef ADD_SUB_EXTENDED +}; + +// Add/sub with carry. +enum AddSubWithCarryOp { + AddSubWithCarryFixed = 0x1A000000, + AddSubWithCarryFMask = 0x1FE00000, + AddSubWithCarryMask = 0xFFE0FC00, + ADC_w = AddSubWithCarryFixed | ADD, + ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits, + ADC = ADC_w, + ADCS_w = AddSubWithCarryFixed | ADDS, + ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits, + SBC_w = AddSubWithCarryFixed | SUB, + SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits, + SBC = SBC_w, + SBCS_w = AddSubWithCarryFixed | SUBS, + SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits +}; + + +// Logical (immediate and shifted register). 
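+// The NOT bit inverts the second operand, so every inverted form is derived
+// from its base op: BIC == (AND | NOT), ORN == (ORR | NOT), EON == (EOR | NOT)
+// and BICS == (ANDS | NOT).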
+enum LogicalOp { + LogicalOpMask = 0x60200000, + NOT = 0x00200000, + AND = 0x00000000, + BIC = AND | NOT, + ORR = 0x20000000, + ORN = ORR | NOT, + EOR = 0x40000000, + EON = EOR | NOT, + ANDS = 0x60000000, + BICS = ANDS | NOT +}; + +// Logical immediate. +enum LogicalImmediateOp { + LogicalImmediateFixed = 0x12000000, + LogicalImmediateFMask = 0x1F800000, + LogicalImmediateMask = 0xFF800000, + AND_w_imm = LogicalImmediateFixed | AND, + AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits, + ORR_w_imm = LogicalImmediateFixed | ORR, + ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits, + EOR_w_imm = LogicalImmediateFixed | EOR, + EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits, + ANDS_w_imm = LogicalImmediateFixed | ANDS, + ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits +}; + +// Logical shifted register. +enum LogicalShiftedOp { + LogicalShiftedFixed = 0x0A000000, + LogicalShiftedFMask = 0x1F000000, + LogicalShiftedMask = 0xFF200000, + AND_w = LogicalShiftedFixed | AND, + AND_x = LogicalShiftedFixed | AND | SixtyFourBits, + AND_shift = AND_w, + BIC_w = LogicalShiftedFixed | BIC, + BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits, + BIC_shift = BIC_w, + ORR_w = LogicalShiftedFixed | ORR, + ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits, + ORR_shift = ORR_w, + ORN_w = LogicalShiftedFixed | ORN, + ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits, + ORN_shift = ORN_w, + EOR_w = LogicalShiftedFixed | EOR, + EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits, + EOR_shift = EOR_w, + EON_w = LogicalShiftedFixed | EON, + EON_x = LogicalShiftedFixed | EON | SixtyFourBits, + EON_shift = EON_w, + ANDS_w = LogicalShiftedFixed | ANDS, + ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits, + ANDS_shift = ANDS_w, + BICS_w = LogicalShiftedFixed | BICS, + BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits, + BICS_shift = BICS_w +}; + +// Move wide immediate. +enum MoveWideImmediateOp { + MoveWideImmediateFixed = 0x12800000, + MoveWideImmediateFMask = 0x1F800000, + MoveWideImmediateMask = 0xFF800000, + MOVN = 0x00000000, + MOVZ = 0x40000000, + MOVK = 0x60000000, + MOVN_w = MoveWideImmediateFixed | MOVN, + MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits, + MOVZ_w = MoveWideImmediateFixed | MOVZ, + MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits, + MOVK_w = MoveWideImmediateFixed | MOVK, + MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits +}; + +// Bitfield. +const int kBitfieldNOffset = 22; +enum BitfieldOp { + BitfieldFixed = 0x13000000, + BitfieldFMask = 0x1F800000, + BitfieldMask = 0xFF800000, + SBFM_w = BitfieldFixed | 0x00000000, + SBFM_x = BitfieldFixed | 0x80000000, + SBFM = SBFM_w, + BFM_w = BitfieldFixed | 0x20000000, + BFM_x = BitfieldFixed | 0xA0000000, + BFM = BFM_w, + UBFM_w = BitfieldFixed | 0x40000000, + UBFM_x = BitfieldFixed | 0xC0000000, + UBFM = UBFM_w + // Bitfield N field. +}; + +// Extract. +enum ExtractOp { + ExtractFixed = 0x13800000, + ExtractFMask = 0x1F800000, + ExtractMask = 0xFFA00000, + EXTR_w = ExtractFixed | 0x00000000, + EXTR_x = ExtractFixed | 0x80000000, + EXTR = EXTR_w +}; + +// Unconditional branch. +enum UnconditionalBranchOp { + UnconditionalBranchFixed = 0x14000000, + UnconditionalBranchFMask = 0x7C000000, + UnconditionalBranchMask = 0xFC000000, + B = UnconditionalBranchFixed | 0x00000000, + BL = UnconditionalBranchFixed | 0x80000000 +}; + +// Unconditional branch to register. 
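+// A plain `ret` is RET with Rn == x30 (lr); composing the Rn field gives the
+// canonical encoding (illustrative arithmetic): RET | (30 << Rn_offset)
+// == 0xD65F03C0.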
+enum UnconditionalBranchToRegisterOp { + UnconditionalBranchToRegisterFixed = 0xD6000000, + UnconditionalBranchToRegisterFMask = 0xFE000000, + UnconditionalBranchToRegisterMask = 0xFFFFFC00, + BR = UnconditionalBranchToRegisterFixed | 0x001F0000, + BLR = UnconditionalBranchToRegisterFixed | 0x003F0000, + RET = UnconditionalBranchToRegisterFixed | 0x005F0000, + + BRAAZ = UnconditionalBranchToRegisterFixed | 0x001F0800, + BRABZ = UnconditionalBranchToRegisterFixed | 0x001F0C00, + BLRAAZ = UnconditionalBranchToRegisterFixed | 0x003F0800, + BLRABZ = UnconditionalBranchToRegisterFixed | 0x003F0C00, + RETAA = UnconditionalBranchToRegisterFixed | 0x005F0800, + RETAB = UnconditionalBranchToRegisterFixed | 0x005F0C00, + BRAA = UnconditionalBranchToRegisterFixed | 0x011F0800, + BRAB = UnconditionalBranchToRegisterFixed | 0x011F0C00, + BLRAA = UnconditionalBranchToRegisterFixed | 0x013F0800, + BLRAB = UnconditionalBranchToRegisterFixed | 0x013F0C00 +}; + +// Compare and branch. +enum CompareBranchOp { + CompareBranchFixed = 0x34000000, + CompareBranchFMask = 0x7E000000, + CompareBranchMask = 0xFF000000, + CBZ_w = CompareBranchFixed | 0x00000000, + CBZ_x = CompareBranchFixed | 0x80000000, + CBZ = CBZ_w, + CBNZ_w = CompareBranchFixed | 0x01000000, + CBNZ_x = CompareBranchFixed | 0x81000000, + CBNZ = CBNZ_w +}; + +// Test and branch. +enum TestBranchOp { + TestBranchFixed = 0x36000000, + TestBranchFMask = 0x7E000000, + TestBranchMask = 0x7F000000, + TBZ = TestBranchFixed | 0x00000000, + TBNZ = TestBranchFixed | 0x01000000 +}; + +// Conditional branch. +enum ConditionalBranchOp { + ConditionalBranchFixed = 0x54000000, + ConditionalBranchFMask = 0xFE000000, + ConditionalBranchMask = 0xFF000010, + B_cond = ConditionalBranchFixed | 0x00000000 +}; + +// System. +// System instruction encoding is complicated because some instructions use op +// and CR fields to encode parameters. To handle this cleanly, the system +// instructions are split into more than one enum. + +enum SystemOp { + SystemFixed = 0xD5000000, + SystemFMask = 0xFFC00000 +}; + +enum SystemSysRegOp { + SystemSysRegFixed = 0xD5100000, + SystemSysRegFMask = 0xFFD00000, + SystemSysRegMask = 0xFFF00000, + MRS = SystemSysRegFixed | 0x00200000, + MSR = SystemSysRegFixed | 0x00000000 +}; + +enum SystemHintOp { + SystemHintFixed = 0xD503201F, + SystemHintFMask = 0xFFFFF01F, + SystemHintMask = 0xFFFFF01F, + HINT = SystemHintFixed | 0x00000000 +}; + +enum SystemSysOp { + SystemSysFixed = 0xD5080000, + SystemSysFMask = 0xFFF80000, + SystemSysMask = 0xFFF80000, + SYS = SystemSysFixed | 0x00000000 +}; + +// Exception. 
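+// The 16-bit immediate occupies bits 20:5 (ImmException), so the common cases
+// compose to familiar encodings (illustrative): `brk #0` is just BRK ==
+// 0xD4200000 and `svc #0` is SVC == 0xD4000001.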
+enum ExceptionOp { + ExceptionFixed = 0xD4000000, + ExceptionFMask = 0xFF000000, + ExceptionMask = 0xFFE0001F, + HLT = ExceptionFixed | 0x00400000, + BRK = ExceptionFixed | 0x00200000, + SVC = ExceptionFixed | 0x00000001, + HVC = ExceptionFixed | 0x00000002, + SMC = ExceptionFixed | 0x00000003, + DCPS1 = ExceptionFixed | 0x00A00001, + DCPS2 = ExceptionFixed | 0x00A00002, + DCPS3 = ExceptionFixed | 0x00A00003 +}; + +enum MemBarrierOp { + MemBarrierFixed = 0xD503309F, + MemBarrierFMask = 0xFFFFF09F, + MemBarrierMask = 0xFFFFF0FF, + DSB = MemBarrierFixed | 0x00000000, + DMB = MemBarrierFixed | 0x00000020, + ISB = MemBarrierFixed | 0x00000040 +}; + +enum SystemExclusiveMonitorOp { + SystemExclusiveMonitorFixed = 0xD503305F, + SystemExclusiveMonitorFMask = 0xFFFFF0FF, + SystemExclusiveMonitorMask = 0xFFFFF0FF, + CLREX = SystemExclusiveMonitorFixed +}; + +enum SystemPAuthOp { + SystemPAuthFixed = 0xD503211F, + SystemPAuthFMask = 0xFFFFFD1F, + SystemPAuthMask = 0xFFFFFFFF, + PACIA1716 = SystemPAuthFixed | 0x00000100, + PACIB1716 = SystemPAuthFixed | 0x00000140, + AUTIA1716 = SystemPAuthFixed | 0x00000180, + AUTIB1716 = SystemPAuthFixed | 0x000001C0, + PACIAZ = SystemPAuthFixed | 0x00000300, + PACIASP = SystemPAuthFixed | 0x00000320, + PACIBZ = SystemPAuthFixed | 0x00000340, + PACIBSP = SystemPAuthFixed | 0x00000360, + AUTIAZ = SystemPAuthFixed | 0x00000380, + AUTIASP = SystemPAuthFixed | 0x000003A0, + AUTIBZ = SystemPAuthFixed | 0x000003C0, + AUTIBSP = SystemPAuthFixed | 0x000003E0, + + // XPACLRI has the same fixed mask as System Hints and needs to be handled + // differently. + XPACLRI = 0xD50320FF +}; + +// Any load or store. +enum LoadStoreAnyOp { + LoadStoreAnyFMask = 0x0a000000, + LoadStoreAnyFixed = 0x08000000 +}; + +// Any load pair or store pair. +enum LoadStorePairAnyOp { + LoadStorePairAnyFMask = 0x3a000000, + LoadStorePairAnyFixed = 0x28000000 +}; + +#define LOAD_STORE_PAIR_OP_LIST(V) \ + V(STP, w, 0x00000000), \ + V(LDP, w, 0x00400000), \ + V(LDPSW, x, 0x40400000), \ + V(STP, x, 0x80000000), \ + V(LDP, x, 0x80400000), \ + V(STP, s, 0x04000000), \ + V(LDP, s, 0x04400000), \ + V(STP, d, 0x44000000), \ + V(LDP, d, 0x44400000), \ + V(STP, q, 0x84000000), \ + V(LDP, q, 0x84400000) + +// Load/store pair (post, pre and offset.) 
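+// Each addressing mode below adds its own fixed bits on top of these base
+// opcodes, and bit 22 (LoadStorePairLBit) turns a store into the matching
+// load; illustratively, STP_x_off == 0x29000000 | 0x80000000 == 0xA9000000
+// and LDP_x_off == STP_x_off | LoadStorePairLBit.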
+enum LoadStorePairOp { + LoadStorePairMask = 0xC4400000, + LoadStorePairLBit = 1 << 22, + #define LOAD_STORE_PAIR(A, B, C) \ + A##_##B = C + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR) + #undef LOAD_STORE_PAIR +}; + +enum LoadStorePairPostIndexOp { + LoadStorePairPostIndexFixed = 0x28800000, + LoadStorePairPostIndexFMask = 0x3B800000, + LoadStorePairPostIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \ + A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX) + #undef LOAD_STORE_PAIR_POST_INDEX +}; + +enum LoadStorePairPreIndexOp { + LoadStorePairPreIndexFixed = 0x29800000, + LoadStorePairPreIndexFMask = 0x3B800000, + LoadStorePairPreIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \ + A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX) + #undef LOAD_STORE_PAIR_PRE_INDEX +}; + +enum LoadStorePairOffsetOp { + LoadStorePairOffsetFixed = 0x29000000, + LoadStorePairOffsetFMask = 0x3B800000, + LoadStorePairOffsetMask = 0xFFC00000, + #define LOAD_STORE_PAIR_OFFSET(A, B, C) \ + A##_##B##_off = LoadStorePairOffsetFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET) + #undef LOAD_STORE_PAIR_OFFSET +}; + +enum LoadStorePairNonTemporalOp { + LoadStorePairNonTemporalFixed = 0x28000000, + LoadStorePairNonTemporalFMask = 0x3B800000, + LoadStorePairNonTemporalMask = 0xFFC00000, + LoadStorePairNonTemporalLBit = 1 << 22, + STNP_w = LoadStorePairNonTemporalFixed | STP_w, + LDNP_w = LoadStorePairNonTemporalFixed | LDP_w, + STNP_x = LoadStorePairNonTemporalFixed | STP_x, + LDNP_x = LoadStorePairNonTemporalFixed | LDP_x, + STNP_s = LoadStorePairNonTemporalFixed | STP_s, + LDNP_s = LoadStorePairNonTemporalFixed | LDP_s, + STNP_d = LoadStorePairNonTemporalFixed | STP_d, + LDNP_d = LoadStorePairNonTemporalFixed | LDP_d, + STNP_q = LoadStorePairNonTemporalFixed | STP_q, + LDNP_q = LoadStorePairNonTemporalFixed | LDP_q +}; + +// Load literal. +enum LoadLiteralOp { + LoadLiteralFixed = 0x18000000, + LoadLiteralFMask = 0x3B000000, + LoadLiteralMask = 0xFF000000, + LDR_w_lit = LoadLiteralFixed | 0x00000000, + LDR_x_lit = LoadLiteralFixed | 0x40000000, + LDRSW_x_lit = LoadLiteralFixed | 0x80000000, + PRFM_lit = LoadLiteralFixed | 0xC0000000, + LDR_s_lit = LoadLiteralFixed | 0x04000000, + LDR_d_lit = LoadLiteralFixed | 0x44000000, + LDR_q_lit = LoadLiteralFixed | 0x84000000 +}; + +#define LOAD_STORE_OP_LIST(V) \ + V(ST, RB, w, 0x00000000), \ + V(ST, RH, w, 0x40000000), \ + V(ST, R, w, 0x80000000), \ + V(ST, R, x, 0xC0000000), \ + V(LD, RB, w, 0x00400000), \ + V(LD, RH, w, 0x40400000), \ + V(LD, R, w, 0x80400000), \ + V(LD, R, x, 0xC0400000), \ + V(LD, RSB, x, 0x00800000), \ + V(LD, RSH, x, 0x40800000), \ + V(LD, RSW, x, 0x80800000), \ + V(LD, RSB, w, 0x00C00000), \ + V(LD, RSH, w, 0x40C00000), \ + V(ST, R, b, 0x04000000), \ + V(ST, R, h, 0x44000000), \ + V(ST, R, s, 0x84000000), \ + V(ST, R, d, 0xC4000000), \ + V(ST, R, q, 0x04800000), \ + V(LD, R, b, 0x04400000), \ + V(LD, R, h, 0x44400000), \ + V(LD, R, s, 0x84400000), \ + V(LD, R, d, 0xC4400000), \ + V(LD, R, q, 0x04C00000) + +// Load/store (post, pre, offset and unsigned.) +enum LoadStoreOp { + LoadStoreMask = 0xC4C00000, + LoadStoreVMask = 0x04000000, + #define LOAD_STORE(A, B, C, D) \ + A##B##_##C = D + LOAD_STORE_OP_LIST(LOAD_STORE), + #undef LOAD_STORE + PRFM = 0xC0800000 +}; + +// Load/store unscaled offset. 
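+// The unscaled (LDUR/STUR) forms reuse the base load/store opcodes under a
+// different fixed pattern; illustratively, LDUR_x == 0x38000000 | 0xC0400000
+// == 0xF8400000, with bits 11:10 clear distinguishing it from the indexed
+// forms.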
+enum LoadStoreUnscaledOffsetOp { + LoadStoreUnscaledOffsetFixed = 0x38000000, + LoadStoreUnscaledOffsetFMask = 0x3B200C00, + LoadStoreUnscaledOffsetMask = 0xFFE00C00, + PRFUM = LoadStoreUnscaledOffsetFixed | PRFM, + #define LOAD_STORE_UNSCALED(A, B, C, D) \ + A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED) + #undef LOAD_STORE_UNSCALED +}; + +// Load/store post index. +enum LoadStorePostIndex { + LoadStorePostIndexFixed = 0x38000400, + LoadStorePostIndexFMask = 0x3B200C00, + LoadStorePostIndexMask = 0xFFE00C00, + #define LOAD_STORE_POST_INDEX(A, B, C, D) \ + A##B##_##C##_post = LoadStorePostIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX) + #undef LOAD_STORE_POST_INDEX +}; + +// Load/store pre index. +enum LoadStorePreIndex { + LoadStorePreIndexFixed = 0x38000C00, + LoadStorePreIndexFMask = 0x3B200C00, + LoadStorePreIndexMask = 0xFFE00C00, + #define LOAD_STORE_PRE_INDEX(A, B, C, D) \ + A##B##_##C##_pre = LoadStorePreIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX) + #undef LOAD_STORE_PRE_INDEX +}; + +// Load/store unsigned offset. +enum LoadStoreUnsignedOffset { + LoadStoreUnsignedOffsetFixed = 0x39000000, + LoadStoreUnsignedOffsetFMask = 0x3B000000, + LoadStoreUnsignedOffsetMask = 0xFFC00000, + PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM, + #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \ + A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET) + #undef LOAD_STORE_UNSIGNED_OFFSET +}; + +// Load/store register offset. +enum LoadStoreRegisterOffset { + LoadStoreRegisterOffsetFixed = 0x38200800, + LoadStoreRegisterOffsetFMask = 0x3B200C00, + LoadStoreRegisterOffsetMask = 0xFFE00C00, + PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM, + #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \ + A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET) + #undef LOAD_STORE_REGISTER_OFFSET +}; + +enum LoadStoreExclusive { + LoadStoreExclusiveFixed = 0x08000000, + LoadStoreExclusiveFMask = 0x3F000000, + LoadStoreExclusiveMask = 0xFFE08000, + STXRB_w = LoadStoreExclusiveFixed | 0x00000000, + STXRH_w = LoadStoreExclusiveFixed | 0x40000000, + STXR_w = LoadStoreExclusiveFixed | 0x80000000, + STXR_x = LoadStoreExclusiveFixed | 0xC0000000, + LDXRB_w = LoadStoreExclusiveFixed | 0x00400000, + LDXRH_w = LoadStoreExclusiveFixed | 0x40400000, + LDXR_w = LoadStoreExclusiveFixed | 0x80400000, + LDXR_x = LoadStoreExclusiveFixed | 0xC0400000, + STXP_w = LoadStoreExclusiveFixed | 0x80200000, + STXP_x = LoadStoreExclusiveFixed | 0xC0200000, + LDXP_w = LoadStoreExclusiveFixed | 0x80600000, + LDXP_x = LoadStoreExclusiveFixed | 0xC0600000, + STLXRB_w = LoadStoreExclusiveFixed | 0x00008000, + STLXRH_w = LoadStoreExclusiveFixed | 0x40008000, + STLXR_w = LoadStoreExclusiveFixed | 0x80008000, + STLXR_x = LoadStoreExclusiveFixed | 0xC0008000, + LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000, + LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000, + LDAXR_w = LoadStoreExclusiveFixed | 0x80408000, + LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000, + STLXP_w = LoadStoreExclusiveFixed | 0x80208000, + STLXP_x = LoadStoreExclusiveFixed | 0xC0208000, + LDAXP_w = LoadStoreExclusiveFixed | 0x80608000, + LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000, + STLRB_w = LoadStoreExclusiveFixed | 0x00808000, + STLRH_w = LoadStoreExclusiveFixed | 0x40808000, + STLR_w = LoadStoreExclusiveFixed | 0x80808000, + STLR_x = LoadStoreExclusiveFixed | 0xC0808000, + LDARB_w = LoadStoreExclusiveFixed | 
0x00C08000, + LDARH_w = LoadStoreExclusiveFixed | 0x40C08000, + LDAR_w = LoadStoreExclusiveFixed | 0x80C08000, + LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000, + + // v8.1 Load/store LORegion ops + STLLRB = LoadStoreExclusiveFixed | 0x00800000, + LDLARB = LoadStoreExclusiveFixed | 0x00C00000, + STLLRH = LoadStoreExclusiveFixed | 0x40800000, + LDLARH = LoadStoreExclusiveFixed | 0x40C00000, + STLLR_w = LoadStoreExclusiveFixed | 0x80800000, + LDLAR_w = LoadStoreExclusiveFixed | 0x80C00000, + STLLR_x = LoadStoreExclusiveFixed | 0xC0800000, + LDLAR_x = LoadStoreExclusiveFixed | 0xC0C00000, + + // v8.1 Load/store exclusive ops + LSEBit_l = 0x00400000, + LSEBit_o0 = 0x00008000, + LSEBit_sz = 0x40000000, + CASFixed = LoadStoreExclusiveFixed | 0x80A00000, + CASBFixed = LoadStoreExclusiveFixed | 0x00A00000, + CASHFixed = LoadStoreExclusiveFixed | 0x40A00000, + CASPFixed = LoadStoreExclusiveFixed | 0x00200000, + CAS_w = CASFixed, + CAS_x = CASFixed | LSEBit_sz, + CASA_w = CASFixed | LSEBit_l, + CASA_x = CASFixed | LSEBit_l | LSEBit_sz, + CASL_w = CASFixed | LSEBit_o0, + CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz, + CASAL_w = CASFixed | LSEBit_l | LSEBit_o0, + CASAL_x = CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz, + CASB = CASBFixed, + CASAB = CASBFixed | LSEBit_l, + CASLB = CASBFixed | LSEBit_o0, + CASALB = CASBFixed | LSEBit_l | LSEBit_o0, + CASH = CASHFixed, + CASAH = CASHFixed | LSEBit_l, + CASLH = CASHFixed | LSEBit_o0, + CASALH = CASHFixed | LSEBit_l | LSEBit_o0, + CASP_w = CASPFixed, + CASP_x = CASPFixed | LSEBit_sz, + CASPA_w = CASPFixed | LSEBit_l, + CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz, + CASPL_w = CASPFixed | LSEBit_o0, + CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz, + CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0, + CASPAL_x = CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz +}; + +#define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \ + V(LDADD, 0x00000000), \ + V(LDCLR, 0x00001000), \ + V(LDEOR, 0x00002000), \ + V(LDSET, 0x00003000), \ + V(LDSMAX, 0x00004000), \ + V(LDSMIN, 0x00005000), \ + V(LDUMAX, 0x00006000), \ + V(LDUMIN, 0x00007000) + +// Atomic memory. 
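+// ATOMIC_MEMORY_SIMPLE below crosses each base opcode with the size
+// (bits 31:30) and acquire/release (bits 23:22) variants; illustratively,
+// LDADDAL_x == AtomicMemoryFixed | 0xC0C00000 == 0xF8E00000.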
+enum AtomicMemoryOp { + AtomicMemoryFixed = 0x38200000, + AtomicMemoryFMask = 0x3B200C00, + AtomicMemoryMask = 0xFFE0FC00, + SWPB = AtomicMemoryFixed | 0x00008000, + SWPAB = AtomicMemoryFixed | 0x00808000, + SWPLB = AtomicMemoryFixed | 0x00408000, + SWPALB = AtomicMemoryFixed | 0x00C08000, + SWPH = AtomicMemoryFixed | 0x40008000, + SWPAH = AtomicMemoryFixed | 0x40808000, + SWPLH = AtomicMemoryFixed | 0x40408000, + SWPALH = AtomicMemoryFixed | 0x40C08000, + SWP_w = AtomicMemoryFixed | 0x80008000, + SWPA_w = AtomicMemoryFixed | 0x80808000, + SWPL_w = AtomicMemoryFixed | 0x80408000, + SWPAL_w = AtomicMemoryFixed | 0x80C08000, + SWP_x = AtomicMemoryFixed | 0xC0008000, + SWPA_x = AtomicMemoryFixed | 0xC0808000, + SWPL_x = AtomicMemoryFixed | 0xC0408000, + SWPAL_x = AtomicMemoryFixed | 0xC0C08000, + LDAPRB = AtomicMemoryFixed | 0x0080C000, + LDAPRH = AtomicMemoryFixed | 0x4080C000, + LDAPR_w = AtomicMemoryFixed | 0x8080C000, + LDAPR_x = AtomicMemoryFixed | 0xC080C000, + + AtomicMemorySimpleFMask = 0x3B208C00, + AtomicMemorySimpleOpMask = 0x00007000, +#define ATOMIC_MEMORY_SIMPLE(N, OP) \ + N##Op = OP, \ + N##B = AtomicMemoryFixed | OP, \ + N##AB = AtomicMemoryFixed | OP | 0x00800000, \ + N##LB = AtomicMemoryFixed | OP | 0x00400000, \ + N##ALB = AtomicMemoryFixed | OP | 0x00C00000, \ + N##H = AtomicMemoryFixed | OP | 0x40000000, \ + N##AH = AtomicMemoryFixed | OP | 0x40800000, \ + N##LH = AtomicMemoryFixed | OP | 0x40400000, \ + N##ALH = AtomicMemoryFixed | OP | 0x40C00000, \ + N##_w = AtomicMemoryFixed | OP | 0x80000000, \ + N##A_w = AtomicMemoryFixed | OP | 0x80800000, \ + N##L_w = AtomicMemoryFixed | OP | 0x80400000, \ + N##AL_w = AtomicMemoryFixed | OP | 0x80C00000, \ + N##_x = AtomicMemoryFixed | OP | 0xC0000000, \ + N##A_x = AtomicMemoryFixed | OP | 0xC0800000, \ + N##L_x = AtomicMemoryFixed | OP | 0xC0400000, \ + N##AL_x = AtomicMemoryFixed | OP | 0xC0C00000 + + ATOMIC_MEMORY_SIMPLE_OPC_LIST(ATOMIC_MEMORY_SIMPLE) +#undef ATOMIC_MEMORY_SIMPLE +}; + +// Conditional compare. +enum ConditionalCompareOp { + ConditionalCompareMask = 0x60000000, + CCMN = 0x20000000, + CCMP = 0x60000000 +}; + +// Conditional compare register. +enum ConditionalCompareRegisterOp { + ConditionalCompareRegisterFixed = 0x1A400000, + ConditionalCompareRegisterFMask = 0x1FE00800, + ConditionalCompareRegisterMask = 0xFFE00C10, + CCMN_w = ConditionalCompareRegisterFixed | CCMN, + CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN, + CCMP_w = ConditionalCompareRegisterFixed | CCMP, + CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP +}; + +// Conditional compare immediate. +enum ConditionalCompareImmediateOp { + ConditionalCompareImmediateFixed = 0x1A400800, + ConditionalCompareImmediateFMask = 0x1FE00800, + ConditionalCompareImmediateMask = 0xFFE00C10, + CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN, + CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN, + CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP, + CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP +}; + +// Conditional select. 
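+// The common aliases are built from these four ops; for example `cset Xd,
+// cond` is `csinc Xd, xzr, xzr, InvertCondition(cond)`, so no separate CSET
+// encoding is needed.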
+enum ConditionalSelectOp { + ConditionalSelectFixed = 0x1A800000, + ConditionalSelectFMask = 0x1FE00000, + ConditionalSelectMask = 0xFFE00C00, + CSEL_w = ConditionalSelectFixed | 0x00000000, + CSEL_x = ConditionalSelectFixed | 0x80000000, + CSEL = CSEL_w, + CSINC_w = ConditionalSelectFixed | 0x00000400, + CSINC_x = ConditionalSelectFixed | 0x80000400, + CSINC = CSINC_w, + CSINV_w = ConditionalSelectFixed | 0x40000000, + CSINV_x = ConditionalSelectFixed | 0xC0000000, + CSINV = CSINV_w, + CSNEG_w = ConditionalSelectFixed | 0x40000400, + CSNEG_x = ConditionalSelectFixed | 0xC0000400, + CSNEG = CSNEG_w +}; + +// Data processing 1 source. +enum DataProcessing1SourceOp { + DataProcessing1SourceFixed = 0x5AC00000, + DataProcessing1SourceFMask = 0x5FE00000, + DataProcessing1SourceMask = 0xFFFFFC00, + RBIT = DataProcessing1SourceFixed | 0x00000000, + RBIT_w = RBIT, + RBIT_x = RBIT | SixtyFourBits, + REV16 = DataProcessing1SourceFixed | 0x00000400, + REV16_w = REV16, + REV16_x = REV16 | SixtyFourBits, + REV = DataProcessing1SourceFixed | 0x00000800, + REV_w = REV, + REV32_x = REV | SixtyFourBits, + REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00, + CLZ = DataProcessing1SourceFixed | 0x00001000, + CLZ_w = CLZ, + CLZ_x = CLZ | SixtyFourBits, + CLS = DataProcessing1SourceFixed | 0x00001400, + CLS_w = CLS, + CLS_x = CLS | SixtyFourBits, + + // Pointer authentication instructions in Armv8.3. + PACIA = DataProcessing1SourceFixed | 0x80010000, + PACIB = DataProcessing1SourceFixed | 0x80010400, + PACDA = DataProcessing1SourceFixed | 0x80010800, + PACDB = DataProcessing1SourceFixed | 0x80010C00, + AUTIA = DataProcessing1SourceFixed | 0x80011000, + AUTIB = DataProcessing1SourceFixed | 0x80011400, + AUTDA = DataProcessing1SourceFixed | 0x80011800, + AUTDB = DataProcessing1SourceFixed | 0x80011C00, + PACIZA = DataProcessing1SourceFixed | 0x80012000, + PACIZB = DataProcessing1SourceFixed | 0x80012400, + PACDZA = DataProcessing1SourceFixed | 0x80012800, + PACDZB = DataProcessing1SourceFixed | 0x80012C00, + AUTIZA = DataProcessing1SourceFixed | 0x80013000, + AUTIZB = DataProcessing1SourceFixed | 0x80013400, + AUTDZA = DataProcessing1SourceFixed | 0x80013800, + AUTDZB = DataProcessing1SourceFixed | 0x80013C00, + XPACI = DataProcessing1SourceFixed | 0x80014000, + XPACD = DataProcessing1SourceFixed | 0x80014400 +}; + +// Data processing 2 source. 
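+// Register-controlled shifts assemble through these ops: `lsl x0, x1, x2` is
+// the alias of LSLV_x, and LSR/ASR/ROR by register map to LSRV/ASRV/RORV in
+// the same way.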
+enum DataProcessing2SourceOp { + DataProcessing2SourceFixed = 0x1AC00000, + DataProcessing2SourceFMask = 0x5FE00000, + DataProcessing2SourceMask = 0xFFE0FC00, + UDIV_w = DataProcessing2SourceFixed | 0x00000800, + UDIV_x = DataProcessing2SourceFixed | 0x80000800, + UDIV = UDIV_w, + SDIV_w = DataProcessing2SourceFixed | 0x00000C00, + SDIV_x = DataProcessing2SourceFixed | 0x80000C00, + SDIV = SDIV_w, + LSLV_w = DataProcessing2SourceFixed | 0x00002000, + LSLV_x = DataProcessing2SourceFixed | 0x80002000, + LSLV = LSLV_w, + LSRV_w = DataProcessing2SourceFixed | 0x00002400, + LSRV_x = DataProcessing2SourceFixed | 0x80002400, + LSRV = LSRV_w, + ASRV_w = DataProcessing2SourceFixed | 0x00002800, + ASRV_x = DataProcessing2SourceFixed | 0x80002800, + ASRV = ASRV_w, + RORV_w = DataProcessing2SourceFixed | 0x00002C00, + RORV_x = DataProcessing2SourceFixed | 0x80002C00, + RORV = RORV_w, + PACGA = DataProcessing2SourceFixed | SixtyFourBits | 0x00003000, + CRC32B = DataProcessing2SourceFixed | 0x00004000, + CRC32H = DataProcessing2SourceFixed | 0x00004400, + CRC32W = DataProcessing2SourceFixed | 0x00004800, + CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00, + CRC32CB = DataProcessing2SourceFixed | 0x00005000, + CRC32CH = DataProcessing2SourceFixed | 0x00005400, + CRC32CW = DataProcessing2SourceFixed | 0x00005800, + CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00 +}; + +// Data processing 3 source. +enum DataProcessing3SourceOp { + DataProcessing3SourceFixed = 0x1B000000, + DataProcessing3SourceFMask = 0x1F000000, + DataProcessing3SourceMask = 0xFFE08000, + MADD_w = DataProcessing3SourceFixed | 0x00000000, + MADD_x = DataProcessing3SourceFixed | 0x80000000, + MADD = MADD_w, + MSUB_w = DataProcessing3SourceFixed | 0x00008000, + MSUB_x = DataProcessing3SourceFixed | 0x80008000, + MSUB = MSUB_w, + SMADDL_x = DataProcessing3SourceFixed | 0x80200000, + SMSUBL_x = DataProcessing3SourceFixed | 0x80208000, + SMULH_x = DataProcessing3SourceFixed | 0x80400000, + UMADDL_x = DataProcessing3SourceFixed | 0x80A00000, + UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000, + UMULH_x = DataProcessing3SourceFixed | 0x80C00000 +}; + +// Floating point compare. +enum FPCompareOp { + FPCompareFixed = 0x1E202000, + FPCompareFMask = 0x5F203C00, + FPCompareMask = 0xFFE0FC1F, + FCMP_h = FPCompareFixed | FP16 | 0x00000000, + FCMP_s = FPCompareFixed | 0x00000000, + FCMP_d = FPCompareFixed | FP64 | 0x00000000, + FCMP = FCMP_s, + FCMP_h_zero = FPCompareFixed | FP16 | 0x00000008, + FCMP_s_zero = FPCompareFixed | 0x00000008, + FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008, + FCMP_zero = FCMP_s_zero, + FCMPE_h = FPCompareFixed | FP16 | 0x00000010, + FCMPE_s = FPCompareFixed | 0x00000010, + FCMPE_d = FPCompareFixed | FP64 | 0x00000010, + FCMPE = FCMPE_s, + FCMPE_h_zero = FPCompareFixed | FP16 | 0x00000018, + FCMPE_s_zero = FPCompareFixed | 0x00000018, + FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018, + FCMPE_zero = FCMPE_s_zero +}; + +// Floating point conditional compare. 
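+// FCCMP/FCCMPE only perform the comparison when the condition holds;
+// otherwise the 4-bit nzcv immediate (bits 3:0) is copied straight into the
+// flags.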
+enum FPConditionalCompareOp { + FPConditionalCompareFixed = 0x1E200400, + FPConditionalCompareFMask = 0x5F200C00, + FPConditionalCompareMask = 0xFFE00C10, + FCCMP_h = FPConditionalCompareFixed | FP16 | 0x00000000, + FCCMP_s = FPConditionalCompareFixed | 0x00000000, + FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000, + FCCMP = FCCMP_s, + FCCMPE_h = FPConditionalCompareFixed | FP16 | 0x00000010, + FCCMPE_s = FPConditionalCompareFixed | 0x00000010, + FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010, + FCCMPE = FCCMPE_s +}; + +// Floating point conditional select. +enum FPConditionalSelectOp { + FPConditionalSelectFixed = 0x1E200C00, + FPConditionalSelectFMask = 0x5F200C00, + FPConditionalSelectMask = 0xFFE00C00, + FCSEL_h = FPConditionalSelectFixed | FP16 | 0x00000000, + FCSEL_s = FPConditionalSelectFixed | 0x00000000, + FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000, + FCSEL = FCSEL_s +}; + +// Floating point immediate. +enum FPImmediateOp { + FPImmediateFixed = 0x1E201000, + FPImmediateFMask = 0x5F201C00, + FPImmediateMask = 0xFFE01C00, + FMOV_h_imm = FPImmediateFixed | FP16 | 0x00000000, + FMOV_s_imm = FPImmediateFixed | 0x00000000, + FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 +}; + +// Floating point data processing 1 source. +enum FPDataProcessing1SourceOp { + FPDataProcessing1SourceFixed = 0x1E204000, + FPDataProcessing1SourceFMask = 0x5F207C00, + FPDataProcessing1SourceMask = 0xFFFFFC00, + FMOV_h = FPDataProcessing1SourceFixed | FP16 | 0x00000000, + FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, + FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, + FMOV = FMOV_s, + FABS_h = FPDataProcessing1SourceFixed | FP16 | 0x00008000, + FABS_s = FPDataProcessing1SourceFixed | 0x00008000, + FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, + FABS = FABS_s, + FNEG_h = FPDataProcessing1SourceFixed | FP16 | 0x00010000, + FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, + FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, + FNEG = FNEG_s, + FSQRT_h = FPDataProcessing1SourceFixed | FP16 | 0x00018000, + FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, + FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, + FSQRT = FSQRT_s, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, + FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, + FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, + FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, + FRINTN_h = FPDataProcessing1SourceFixed | FP16 | 0x00040000, + FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, + FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, + FRINTN = FRINTN_s, + FRINTP_h = FPDataProcessing1SourceFixed | FP16 | 0x00048000, + FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, + FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, + FRINTP = FRINTP_s, + FRINTM_h = FPDataProcessing1SourceFixed | FP16 | 0x00050000, + FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, + FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, + FRINTM = FRINTM_s, + FRINTZ_h = FPDataProcessing1SourceFixed | FP16 | 0x00058000, + FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000, + FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, + FRINTZ = FRINTZ_s, + FRINTA_h = FPDataProcessing1SourceFixed | FP16 | 0x00060000, + FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, + FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 
0x00060000, + FRINTA = FRINTA_s, + FRINTX_h = FPDataProcessing1SourceFixed | FP16 | 0x00070000, + FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, + FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, + FRINTX = FRINTX_s, + FRINTI_h = FPDataProcessing1SourceFixed | FP16 | 0x00078000, + FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, + FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, + FRINTI = FRINTI_s +}; + +// Floating point data processing 2 source. +enum FPDataProcessing2SourceOp { + FPDataProcessing2SourceFixed = 0x1E200800, + FPDataProcessing2SourceFMask = 0x5F200C00, + FPDataProcessing2SourceMask = 0xFFE0FC00, + FMUL = FPDataProcessing2SourceFixed | 0x00000000, + FMUL_h = FMUL | FP16, + FMUL_s = FMUL, + FMUL_d = FMUL | FP64, + FDIV = FPDataProcessing2SourceFixed | 0x00001000, + FDIV_h = FDIV | FP16, + FDIV_s = FDIV, + FDIV_d = FDIV | FP64, + FADD = FPDataProcessing2SourceFixed | 0x00002000, + FADD_h = FADD | FP16, + FADD_s = FADD, + FADD_d = FADD | FP64, + FSUB = FPDataProcessing2SourceFixed | 0x00003000, + FSUB_h = FSUB | FP16, + FSUB_s = FSUB, + FSUB_d = FSUB | FP64, + FMAX = FPDataProcessing2SourceFixed | 0x00004000, + FMAX_h = FMAX | FP16, + FMAX_s = FMAX, + FMAX_d = FMAX | FP64, + FMIN = FPDataProcessing2SourceFixed | 0x00005000, + FMIN_h = FMIN | FP16, + FMIN_s = FMIN, + FMIN_d = FMIN | FP64, + FMAXNM = FPDataProcessing2SourceFixed | 0x00006000, + FMAXNM_h = FMAXNM | FP16, + FMAXNM_s = FMAXNM, + FMAXNM_d = FMAXNM | FP64, + FMINNM = FPDataProcessing2SourceFixed | 0x00007000, + FMINNM_h = FMINNM | FP16, + FMINNM_s = FMINNM, + FMINNM_d = FMINNM | FP64, + FNMUL = FPDataProcessing2SourceFixed | 0x00008000, + FNMUL_h = FNMUL | FP16, + FNMUL_s = FNMUL, + FNMUL_d = FNMUL | FP64 +}; + +// Floating point data processing 3 source. +enum FPDataProcessing3SourceOp { + FPDataProcessing3SourceFixed = 0x1F000000, + FPDataProcessing3SourceFMask = 0x5F000000, + FPDataProcessing3SourceMask = 0xFFE08000, + FMADD_h = FPDataProcessing3SourceFixed | 0x00C00000, + FMSUB_h = FPDataProcessing3SourceFixed | 0x00C08000, + FNMADD_h = FPDataProcessing3SourceFixed | 0x00E00000, + FNMSUB_h = FPDataProcessing3SourceFixed | 0x00E08000, + FMADD_s = FPDataProcessing3SourceFixed | 0x00000000, + FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000, + FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000, + FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000, + FMADD_d = FPDataProcessing3SourceFixed | 0x00400000, + FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000, + FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000, + FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000 +}; + +// Conversion between floating point and integer. 
+enum FPIntegerConvertOp { + FPIntegerConvertFixed = 0x1E200000, + FPIntegerConvertFMask = 0x5F20FC00, + FPIntegerConvertMask = 0xFFFFFC00, + FCVTNS = FPIntegerConvertFixed | 0x00000000, + FCVTNS_wh = FCVTNS | FP16, + FCVTNS_xh = FCVTNS | SixtyFourBits | FP16, + FCVTNS_ws = FCVTNS, + FCVTNS_xs = FCVTNS | SixtyFourBits, + FCVTNS_wd = FCVTNS | FP64, + FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, + FCVTNU = FPIntegerConvertFixed | 0x00010000, + FCVTNU_wh = FCVTNU | FP16, + FCVTNU_xh = FCVTNU | SixtyFourBits | FP16, + FCVTNU_ws = FCVTNU, + FCVTNU_xs = FCVTNU | SixtyFourBits, + FCVTNU_wd = FCVTNU | FP64, + FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, + FCVTPS = FPIntegerConvertFixed | 0x00080000, + FCVTPS_wh = FCVTPS | FP16, + FCVTPS_xh = FCVTPS | SixtyFourBits | FP16, + FCVTPS_ws = FCVTPS, + FCVTPS_xs = FCVTPS | SixtyFourBits, + FCVTPS_wd = FCVTPS | FP64, + FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, + FCVTPU = FPIntegerConvertFixed | 0x00090000, + FCVTPU_wh = FCVTPU | FP16, + FCVTPU_xh = FCVTPU | SixtyFourBits | FP16, + FCVTPU_ws = FCVTPU, + FCVTPU_xs = FCVTPU | SixtyFourBits, + FCVTPU_wd = FCVTPU | FP64, + FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, + FCVTMS = FPIntegerConvertFixed | 0x00100000, + FCVTMS_wh = FCVTMS | FP16, + FCVTMS_xh = FCVTMS | SixtyFourBits | FP16, + FCVTMS_ws = FCVTMS, + FCVTMS_xs = FCVTMS | SixtyFourBits, + FCVTMS_wd = FCVTMS | FP64, + FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, + FCVTMU = FPIntegerConvertFixed | 0x00110000, + FCVTMU_wh = FCVTMU | FP16, + FCVTMU_xh = FCVTMU | SixtyFourBits | FP16, + FCVTMU_ws = FCVTMU, + FCVTMU_xs = FCVTMU | SixtyFourBits, + FCVTMU_wd = FCVTMU | FP64, + FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, + FCVTZS = FPIntegerConvertFixed | 0x00180000, + FCVTZS_wh = FCVTZS | FP16, + FCVTZS_xh = FCVTZS | SixtyFourBits | FP16, + FCVTZS_ws = FCVTZS, + FCVTZS_xs = FCVTZS | SixtyFourBits, + FCVTZS_wd = FCVTZS | FP64, + FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, + FCVTZU = FPIntegerConvertFixed | 0x00190000, + FCVTZU_wh = FCVTZU | FP16, + FCVTZU_xh = FCVTZU | SixtyFourBits | FP16, + FCVTZU_ws = FCVTZU, + FCVTZU_xs = FCVTZU | SixtyFourBits, + FCVTZU_wd = FCVTZU | FP64, + FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, + SCVTF = FPIntegerConvertFixed | 0x00020000, + SCVTF_hw = SCVTF | FP16, + SCVTF_hx = SCVTF | SixtyFourBits | FP16, + SCVTF_sw = SCVTF, + SCVTF_sx = SCVTF | SixtyFourBits, + SCVTF_dw = SCVTF | FP64, + SCVTF_dx = SCVTF | SixtyFourBits | FP64, + UCVTF = FPIntegerConvertFixed | 0x00030000, + UCVTF_hw = UCVTF | FP16, + UCVTF_hx = UCVTF | SixtyFourBits | FP16, + UCVTF_sw = UCVTF, + UCVTF_sx = UCVTF | SixtyFourBits, + UCVTF_dw = UCVTF | FP64, + UCVTF_dx = UCVTF | SixtyFourBits | FP64, + FCVTAS = FPIntegerConvertFixed | 0x00040000, + FCVTAS_wh = FCVTAS | FP16, + FCVTAS_xh = FCVTAS | SixtyFourBits | FP16, + FCVTAS_ws = FCVTAS, + FCVTAS_xs = FCVTAS | SixtyFourBits, + FCVTAS_wd = FCVTAS | FP64, + FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, + FCVTAU = FPIntegerConvertFixed | 0x00050000, + FCVTAU_wh = FCVTAU | FP16, + FCVTAU_xh = FCVTAU | SixtyFourBits | FP16, + FCVTAU_ws = FCVTAU, + FCVTAU_xs = FCVTAU | SixtyFourBits, + FCVTAU_wd = FCVTAU | FP64, + FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, + FMOV_wh = FPIntegerConvertFixed | 0x00060000 | FP16, + FMOV_hw = FPIntegerConvertFixed | 0x00070000 | FP16, + FMOV_xh = FMOV_wh | SixtyFourBits, + FMOV_hx = FMOV_hw | SixtyFourBits, + FMOV_ws = FPIntegerConvertFixed | 0x00060000, + FMOV_sw = FPIntegerConvertFixed | 0x00070000, + FMOV_xd = FMOV_ws | SixtyFourBits | FP64, + FMOV_dx = FMOV_sw | SixtyFourBits | FP64, + FMOV_d1_x 
= FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, + FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000, + FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000 +}; + +// Conversion between fixed point and floating point. +enum FPFixedPointConvertOp { + FPFixedPointConvertFixed = 0x1E000000, + FPFixedPointConvertFMask = 0x5F200000, + FPFixedPointConvertMask = 0xFFFF0000, + FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000, + FCVTZS_wh_fixed = FCVTZS_fixed | FP16, + FCVTZS_xh_fixed = FCVTZS_fixed | SixtyFourBits | FP16, + FCVTZS_ws_fixed = FCVTZS_fixed, + FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits, + FCVTZS_wd_fixed = FCVTZS_fixed | FP64, + FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64, + FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000, + FCVTZU_wh_fixed = FCVTZU_fixed | FP16, + FCVTZU_xh_fixed = FCVTZU_fixed | SixtyFourBits | FP16, + FCVTZU_ws_fixed = FCVTZU_fixed, + FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits, + FCVTZU_wd_fixed = FCVTZU_fixed | FP64, + FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64, + SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000, + SCVTF_hw_fixed = SCVTF_fixed | FP16, + SCVTF_hx_fixed = SCVTF_fixed | SixtyFourBits | FP16, + SCVTF_sw_fixed = SCVTF_fixed, + SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits, + SCVTF_dw_fixed = SCVTF_fixed | FP64, + SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64, + UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000, + UCVTF_hw_fixed = UCVTF_fixed | FP16, + UCVTF_hx_fixed = UCVTF_fixed | SixtyFourBits | FP16, + UCVTF_sw_fixed = UCVTF_fixed, + UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits, + UCVTF_dw_fixed = UCVTF_fixed | FP64, + UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 +}; + +// Crypto - two register SHA. +enum Crypto2RegSHAOp { + Crypto2RegSHAFixed = 0x5E280800, + Crypto2RegSHAFMask = 0xFF3E0C00 +}; + +// Crypto - three register SHA. +enum Crypto3RegSHAOp { + Crypto3RegSHAFixed = 0x5E000000, + Crypto3RegSHAFMask = 0xFF208C00 +}; + +// Crypto - AES. +enum CryptoAESOp { + CryptoAESFixed = 0x4E280800, + CryptoAESFMask = 0xFF3E0C00 +}; + +// NEON instructions with two register operands. 
+enum NEON2RegMiscOp { + NEON2RegMiscFixed = 0x0E200800, + NEON2RegMiscFMask = 0x9F3E0C00, + NEON2RegMiscMask = 0xBF3FFC00, + NEON2RegMiscUBit = 0x20000000, + NEON_REV64 = NEON2RegMiscFixed | 0x00000000, + NEON_REV32 = NEON2RegMiscFixed | 0x20000000, + NEON_REV16 = NEON2RegMiscFixed | 0x00001000, + NEON_SADDLP = NEON2RegMiscFixed | 0x00002000, + NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit, + NEON_SUQADD = NEON2RegMiscFixed | 0x00003000, + NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit, + NEON_CLS = NEON2RegMiscFixed | 0x00004000, + NEON_CLZ = NEON2RegMiscFixed | 0x20004000, + NEON_CNT = NEON2RegMiscFixed | 0x00005000, + NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000, + NEON_SADALP = NEON2RegMiscFixed | 0x00006000, + NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit, + NEON_SQABS = NEON2RegMiscFixed | 0x00007000, + NEON_SQNEG = NEON2RegMiscFixed | 0x20007000, + NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000, + NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000, + NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000, + NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000, + NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000, + NEON_ABS = NEON2RegMiscFixed | 0x0000B000, + NEON_NEG = NEON2RegMiscFixed | 0x2000B000, + NEON_XTN = NEON2RegMiscFixed | 0x00012000, + NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000, + NEON_SHLL = NEON2RegMiscFixed | 0x20013000, + NEON_SQXTN = NEON2RegMiscFixed | 0x00014000, + NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit, + + NEON2RegMiscOpcode = 0x0001F000, + NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode, + NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode, + NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode, + NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode, + + // These instructions use only one bit of the size field. The other bit is + // used to distinguish between instructions. 
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000, + NEON_FABS = NEON2RegMiscFixed | 0x0080F000, + NEON_FNEG = NEON2RegMiscFixed | 0x2080F000, + NEON_FCVTN = NEON2RegMiscFixed | 0x00016000, + NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000, + NEON_FCVTL = NEON2RegMiscFixed | 0x00017000, + NEON_FRINTN = NEON2RegMiscFixed | 0x00018000, + NEON_FRINTA = NEON2RegMiscFixed | 0x20018000, + NEON_FRINTP = NEON2RegMiscFixed | 0x00818000, + NEON_FRINTM = NEON2RegMiscFixed | 0x00019000, + NEON_FRINTX = NEON2RegMiscFixed | 0x20019000, + NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000, + NEON_FRINTI = NEON2RegMiscFixed | 0x20819000, + NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000, + NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit, + NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000, + NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit, + NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000, + NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit, + NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000, + NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit, + NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000, + NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit, + NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000, + NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000, + NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit, + NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000, + NEON_URECPE = NEON2RegMiscFixed | 0x0081C000, + NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000, + NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000, + NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000, + NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000, + NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000, + NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000, + NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000, + + NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode, + NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode +}; + +// NEON instructions with two register operands (FP16). 
+enum NEON2RegMiscFP16Op { + NEON2RegMiscFP16Fixed = 0x0E780800, + NEON2RegMiscFP16FMask = 0x9F7E0C00, + NEON2RegMiscFP16Mask = 0xBFFFFC00, + NEON_FRINTN_H = NEON2RegMiscFP16Fixed | 0x00018000, + NEON_FRINTM_H = NEON2RegMiscFP16Fixed | 0x00019000, + NEON_FCVTNS_H = NEON2RegMiscFP16Fixed | 0x0001A000, + NEON_FCVTMS_H = NEON2RegMiscFP16Fixed | 0x0001B000, + NEON_FCVTAS_H = NEON2RegMiscFP16Fixed | 0x0001C000, + NEON_SCVTF_H = NEON2RegMiscFP16Fixed | 0x0001D000, + NEON_FCMGT_H_zero = NEON2RegMiscFP16Fixed | 0x0080C000, + NEON_FCMEQ_H_zero = NEON2RegMiscFP16Fixed | 0x0080D000, + NEON_FCMLT_H_zero = NEON2RegMiscFP16Fixed | 0x0080E000, + NEON_FABS_H = NEON2RegMiscFP16Fixed | 0x0080F000, + NEON_FRINTP_H = NEON2RegMiscFP16Fixed | 0x00818000, + NEON_FRINTZ_H = NEON2RegMiscFP16Fixed | 0x00819000, + NEON_FCVTPS_H = NEON2RegMiscFP16Fixed | 0x0081A000, + NEON_FCVTZS_H = NEON2RegMiscFP16Fixed | 0x0081B000, + NEON_FRECPE_H = NEON2RegMiscFP16Fixed | 0x0081D000, + NEON_FRINTA_H = NEON2RegMiscFP16Fixed | 0x20018000, + NEON_FRINTX_H = NEON2RegMiscFP16Fixed | 0x20019000, + NEON_FCVTNU_H = NEON2RegMiscFP16Fixed | 0x2001A000, + NEON_FCVTMU_H = NEON2RegMiscFP16Fixed | 0x2001B000, + NEON_FCVTAU_H = NEON2RegMiscFP16Fixed | 0x2001C000, + NEON_UCVTF_H = NEON2RegMiscFP16Fixed | 0x2001D000, + NEON_FCMGE_H_zero = NEON2RegMiscFP16Fixed | 0x2080C000, + NEON_FCMLE_H_zero = NEON2RegMiscFP16Fixed | 0x2080D000, + NEON_FNEG_H = NEON2RegMiscFP16Fixed | 0x2080F000, + NEON_FRINTI_H = NEON2RegMiscFP16Fixed | 0x20819000, + NEON_FCVTPU_H = NEON2RegMiscFP16Fixed | 0x2081A000, + NEON_FCVTZU_H = NEON2RegMiscFP16Fixed | 0x2081B000, + NEON_FRSQRTE_H = NEON2RegMiscFP16Fixed | 0x2081D000, + NEON_FSQRT_H = NEON2RegMiscFP16Fixed | 0x2081F000 +}; + +// NEON instructions with three same-type operands. +enum NEON3SameOp { + NEON3SameFixed = 0x0E200400, + NEON3SameFMask = 0x9F200400, + NEON3SameMask = 0xBF20FC00, + NEON3SameUBit = 0x20000000, + NEON_ADD = NEON3SameFixed | 0x00008000, + NEON_ADDP = NEON3SameFixed | 0x0000B800, + NEON_SHADD = NEON3SameFixed | 0x00000000, + NEON_SHSUB = NEON3SameFixed | 0x00002000, + NEON_SRHADD = NEON3SameFixed | 0x00001000, + NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800, + NEON_CMGE = NEON3SameFixed | 0x00003800, + NEON_CMGT = NEON3SameFixed | 0x00003000, + NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT, + NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE, + NEON_CMTST = NEON3SameFixed | 0x00008800, + NEON_MLA = NEON3SameFixed | 0x00009000, + NEON_MLS = NEON3SameFixed | 0x20009000, + NEON_MUL = NEON3SameFixed | 0x00009800, + NEON_PMUL = NEON3SameFixed | 0x20009800, + NEON_SRSHL = NEON3SameFixed | 0x00005000, + NEON_SQSHL = NEON3SameFixed | 0x00004800, + NEON_SQRSHL = NEON3SameFixed | 0x00005800, + NEON_SSHL = NEON3SameFixed | 0x00004000, + NEON_SMAX = NEON3SameFixed | 0x00006000, + NEON_SMAXP = NEON3SameFixed | 0x0000A000, + NEON_SMIN = NEON3SameFixed | 0x00006800, + NEON_SMINP = NEON3SameFixed | 0x0000A800, + NEON_SABD = NEON3SameFixed | 0x00007000, + NEON_SABA = NEON3SameFixed | 0x00007800, + NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD, + NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA, + NEON_SQADD = NEON3SameFixed | 0x00000800, + NEON_SQSUB = NEON3SameFixed | 0x00002800, + NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000, + NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD, + NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB, + NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD, + NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX, + 
NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP, + NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN, + NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP, + NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL, + NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD, + NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL, + NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL, + NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB, + NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL, + NEON_SQDMULH = NEON3SameFixed | 0x0000B000, + NEON_SQRDMULH = NEON3SameFixed | 0x2000B000, + + // NEON floating point instructions with three same-type operands. + NEON3SameFPFixed = NEON3SameFixed | 0x0000C000, + NEON3SameFPFMask = NEON3SameFMask | 0x0000C000, + NEON3SameFPMask = NEON3SameMask | 0x00800000, + NEON_FADD = NEON3SameFixed | 0x0000D000, + NEON_FSUB = NEON3SameFixed | 0x0080D000, + NEON_FMUL = NEON3SameFixed | 0x2000D800, + NEON_FDIV = NEON3SameFixed | 0x2000F800, + NEON_FMAX = NEON3SameFixed | 0x0000F000, + NEON_FMAXNM = NEON3SameFixed | 0x0000C000, + NEON_FMAXP = NEON3SameFixed | 0x2000F000, + NEON_FMAXNMP = NEON3SameFixed | 0x2000C000, + NEON_FMIN = NEON3SameFixed | 0x0080F000, + NEON_FMINNM = NEON3SameFixed | 0x0080C000, + NEON_FMINP = NEON3SameFixed | 0x2080F000, + NEON_FMINNMP = NEON3SameFixed | 0x2080C000, + NEON_FMLA = NEON3SameFixed | 0x0000C800, + NEON_FMLS = NEON3SameFixed | 0x0080C800, + NEON_FMULX = NEON3SameFixed | 0x0000D800, + NEON_FRECPS = NEON3SameFixed | 0x0000F800, + NEON_FRSQRTS = NEON3SameFixed | 0x0080F800, + NEON_FABD = NEON3SameFixed | 0x2080D000, + NEON_FADDP = NEON3SameFixed | 0x2000D000, + NEON_FCMEQ = NEON3SameFixed | 0x0000E000, + NEON_FCMGE = NEON3SameFixed | 0x2000E000, + NEON_FCMGT = NEON3SameFixed | 0x2080E000, + NEON_FACGE = NEON3SameFixed | 0x2000E800, + NEON_FACGT = NEON3SameFixed | 0x2080E800, + + // NEON logical instructions with three same-type operands. 
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+ NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+ NEON3SameLogicalMask = 0xBFE0FC00,
+ NEON3SameLogicalFormatMask = NEON_Q,
+ NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+ NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+ NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+ NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+ NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+ NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+ NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000
+};
+
+
+enum NEON3SameFP16 {
+ NEON3SameFP16Fixed = 0x0E400400,
+ NEON3SameFP16FMask = 0x9F60C400,
+ NEON3SameFP16Mask = 0xBFE0FC00,
+ NEON_FMAXNM_H = NEON3SameFP16Fixed | 0x00000000,
+ NEON_FMLA_H = NEON3SameFP16Fixed | 0x00000800,
+ NEON_FADD_H = NEON3SameFP16Fixed | 0x00001000,
+ NEON_FMULX_H = NEON3SameFP16Fixed | 0x00001800,
+ NEON_FCMEQ_H = NEON3SameFP16Fixed | 0x00002000,
+ NEON_FMAX_H = NEON3SameFP16Fixed | 0x00003000,
+ NEON_FRECPS_H = NEON3SameFP16Fixed | 0x00003800,
+ NEON_FMINNM_H = NEON3SameFP16Fixed | 0x00800000,
+ NEON_FMLS_H = NEON3SameFP16Fixed | 0x00800800,
+ NEON_FSUB_H = NEON3SameFP16Fixed | 0x00801000,
+ NEON_FMIN_H = NEON3SameFP16Fixed | 0x00803000,
+ NEON_FRSQRTS_H = NEON3SameFP16Fixed | 0x00803800,
+ NEON_FMAXNMP_H = NEON3SameFP16Fixed | 0x20000000,
+ NEON_FADDP_H = NEON3SameFP16Fixed | 0x20001000,
+ NEON_FMUL_H = NEON3SameFP16Fixed | 0x20001800,
+ NEON_FCMGE_H = NEON3SameFP16Fixed | 0x20002000,
+ NEON_FACGE_H = NEON3SameFP16Fixed | 0x20002800,
+ NEON_FMAXP_H = NEON3SameFP16Fixed | 0x20003000,
+ NEON_FDIV_H = NEON3SameFP16Fixed | 0x20003800,
+ NEON_FMINNMP_H = NEON3SameFP16Fixed | 0x20800000,
+ NEON_FABD_H = NEON3SameFP16Fixed | 0x20801000,
+ NEON_FCMGT_H = NEON3SameFP16Fixed | 0x20802000,
+ NEON_FACGT_H = NEON3SameFP16Fixed | 0x20802800,
+ NEON_FMINP_H = NEON3SameFP16Fixed | 0x20803000
+};
+
+
+// 'Extra' NEON instructions with three same-type operands.
+enum NEON3SameExtraOp {
+ NEON3SameExtraFixed = 0x0E008400,
+ NEON3SameExtraUBit = 0x20000000,
+ NEON3SameExtraFMask = 0x9E208400,
+ NEON3SameExtraMask = 0xBE20FC00,
+ NEON_SQRDMLAH = NEON3SameExtraFixed | NEON3SameExtraUBit,
+ NEON_SQRDMLSH = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00000800,
+ NEON_SDOT = NEON3SameExtraFixed | 0x00001000,
+ NEON_UDOT = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00001000,
+
+ /* v8.3 Complex Numbers */
+ NEON3SameExtraFCFixed = 0x2E00C400,
+ NEON3SameExtraFCFMask = 0xBF20C400,
+ // FCMLA fixes opcode<3:2>, and uses opcode<1:0> to encode <rotate>.
+ NEON3SameExtraFCMLAMask = NEON3SameExtraFCFMask | 0x00006000,
+ NEON_FCMLA = NEON3SameExtraFCFixed,
+ // FCADD fixes opcode<3:2, 0>, and uses opcode<1> to encode <rotate>.
+ NEON3SameExtraFCADDMask = NEON3SameExtraFCFMask | 0x00006800,
+ NEON_FCADD = NEON3SameExtraFCFixed | 0x00002000
+ // Other encodings under NEON3SameExtraFCFMask are UNALLOCATED.
+};
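The two comments above call out fields that are deliberately excluded from the class masks. A hedged sketch of what that means for FCMLA: the rotation selector occupies instruction bits 12:11 (opcode<1:0>), which NEON3SameExtraFCMLAMask leaves unmasked, so all four rotations match the same mask. The helper below is illustrative only, not part of the VIXL API:

    // rot = 0..3 selects the #0/#90/#180/#270 rotation; it is OR-ed into
    // bits 12:11, the only opcode bits the FCMLA mask leaves free.
    uint32_t FcmlaWithRotation(uint32_t rot) {
      return NEON_FCMLA | ((rot & 3) << 11);
    }

+
+// NEON instructions with three different-type operands.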
+enum NEON3DifferentOp { + NEON3DifferentFixed = 0x0E200000, + NEON3DifferentFMask = 0x9F200C00, + NEON3DifferentMask = 0xFF20FC00, + NEON_ADDHN = NEON3DifferentFixed | 0x00004000, + NEON_ADDHN2 = NEON_ADDHN | NEON_Q, + NEON_PMULL = NEON3DifferentFixed | 0x0000E000, + NEON_PMULL2 = NEON_PMULL | NEON_Q, + NEON_RADDHN = NEON3DifferentFixed | 0x20004000, + NEON_RADDHN2 = NEON_RADDHN | NEON_Q, + NEON_RSUBHN = NEON3DifferentFixed | 0x20006000, + NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q, + NEON_SABAL = NEON3DifferentFixed | 0x00005000, + NEON_SABAL2 = NEON_SABAL | NEON_Q, + NEON_SABDL = NEON3DifferentFixed | 0x00007000, + NEON_SABDL2 = NEON_SABDL | NEON_Q, + NEON_SADDL = NEON3DifferentFixed | 0x00000000, + NEON_SADDL2 = NEON_SADDL | NEON_Q, + NEON_SADDW = NEON3DifferentFixed | 0x00001000, + NEON_SADDW2 = NEON_SADDW | NEON_Q, + NEON_SMLAL = NEON3DifferentFixed | 0x00008000, + NEON_SMLAL2 = NEON_SMLAL | NEON_Q, + NEON_SMLSL = NEON3DifferentFixed | 0x0000A000, + NEON_SMLSL2 = NEON_SMLSL | NEON_Q, + NEON_SMULL = NEON3DifferentFixed | 0x0000C000, + NEON_SMULL2 = NEON_SMULL | NEON_Q, + NEON_SSUBL = NEON3DifferentFixed | 0x00002000, + NEON_SSUBL2 = NEON_SSUBL | NEON_Q, + NEON_SSUBW = NEON3DifferentFixed | 0x00003000, + NEON_SSUBW2 = NEON_SSUBW | NEON_Q, + NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000, + NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q, + NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000, + NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q, + NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000, + NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q, + NEON_SUBHN = NEON3DifferentFixed | 0x00006000, + NEON_SUBHN2 = NEON_SUBHN | NEON_Q, + NEON_UABAL = NEON_SABAL | NEON3SameUBit, + NEON_UABAL2 = NEON_UABAL | NEON_Q, + NEON_UABDL = NEON_SABDL | NEON3SameUBit, + NEON_UABDL2 = NEON_UABDL | NEON_Q, + NEON_UADDL = NEON_SADDL | NEON3SameUBit, + NEON_UADDL2 = NEON_UADDL | NEON_Q, + NEON_UADDW = NEON_SADDW | NEON3SameUBit, + NEON_UADDW2 = NEON_UADDW | NEON_Q, + NEON_UMLAL = NEON_SMLAL | NEON3SameUBit, + NEON_UMLAL2 = NEON_UMLAL | NEON_Q, + NEON_UMLSL = NEON_SMLSL | NEON3SameUBit, + NEON_UMLSL2 = NEON_UMLSL | NEON_Q, + NEON_UMULL = NEON_SMULL | NEON3SameUBit, + NEON_UMULL2 = NEON_UMULL | NEON_Q, + NEON_USUBL = NEON_SSUBL | NEON3SameUBit, + NEON_USUBL2 = NEON_USUBL | NEON_Q, + NEON_USUBW = NEON_SSUBW | NEON3SameUBit, + NEON_USUBW2 = NEON_USUBW | NEON_Q +}; + +// NEON instructions operating across vectors. +enum NEONAcrossLanesOp { + NEONAcrossLanesFixed = 0x0E300800, + NEONAcrossLanesFMask = 0x9F3E0C00, + NEONAcrossLanesMask = 0xBF3FFC00, + NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000, + NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000, + NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000, + NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000, + NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000, + NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000, + NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000, + + NEONAcrossLanesFP16Fixed = NEONAcrossLanesFixed | 0x0000C000, + NEONAcrossLanesFP16FMask = NEONAcrossLanesFMask | 0x2000C000, + NEONAcrossLanesFP16Mask = NEONAcrossLanesMask | 0x20800000, + NEON_FMAXNMV_H = NEONAcrossLanesFP16Fixed | 0x00000000, + NEON_FMAXV_H = NEONAcrossLanesFP16Fixed | 0x00003000, + NEON_FMINNMV_H = NEONAcrossLanesFP16Fixed | 0x00800000, + NEON_FMINV_H = NEONAcrossLanesFP16Fixed | 0x00803000, + + // NEON floating point across instructions. 
+ NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x2000C000,
+ NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x2000C000,
+ NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x20800000,
+
+ NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000,
+ NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000,
+ NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+ NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp {
+ NEONByIndexedElementFixed = 0x0F000000,
+ NEONByIndexedElementFMask = 0x9F000400,
+ NEONByIndexedElementMask = 0xBF00F400,
+ NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000,
+ NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000,
+ NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000,
+ NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+ NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+ NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+ NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+ NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+ NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+ NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+ NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+ NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+ NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000,
+ NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+ NEON_SDOT_byelement = NEONByIndexedElementFixed | 0x0000E000,
+ NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000,
+ NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000,
+ NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000,
+ NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000,
+ NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000,
+ NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000,
+ NEON_FMULX_H_byelement = NEONByIndexedElementFixed | 0x20009000,
+
+ // Floating point instructions.
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000,
+ NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
+ NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
+ NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000,
+ NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000,
+
+ // Complex instruction(s): this mask is necessary because the 'rot'
+ // encoding moves into the NEONByIndex..Mask space.
+ NEONByIndexedElementFPComplexMask = 0xBF009400
+};
+
+// NEON register copy.
+enum NEONCopyOp {
+ NEONCopyFixed = 0x0E000400,
+ NEONCopyFMask = 0x9FE08400,
+ NEONCopyMask = 0x3FE08400,
+ NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+ NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+ NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+ NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+ NEONCopyUmovMask = NEONCopyMask | 0x20007800,
+ NEONCopySmovMask = NEONCopyMask | 0x20007800,
+ NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000,
+ NEON_INS_GENERAL = NEONCopyFixed | 0x40001800,
+ NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000,
+ NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800,
+ NEON_SMOV = NEONCopyFixed | 0x00002800,
+ NEON_UMOV = NEONCopyFixed | 0x00003800
+};
+
+// NEON extract.
+enum NEONExtractOp { + NEONExtractFixed = 0x2E000000, + NEONExtractFMask = 0xBF208400, + NEONExtractMask = 0xBFE08400, + NEON_EXT = NEONExtractFixed | 0x00000000 +}; + +enum NEONLoadStoreMultiOp { + NEONLoadStoreMultiL = 0x00400000, + NEONLoadStoreMulti1_1v = 0x00007000, + NEONLoadStoreMulti1_2v = 0x0000A000, + NEONLoadStoreMulti1_3v = 0x00006000, + NEONLoadStoreMulti1_4v = 0x00002000, + NEONLoadStoreMulti2 = 0x00008000, + NEONLoadStoreMulti3 = 0x00004000, + NEONLoadStoreMulti4 = 0x00000000 +}; + +// NEON load/store multiple structures. +enum NEONLoadStoreMultiStructOp { + NEONLoadStoreMultiStructFixed = 0x0C000000, + NEONLoadStoreMultiStructFMask = 0xBFBF0000, + NEONLoadStoreMultiStructMask = 0xBFFFF000, + NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed, + NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed | + NEONLoadStoreMultiL, + NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v, + NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v, + NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v, + NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v, + NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2, + NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3, + NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4, + NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v, + NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v, + NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v, + NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v, + NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2, + NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3, + NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4 +}; + +// NEON load/store multiple structures with post-index addressing. 
+enum NEONLoadStoreMultiStructPostIndexOp { + NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000, + NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000, + NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000, + NEONLoadStoreMultiStructPostIndex = 0x00800000, + NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex, + NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex, + NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex, + NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex, + NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex +}; + +enum NEONLoadStoreSingleOp { + NEONLoadStoreSingle1 = 0x00000000, + NEONLoadStoreSingle2 = 0x00200000, + NEONLoadStoreSingle3 = 0x00002000, + NEONLoadStoreSingle4 = 0x00202000, + NEONLoadStoreSingleL = 0x00400000, + NEONLoadStoreSingle_b = 0x00000000, + NEONLoadStoreSingle_h = 0x00004000, + NEONLoadStoreSingle_s = 0x00008000, + NEONLoadStoreSingle_d = 0x00008400, + NEONLoadStoreSingleAllLanes = 0x0000C000, + NEONLoadStoreSingleLenMask = 0x00202000 +}; + +// NEON load/store single structure. +enum NEONLoadStoreSingleStructOp { + NEONLoadStoreSingleStructFixed = 0x0D000000, + NEONLoadStoreSingleStructFMask = 0xBF9F0000, + NEONLoadStoreSingleStructMask = 0xBFFFE000, + NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed | + NEONLoadStoreSingleL, + NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructFixed, + NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b, + NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h, + NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s, + NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d, + NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes, + NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b, + NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h, + NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s, + NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d, + + NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b, + NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | 
NEONLoadStoreSingle_h, + NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s, + NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d, + NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes, + NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b, + NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h, + NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s, + NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d, + + NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b, + NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h, + NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s, + NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d, + NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes, + NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b, + NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h, + NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s, + NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d, + + NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b, + NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h, + NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s, + NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d, + NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes, + NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b, + NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h, + NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s, + NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d +}; + +// NEON load/store single structure with post-index addressing. 
+enum NEONLoadStoreSingleStructPostIndexOp { + NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000, + NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000, + NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000, + NEONLoadStoreSingleStructPostIndex = 0x00800000, + NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex +}; + +// NEON modified immediate. +enum NEONModifiedImmediateOp { + NEONModifiedImmediateFixed = 0x0F000400, + NEONModifiedImmediateFMask = 0x9FF80400, + NEONModifiedImmediateOpBit = 0x20000000, + NEONModifiedImmediate_FMOV = NEONModifiedImmediateFixed | 0x00000800, + NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000, + NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000, + NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000, + NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000 +}; + +// NEON shift immediate. 
+enum NEONShiftImmediateOp { + NEONShiftImmediateFixed = 0x0F000400, + NEONShiftImmediateFMask = 0x9F800400, + NEONShiftImmediateMask = 0xBF80FC00, + NEONShiftImmediateUBit = 0x20000000, + NEON_SHL = NEONShiftImmediateFixed | 0x00005000, + NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000, + NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000, + NEON_SLI = NEONShiftImmediateFixed | 0x20005000, + NEON_SRI = NEONShiftImmediateFixed | 0x20004000, + NEON_SHRN = NEONShiftImmediateFixed | 0x00008000, + NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800, + NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000, + NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800, + NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000, + NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800, + NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000, + NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800, + NEON_SSHR = NEONShiftImmediateFixed | 0x00000000, + NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000, + NEON_USHR = NEONShiftImmediateFixed | 0x20000000, + NEON_URSHR = NEONShiftImmediateFixed | 0x20002000, + NEON_SSRA = NEONShiftImmediateFixed | 0x00001000, + NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000, + NEON_USRA = NEONShiftImmediateFixed | 0x20001000, + NEON_URSRA = NEONShiftImmediateFixed | 0x20003000, + NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000, + NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000, + NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000, + NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800, + NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800, + NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000, + NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000 +}; + +// NEON table. +enum NEONTableOp { + NEONTableFixed = 0x0E000000, + NEONTableFMask = 0xBF208C00, + NEONTableExt = 0x00001000, + NEONTableMask = 0xBF20FC00, + NEON_TBL_1v = NEONTableFixed | 0x00000000, + NEON_TBL_2v = NEONTableFixed | 0x00002000, + NEON_TBL_3v = NEONTableFixed | 0x00004000, + NEON_TBL_4v = NEONTableFixed | 0x00006000, + NEON_TBX_1v = NEON_TBL_1v | NEONTableExt, + NEON_TBX_2v = NEON_TBL_2v | NEONTableExt, + NEON_TBX_3v = NEON_TBL_3v | NEONTableExt, + NEON_TBX_4v = NEON_TBL_4v | NEONTableExt +}; + +// NEON perm. +enum NEONPermOp { + NEONPermFixed = 0x0E000800, + NEONPermFMask = 0xBF208C00, + NEONPermMask = 0x3F20FC00, + NEON_UZP1 = NEONPermFixed | 0x00001000, + NEON_TRN1 = NEONPermFixed | 0x00002000, + NEON_ZIP1 = NEONPermFixed | 0x00003000, + NEON_UZP2 = NEONPermFixed | 0x00005000, + NEON_TRN2 = NEONPermFixed | 0x00006000, + NEON_ZIP2 = NEONPermFixed | 0x00007000 +}; + +// NEON scalar instructions with two register operands. 
+enum NEONScalar2RegMiscOp { + NEONScalar2RegMiscFixed = 0x5E200800, + NEONScalar2RegMiscFMask = 0xDF3E0C00, + NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask, + NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero, + NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero, + NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero, + NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero, + NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero, + NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS, + NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS, + NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG, + NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG, + NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN, + NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN, + NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN, + NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD, + NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD, + + NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, + NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, + + NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, + NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, + NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, + NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, + NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF, + NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero, + NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero, + NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, + NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, + NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, + NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, + NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, + NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, + NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, + NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU, + NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS, + NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU, + NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS, + NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU, + NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS, + NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU, + NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN +}; + +// NEON instructions with two register operands (FP16). 
+enum NEONScalar2RegMiscFP16Op { + NEONScalar2RegMiscFP16Fixed = 0x5E780800, + NEONScalar2RegMiscFP16FMask = 0xDF7E0C00, + NEONScalar2RegMiscFP16Mask = 0xFFFFFC00, + NEON_FCVTNS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNS_H, + NEON_FCVTMS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMS_H, + NEON_FCVTAS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAS_H, + NEON_SCVTF_H_scalar = NEON_Q | NEONScalar | NEON_SCVTF_H, + NEON_FCMGT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H_zero, + NEON_FCMEQ_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H_zero, + NEON_FCMLT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_H_zero, + NEON_FCVTPS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPS_H, + NEON_FCVTZS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_H, + NEON_FRECPE_H_scalar = NEON_Q | NEONScalar | NEON_FRECPE_H, + NEON_FRECPX_H_scalar = NEONScalar2RegMiscFP16Fixed | 0x0081F000, + NEON_FCVTNU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNU_H, + NEON_FCVTMU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMU_H, + NEON_FCVTAU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAU_H, + NEON_UCVTF_H_scalar = NEON_Q | NEONScalar | NEON_UCVTF_H, + NEON_FCMGE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H_zero, + NEON_FCMLE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_H_zero, + NEON_FCVTPU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPU_H, + NEON_FCVTZU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_H, + NEON_FRSQRTE_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE_H +}; + +// NEON scalar instructions with three same-type operands. +enum NEONScalar3SameOp { + NEONScalar3SameFixed = 0x5E200400, + NEONScalar3SameFMask = 0xDF200400, + NEONScalar3SameMask = 0xFF20FC00, + NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD, + NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ, + NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE, + NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT, + NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI, + NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS, + NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST, + NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB, + NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD, + NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD, + NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB, + NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB, + NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL, + NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL, + NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL, + NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL, + NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL, + NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL, + NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL, + NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL, + NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH, + NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH, + + // NEON floating point scalar instructions with three same-type operands. 
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000, + NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000, + NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000, + NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE, + NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT, + NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ, + NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE, + NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT, + NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX, + NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS, + NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS, + NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD +}; + +// NEON scalar FP16 instructions with three same-type operands. +enum NEONScalar3SameFP16Op { + NEONScalar3SameFP16Fixed = 0x5E400400, + NEONScalar3SameFP16FMask = 0xDF60C400, + NEONScalar3SameFP16Mask = 0xFFE0FC00, + NEON_FABD_H_scalar = NEON_Q | NEONScalar | NEON_FABD_H, + NEON_FMULX_H_scalar = NEON_Q | NEONScalar | NEON_FMULX_H, + NEON_FCMEQ_H_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H, + NEON_FCMGE_H_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H, + NEON_FCMGT_H_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H, + NEON_FACGE_H_scalar = NEON_Q | NEONScalar | NEON_FACGE_H, + NEON_FACGT_H_scalar = NEON_Q | NEONScalar | NEON_FACGT_H, + NEON_FRECPS_H_scalar = NEON_Q | NEONScalar | NEON_FRECPS_H, + NEON_FRSQRTS_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS_H +}; + +// 'Extra' NEON scalar instructions with three same-type operands. +enum NEONScalar3SameExtraOp { + NEONScalar3SameExtraFixed = 0x5E008400, + NEONScalar3SameExtraFMask = 0xDF208400, + NEONScalar3SameExtraMask = 0xFF20FC00, + NEON_SQRDMLAH_scalar = NEON_Q | NEONScalar | NEON_SQRDMLAH, + NEON_SQRDMLSH_scalar = NEON_Q | NEONScalar | NEON_SQRDMLSH +}; + +// NEON scalar instructions with three different-type operands. +enum NEONScalar3DiffOp { + NEONScalar3DiffFixed = 0x5E200000, + NEONScalar3DiffFMask = 0xDF200C00, + NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask, + NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL, + NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL, + NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL +}; + +// NEON scalar instructions with indexed element operand. +enum NEONScalarByIndexedElementOp { + NEONScalarByIndexedElementFixed = 0x5F000000, + NEONScalarByIndexedElementFMask = 0xDF000400, + NEONScalarByIndexedElementMask = 0xFF00F400, + NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement, + NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement, + NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement, + NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement, + NEON_SQRDMULH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement, + NEON_SQRDMLAH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMLAH_byelement, + NEON_SQRDMLSH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMLSH_byelement, + NEON_FMLA_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_H_byelement, + NEON_FMLS_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_H_byelement, + NEON_FMUL_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_H_byelement, + NEON_FMULX_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_H_byelement, + + // Floating point instructions. 
+ NEONScalarByIndexedElementFPFixed + = NEONScalarByIndexedElementFixed | 0x00800000, + NEONScalarByIndexedElementFPMask + = NEONScalarByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement, + NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement, + NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement, + NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement +}; + +// NEON scalar register copy. +enum NEONScalarCopyOp { + NEONScalarCopyFixed = 0x5E000400, + NEONScalarCopyFMask = 0xDFE08400, + NEONScalarCopyMask = 0xFFE0FC00, + NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT +}; + +// NEON scalar pairwise instructions. +enum NEONScalarPairwiseOp { + NEONScalarPairwiseFixed = 0x5E300800, + NEONScalarPairwiseFMask = 0xDF3E0C00, + NEONScalarPairwiseMask = 0xFFB1F800, + NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000, + NEON_FMAXNMP_h_scalar = NEONScalarPairwiseFixed | 0x0000C000, + NEON_FADDP_h_scalar = NEONScalarPairwiseFixed | 0x0000D000, + NEON_FMAXP_h_scalar = NEONScalarPairwiseFixed | 0x0000F000, + NEON_FMINNMP_h_scalar = NEONScalarPairwiseFixed | 0x0080C000, + NEON_FMINP_h_scalar = NEONScalarPairwiseFixed | 0x0080F000, + NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000, + NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000, + NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000, + NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000, + NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000 +}; + +// NEON scalar shift immediate. +enum NEONScalarShiftImmediateOp { + NEONScalarShiftImmediateFixed = 0x5F000400, + NEONScalarShiftImmediateFMask = 0xDF800400, + NEONScalarShiftImmediateMask = 0xFF80FC00, + NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL, + NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI, + NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI, + NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR, + NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR, + NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR, + NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR, + NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA, + NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA, + NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA, + NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA, + NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN, + NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN, + NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN, + NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN, + NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN, + NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN, + NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU, + NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm, + NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm, + NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm, + NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm, + NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm, + NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm +}; + +// Unimplemented and unallocated instructions. These are defined to make fixed +// bit assertion easier. +enum UnimplementedOp { + UnimplementedFixed = 0x00000000, + UnimplementedFMask = 0x00000000 +}; + +enum UnallocatedOp { + UnallocatedFixed = 0x00000000, + UnallocatedFMask = 0x00000000 +}; + +// Re-enable `clang-format` after the `enum`s. 
+// clang-format on
+
+} // namespace aarch64
+} // namespace vixl
+
+#endif // VIXL_AARCH64_CONSTANTS_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/cpu-aarch64.h b/dep/vixl/include/vixl/aarch64/cpu-aarch64.h
new file mode 100644
index 000000000..031fa42c8
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/cpu-aarch64.h
@@ -0,0 +1,86 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_AARCH64_H
+#define VIXL_CPU_AARCH64_H
+
+#include "../globals-vixl.h"
+
+#include "instructions-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+class CPU {
+ public:
+ // Initialise CPU support.
+ static void SetUp();
+
+ // Ensures the data at a given address and with a given size is the same for
+ // the I and D caches. I and D caches are not automatically coherent on ARM
+ // so this operation is required before any dynamically generated code can
+ // safely run.
+ static void EnsureIAndDCacheCoherency(void *address, size_t length);
+
+ // Handle tagged pointers.
+ template <typename T>
+ static T SetPointerTag(T pointer, uint64_t tag) {
+ VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
+
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+ return (T)raw;
+ }
+
+ template <typename T>
+ static uint64_t GetPointerTag(T pointer) {
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ return (raw & kAddressTagMask) >> kAddressTagOffset;
+ }
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+};
+
+} // namespace aarch64
+} // namespace vixl
+
+#endif // VIXL_CPU_AARCH64_H
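The CPU helpers above are small enough to exercise directly. A hedged usage sketch, assuming an AArch64 target where the top address byte is ignored and the kAddressTag* constants from instructions-aarch64.h; the variable names are illustrative only:

    #include "vixl/aarch64/cpu-aarch64.h"

    int value = 42;
    // Stash an 8-bit tag in the pointer's top byte. On AArch64 the tagged
    // pointer still dereferences normally, since the top byte is ignored.
    int* tagged = vixl::aarch64::CPU::SetPointerTag(&value, 0x2a);
    uint64_t tag = vixl::aarch64::CPU::GetPointerTag(tagged);  // 0x2a

For runtime-generated code, the requirement in the class comment applies: call CPU::EnsureIAndDCacheCoherency on the code buffer before branching to it.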
+ static unsigned icache_line_size_; + static unsigned dcache_line_size_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_CPU_AARCH64_H diff --git a/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h b/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h new file mode 100644 index 000000000..9f034778a --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h @@ -0,0 +1,125 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Arm Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ +#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ + +#include + +#include "../cpu-features.h" +#include "decoder-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// This visitor records the CPU features that each decoded instruction requires. +// It provides: +// - the set of CPU features required by the most recently decoded instruction, +// - a cumulative set of encountered CPU features, +// - an optional list of 'available' CPU features. +// +// Primarily, this allows the Disassembler and Simulator to share the same CPU +// features logic. However, it can be used standalone to scan code blocks for +// CPU features. +class CPUFeaturesAuditor : public DecoderVisitor { + public: + // Construction arguments: + // - If a decoder is specified, the CPUFeaturesAuditor automatically + // registers itself as a visitor. Otherwise, this can be done manually. + // + // - If an `available` features list is provided, it is used as a hint in + // cases where instructions may be provided by multiple separate features. + // An example of this is FP&SIMD loads and stores: some of these are used + // in both FP and integer SIMD code. If exactly one of those features is + // in `available` when one of these instructions is encountered, then the + // auditor will record that feature. Otherwise, it will record _both_ + // features. 
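A sketch of how this auditor is typically wired up, assuming only the API declared in this header and in decoder-aarch64.h; the function name and include paths are illustrative:

```cpp
#include "vixl/aarch64/cpu-features-auditor-aarch64.h"
#include "vixl/aarch64/decoder-aarch64.h"

using namespace vixl::aarch64;

// Scan a block of generated code and report every CPU feature it needs.
vixl::CPUFeatures ScanRequiredFeatures(const Instruction* start,
                                       const Instruction* end) {
  Decoder decoder;
  // Passing the decoder registers the auditor as a visitor automatically.
  CPUFeaturesAuditor auditor(&decoder, vixl::CPUFeatures::None());
  decoder.Decode(start, end);       // Visits every instruction in [start, end).
  return auditor.GetSeenFeatures();  // The cumulative feature set.
}
```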
+ explicit CPUFeaturesAuditor( + Decoder* decoder, const CPUFeatures& available = CPUFeatures::None()) + : available_(available), decoder_(decoder) { + if (decoder_ != NULL) decoder_->AppendVisitor(this); + } + + explicit CPUFeaturesAuditor( + const CPUFeatures& available = CPUFeatures::None()) + : available_(available), decoder_(NULL) {} + + virtual ~CPUFeaturesAuditor() { + if (decoder_ != NULL) decoder_->RemoveVisitor(this); + } + + void ResetSeenFeatures() { + seen_ = CPUFeatures::None(); + last_instruction_ = CPUFeatures::None(); + } + + // Query or set available CPUFeatures. + const CPUFeatures& GetAvailableFeatures() const { return available_; } + void SetAvailableFeatures(const CPUFeatures& available) { + available_ = available; + } + + // Query CPUFeatures seen since construction (or the last call to `Reset()`). + const CPUFeatures& GetSeenFeatures() const { return seen_; } + + // Query CPUFeatures from the last instruction visited by this auditor. + const CPUFeatures& GetInstructionFeatures() const { + return last_instruction_; + } + + bool InstructionIsAvailable() const { + return available_.Has(last_instruction_); + } + + // The common CPUFeatures interface operates on the available_ list. + CPUFeatures* GetCPUFeatures() { return &available_; } + void SetCPUFeatures(const CPUFeatures& available) { + SetAvailableFeatures(available); + } + +// Declare all Visitor functions. +#define DECLARE(A) \ + virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + private: + class RecordInstructionFeaturesScope; + + void LoadStoreHelper(const Instruction* instr); + void LoadStorePairHelper(const Instruction* instr); + + CPUFeatures seen_; + CPUFeatures last_instruction_; + CPUFeatures available_; + + Decoder* decoder_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/decoder-aarch64.h b/dep/vixl/include/vixl/aarch64/decoder-aarch64.h new file mode 100644 index 000000000..100fbb352 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/decoder-aarch64.h @@ -0,0 +1,290 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
+#define VIXL_AARCH64_DECODER_AARCH64_H_
+
+#include <list>
+
+#include "../globals-vixl.h"
+
+#include "instructions-aarch64.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST_THAT_RETURN(V) \
+  V(AddSubExtended) \
+  V(AddSubImmediate) \
+  V(AddSubShifted) \
+  V(AddSubWithCarry) \
+  V(AtomicMemory) \
+  V(Bitfield) \
+  V(CompareBranch) \
+  V(ConditionalBranch) \
+  V(ConditionalCompareImmediate) \
+  V(ConditionalCompareRegister) \
+  V(ConditionalSelect) \
+  V(Crypto2RegSHA) \
+  V(Crypto3RegSHA) \
+  V(CryptoAES) \
+  V(DataProcessing1Source) \
+  V(DataProcessing2Source) \
+  V(DataProcessing3Source) \
+  V(Exception) \
+  V(Extract) \
+  V(FPCompare) \
+  V(FPConditionalCompare) \
+  V(FPConditionalSelect) \
+  V(FPDataProcessing1Source) \
+  V(FPDataProcessing2Source) \
+  V(FPDataProcessing3Source) \
+  V(FPFixedPointConvert) \
+  V(FPImmediate) \
+  V(FPIntegerConvert) \
+  V(LoadLiteral) \
+  V(LoadStoreExclusive) \
+  V(LoadStorePairNonTemporal) \
+  V(LoadStorePairOffset) \
+  V(LoadStorePairPostIndex) \
+  V(LoadStorePairPreIndex) \
+  V(LoadStorePostIndex) \
+  V(LoadStorePreIndex) \
+  V(LoadStoreRegisterOffset) \
+  V(LoadStoreUnscaledOffset) \
+  V(LoadStoreUnsignedOffset) \
+  V(LogicalImmediate) \
+  V(LogicalShifted) \
+  V(MoveWideImmediate) \
+  V(NEON2RegMisc) \
+  V(NEON2RegMiscFP16) \
+  V(NEON3Different) \
+  V(NEON3Same) \
+  V(NEON3SameExtra) \
+  V(NEON3SameFP16) \
+  V(NEONAcrossLanes) \
+  V(NEONByIndexedElement) \
+  V(NEONCopy) \
+  V(NEONExtract) \
+  V(NEONLoadStoreMultiStruct) \
+  V(NEONLoadStoreMultiStructPostIndex) \
+  V(NEONLoadStoreSingleStruct) \
+  V(NEONLoadStoreSingleStructPostIndex) \
+  V(NEONModifiedImmediate) \
+  V(NEONPerm) \
+  V(NEONScalar2RegMisc) \
+  V(NEONScalar2RegMiscFP16) \
+  V(NEONScalar3Diff) \
+  V(NEONScalar3Same) \
+  V(NEONScalar3SameExtra) \
+  V(NEONScalar3SameFP16) \
+  V(NEONScalarByIndexedElement) \
+  V(NEONScalarCopy) \
+  V(NEONScalarPairwise) \
+  V(NEONScalarShiftImmediate) \
+  V(NEONShiftImmediate) \
+  V(NEONTable) \
+  V(PCRelAddressing) \
+  V(System) \
+  V(TestBranch) \
+  V(UnconditionalBranch) \
+  V(UnconditionalBranchToRegister)
+
+#define VISITOR_LIST_THAT_DONT_RETURN(V) \
+  V(Unallocated) \
+  V(Unimplemented)
+
+#define VISITOR_LIST(V) \
+  VISITOR_LIST_THAT_RETURN(V) \
+  VISITOR_LIST_THAT_DONT_RETURN(V)
+
+namespace vixl {
+namespace aarch64 {
+
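Before the interface itself, a sketch of how the visitor list macro saves boilerplate when implementing the DecoderVisitor interface defined just below. The counter class is invented for illustration; VIXL_OVERRIDE comes from globals-vixl.h:

```cpp
// Count unconditional immediate branches in decoded code. One generic macro
// body provides all ~80 Visit* overrides required by the pure interface.
class BranchCounter : public vixl::aarch64::DecoderVisitor {
 public:
  BranchCounter() : count_(0) {}

#define DECLARE(A)                                                  \
  virtual void Visit##A(const vixl::aarch64::Instruction* instr)    \
      VIXL_OVERRIDE {                                               \
    if (instr->IsUncondBranchImm()) count_++;                       \
  }
  VISITOR_LIST(DECLARE)
#undef DECLARE

  uint64_t GetCount() const { return count_; }

 private:
  uint64_t count_;
};

// Usage: Decoder d; BranchCounter bc; d.AppendVisitor(&bc);
//        d.Decode(start, end);  // Then read bc.GetCount().
```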
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+  enum VisitorConstness { kConstVisitor, kNonConstVisitor };
+  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
+      : constness_(constness) {}
+
+  virtual ~DecoderVisitor() {}
+
+#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
+  VISITOR_LIST(DECLARE)
+#undef DECLARE
+
+  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
+  Instruction* MutableInstruction(const Instruction* instr) {
+    VIXL_ASSERT(!IsConstVisitor());
+    return const_cast<Instruction*>(instr);
+  }
+
+ private:
+  const VisitorConstness constness_;
+};
+
+
+class Decoder {
+ public:
+  Decoder() {}
+
+  // Top-level wrappers around the actual decoding function.
+  void Decode(const Instruction* instr) {
+    std::list<DecoderVisitor*>::iterator it;
+    for (it = visitors_.begin(); it != visitors_.end(); it++) {
+      VIXL_ASSERT((*it)->IsConstVisitor());
+    }
+    DecodeInstruction(instr);
+  }
+  void Decode(Instruction* instr) {
+    DecodeInstruction(const_cast<const Instruction*>(instr));
+  }
+
+  // Decode all instructions from start (inclusive) to end (exclusive).
+  template <typename T>
+  void Decode(T start, T end) {
+    for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
+      Decode(instr);
+    }
+  }
+
+  // Register a new visitor class with the decoder.
+  // Decode() will call the corresponding visitor method from all registered
+  // visitor classes when decoding reaches the leaf node of the instruction
+  // decode tree.
+  // Visitors are called in order.
+  // A visitor can be registered multiple times.
+  //
+  //   d.AppendVisitor(V1);
+  //   d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);
+  //   d.AppendVisitor(V3);
+  //
+  //   d.Decode(i);
+  //
+  // will call in order visitor methods in V2, V1, V2, V3.
+  void AppendVisitor(DecoderVisitor* visitor);
+  void PrependVisitor(DecoderVisitor* visitor);
+  // These helpers register `new_visitor` before or after the first instance
+  // of `registered_visitor` in the list.
+  // So if
+  //   V1, V2, V1, V2
+  // are registered in this order in the decoder, calls to
+  //   d.InsertVisitorAfter(V3, V1);
+  //   d.InsertVisitorBefore(V4, V2);
+  // will yield the order
+  //   V1, V3, V4, V2, V1, V2
+  //
+  // For more complex modifications of the order of registered visitors, one
+  // can directly access and modify the list of visitors via the `visitors()`
+  // accessor.
+  void InsertVisitorBefore(DecoderVisitor* new_visitor,
+                           DecoderVisitor* registered_visitor);
+  void InsertVisitorAfter(DecoderVisitor* new_visitor,
+                          DecoderVisitor* registered_visitor);
+
+  // Remove all instances of a previously registered visitor class from the
+  // list of visitors stored by the decoder.
+  void RemoveVisitor(DecoderVisitor* visitor);
+
+#define DECLARE(A) void Visit##A(const Instruction* instr);
+  VISITOR_LIST(DECLARE)
+#undef DECLARE
+
+
+  std::list<DecoderVisitor*>* visitors() { return &visitors_; }
+
+ private:
+  // Decodes an instruction and calls the visitor functions registered with
+  // the Decoder class.
+  void DecodeInstruction(const Instruction* instr);
+
+  // Decode the PC relative addressing instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x0.
+  void DecodePCRelAddressing(const Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x1.
+  void DecodeAddSubImmediate(const Instruction* instr);
+
+  // Decode the branch, system command, and exception generation parts of
+  // the instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+  void DecodeBranchSystemException(const Instruction* instr);
+
+  // Decode the load and store parts of the instruction tree, and call
+  // the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+  void DecodeLoadStore(const Instruction* instr);
+
+  // Decode the logical immediate and move wide immediate parts of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x2.
+  void DecodeLogical(const Instruction* instr);
+
+  // Decode the bitfield and extraction parts of the instruction tree,
+  // and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x3.
+  void DecodeBitfieldExtract(const Instruction* instr);
+
+  // Decode the data processing parts of the instruction tree, and call the
+  // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+  void DecodeDataProcessing(const Instruction* instr);
+
+  // Decode the floating point parts of the instruction tree, and call the
+  // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0xE, 0xF}.
+  void DecodeFP(const Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+  // and call the corresponding visitors.
+  // On entry, instruction bits 29:25 = 0x6.
+  void DecodeNEONLoadStore(const Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) vector data processing part of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 28:25 = 0x7.
+  void DecodeNEONVectorDataProcessing(const Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) scalar data processing part of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 28:25 = 0xF.
+  void DecodeNEONScalarDataProcessing(const Instruction* instr);
+
+ private:
+  // Visitors are registered in a list.
+  std::list<DecoderVisitor*> visitors_;
+};
+
+}  // namespace aarch64
+}  // namespace vixl
+
+#endif  // VIXL_AARCH64_DECODER_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/disasm-aarch64.h b/dep/vixl/include/vixl/aarch64/disasm-aarch64.h
new file mode 100644
index 000000000..c650bee98
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/disasm-aarch64.h
@@ -0,0 +1,217 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_DISASM_AARCH64_H +#define VIXL_AARCH64_DISASM_AARCH64_H + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "cpu-features-auditor-aarch64.h" +#include "decoder-aarch64.h" +#include "instructions-aarch64.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +class Disassembler : public DecoderVisitor { + public: + Disassembler(); + Disassembler(char* text_buffer, int buffer_size); + virtual ~Disassembler(); + char* GetOutput(); + +// Declare all Visitor functions. +#define DECLARE(A) \ + virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + protected: + virtual void ProcessOutput(const Instruction* instr); + + // Default output functions. The functions below implement a default way of + // printing elements in the disassembly. A sub-class can override these to + // customize the disassembly output. + + // Prints the name of a register. + // TODO: This currently doesn't allow renaming of V registers. + virtual void AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg); + + // Prints a PC-relative offset. This is used for example when disassembling + // branches to immediate offsets. + virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset); + + // Prints an address, in the general case. It can be code or data. This is + // used for example to print the target address of an ADR instruction. + virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some code. + // This is used for example to print the target address of a branch to an + // immediate offset. + // A sub-class can for example override this method to lookup the address and + // print an appropriate name. + virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some data. + // This is used for example to print the source address of a load literal + // instruction. + virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr, + const void* addr); + + // Same as the above, but for addresses that are not relative to the code + // buffer. They are currently not used by VIXL. + virtual void AppendAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendCodeAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendDataAddressToOutput(const Instruction* instr, + const void* addr); + + public: + // Get/Set the offset that should be added to code addresses when printing + // code-relative addresses in the AppendCodeRelativeAddressToOutput() + // helpers. + // Below is an example of how a branch immediate instruction in memory at + // address 0xb010200 would disassemble with different offsets. 
+ // Base address | Disassembly + // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc) + // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc) + // 0xb010200 | 0x0: b #+0xcc (addr 0xcc) + void MapCodeAddress(int64_t base_address, const Instruction* instr_address); + int64_t CodeRelativeAddress(const void* instr); + + private: + void Format(const Instruction* instr, + const char* mnemonic, + const char* format); + void Substitute(const Instruction* instr, const char* string); + int SubstituteField(const Instruction* instr, const char* format); + int SubstituteRegisterField(const Instruction* instr, const char* format); + int SubstituteImmediateField(const Instruction* instr, const char* format); + int SubstituteLiteralField(const Instruction* instr, const char* format); + int SubstituteBitfieldImmediateField(const Instruction* instr, + const char* format); + int SubstituteShiftField(const Instruction* instr, const char* format); + int SubstituteExtendField(const Instruction* instr, const char* format); + int SubstituteConditionField(const Instruction* instr, const char* format); + int SubstitutePCRelAddressField(const Instruction* instr, const char* format); + int SubstituteBranchTargetField(const Instruction* instr, const char* format); + int SubstituteLSRegOffsetField(const Instruction* instr, const char* format); + int SubstitutePrefetchField(const Instruction* instr, const char* format); + int SubstituteBarrierField(const Instruction* instr, const char* format); + int SubstituteSysOpField(const Instruction* instr, const char* format); + int SubstituteCrField(const Instruction* instr, const char* format); + bool RdIsZROrSP(const Instruction* instr) const { + return (instr->GetRd() == kZeroRegCode); + } + + bool RnIsZROrSP(const Instruction* instr) const { + return (instr->GetRn() == kZeroRegCode); + } + + bool RmIsZROrSP(const Instruction* instr) const { + return (instr->GetRm() == kZeroRegCode); + } + + bool RaIsZROrSP(const Instruction* instr) const { + return (instr->GetRa() == kZeroRegCode); + } + + bool IsMovzMovnImm(unsigned reg_size, uint64_t value); + + int64_t code_address_offset() const { return code_address_offset_; } + + protected: + void ResetOutput(); + void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3); + + void set_code_address_offset(int64_t code_address_offset) { + code_address_offset_ = code_address_offset; + } + + char* buffer_; + uint32_t buffer_pos_; + uint32_t buffer_size_; + bool own_buffer_; + + int64_t code_address_offset_; +}; + + +class PrintDisassembler : public Disassembler { + public: + explicit PrintDisassembler(FILE* stream) + : cpu_features_auditor_(NULL), + cpu_features_prefix_("// Needs: "), + cpu_features_suffix_(""), + stream_(stream) {} + + // Convenience helpers for quick disassembly, without having to manually + // create a decoder. + void DisassembleBuffer(const Instruction* start, uint64_t size); + void DisassembleBuffer(const Instruction* start, const Instruction* end); + void Disassemble(const Instruction* instr); + + // If a CPUFeaturesAuditor is specified, it will be used to annotate + // disassembly. The CPUFeaturesAuditor is expected to visit the instructions + // _before_ the disassembler, such that the CPUFeatures information is + // available when the disassembler is called. + void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) { + cpu_features_auditor_ = auditor; + } + + // Set the prefix to appear before the CPU features annotations. 
+ void SetCPUFeaturesPrefix(const char* prefix) { + VIXL_ASSERT(prefix != NULL); + cpu_features_prefix_ = prefix; + } + + // Set the suffix to appear after the CPU features annotations. + void SetCPUFeaturesSuffix(const char* suffix) { + VIXL_ASSERT(suffix != NULL); + cpu_features_suffix_ = suffix; + } + + protected: + virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE; + + CPUFeaturesAuditor* cpu_features_auditor_; + const char* cpu_features_prefix_; + const char* cpu_features_suffix_; + + private: + FILE* stream_; +}; +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_DISASM_AARCH64_H diff --git a/dep/vixl/include/vixl/aarch64/instructions-aarch64.h b/dep/vixl/include/vixl/aarch64/instructions-aarch64.h new file mode 100644 index 000000000..4e6bce751 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/instructions-aarch64.h @@ -0,0 +1,865 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ +#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "constants-aarch64.h" + +namespace vixl { +namespace aarch64 { +// ISA constants. -------------------------------------------------------------- + +typedef uint32_t Instr; +const unsigned kInstructionSize = 4; +const unsigned kInstructionSizeLog2 = 2; +const unsigned kLiteralEntrySize = 4; +const unsigned kLiteralEntrySizeLog2 = 2; +const unsigned kMaxLoadLiteralRange = 1 * MBytes; + +// This is the nominal page size (as used by the adrp instruction); the actual +// size of the memory pages allocated by the kernel is likely to differ. 
+const unsigned kPageSize = 4 * KBytes; +const unsigned kPageSizeLog2 = 12; + +const unsigned kBRegSize = 8; +const unsigned kBRegSizeLog2 = 3; +const unsigned kBRegSizeInBytes = kBRegSize / 8; +const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3; +const unsigned kHRegSize = 16; +const unsigned kHRegSizeLog2 = 4; +const unsigned kHRegSizeInBytes = kHRegSize / 8; +const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3; +const unsigned kWRegSize = 32; +const unsigned kWRegSizeLog2 = 5; +const unsigned kWRegSizeInBytes = kWRegSize / 8; +const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3; +const unsigned kXRegSize = 64; +const unsigned kXRegSizeLog2 = 6; +const unsigned kXRegSizeInBytes = kXRegSize / 8; +const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3; +const unsigned kSRegSize = 32; +const unsigned kSRegSizeLog2 = 5; +const unsigned kSRegSizeInBytes = kSRegSize / 8; +const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3; +const unsigned kDRegSize = 64; +const unsigned kDRegSizeLog2 = 6; +const unsigned kDRegSizeInBytes = kDRegSize / 8; +const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3; +const unsigned kQRegSize = 128; +const unsigned kQRegSizeLog2 = 7; +const unsigned kQRegSizeInBytes = kQRegSize / 8; +const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3; +const uint64_t kWRegMask = UINT64_C(0xffffffff); +const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kHRegMask = UINT64_C(0xffff); +const uint64_t kSRegMask = UINT64_C(0xffffffff); +const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSSignMask = UINT64_C(0x80000000); +const uint64_t kDSignMask = UINT64_C(0x8000000000000000); +const uint64_t kWSignMask = UINT64_C(0x80000000); +const uint64_t kXSignMask = UINT64_C(0x8000000000000000); +const uint64_t kByteMask = UINT64_C(0xff); +const uint64_t kHalfWordMask = UINT64_C(0xffff); +const uint64_t kWordMask = UINT64_C(0xffffffff); +const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff); +const uint64_t kWMaxUInt = UINT64_C(0xffffffff); +const uint64_t kHMaxUInt = UINT64_C(0xffff); +// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation +// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour. +const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff); +const int64_t kXMinInt = -kXMaxInt - 1; +const int32_t kWMaxInt = INT32_C(0x7fffffff); +const int32_t kWMinInt = -kWMaxInt - 1; +const int16_t kHMaxInt = INT16_C(0x7fff); +const int16_t kHMinInt = -kHMaxInt - 1; +const unsigned kFpRegCode = 29; +const unsigned kLinkRegCode = 30; +const unsigned kSpRegCode = 31; +const unsigned kZeroRegCode = 31; +const unsigned kSPRegInternalCode = 63; +const unsigned kRegCodeMask = 0x1f; + +const unsigned kAddressTagOffset = 56; +const unsigned kAddressTagWidth = 8; +const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1) + << kAddressTagOffset; +VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); + +const uint64_t kTTBRMask = UINT64_C(1) << 55; + +// Make these moved float constants backwards compatible +// with explicit vixl::aarch64:: namespace references. 
+using vixl::kDoubleMantissaBits;
+using vixl::kDoubleExponentBits;
+using vixl::kFloatMantissaBits;
+using vixl::kFloatExponentBits;
+using vixl::kFloat16MantissaBits;
+using vixl::kFloat16ExponentBits;
+
+using vixl::kFP16PositiveInfinity;
+using vixl::kFP16NegativeInfinity;
+using vixl::kFP32PositiveInfinity;
+using vixl::kFP32NegativeInfinity;
+using vixl::kFP64PositiveInfinity;
+using vixl::kFP64NegativeInfinity;
+
+using vixl::kFP16DefaultNaN;
+using vixl::kFP32DefaultNaN;
+using vixl::kFP64DefaultNaN;
+
+unsigned CalcLSDataSize(LoadStoreOp op);
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+  UnknownBranchType = 0,
+  CondBranchType = 1,
+  UncondBranchType = 2,
+  CompareBranchType = 3,
+  TestBranchType = 4
+};
+
+enum AddrMode { Offset, PreIndex, PostIndex };
+
+enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+  Instr GetInstructionBits() const {
+    return *(reinterpret_cast<const Instr*>(this));
+  }
+  VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
+    return GetInstructionBits();
+  }
+
+  void SetInstructionBits(Instr new_instr) {
+    *(reinterpret_cast<Instr*>(this)) = new_instr;
+  }
+
+  int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
+  VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
+    return ExtractBit(pos);
+  }
+
+  uint32_t ExtractBits(int msb, int lsb) const {
+    return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
+  }
+  VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
+    return ExtractBits(msb, lsb);
+  }
+
+  int32_t ExtractSignedBits(int msb, int lsb) const {
+    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+    return ExtractSignedBitfield32(msb, lsb, bits);
+  }
+  VIXL_DEPRECATED("ExtractSignedBits",
+                  int32_t SignedBits(int msb, int lsb) const) {
+    return ExtractSignedBits(msb, lsb);
+  }
+
+  Instr Mask(uint32_t mask) const {
+    VIXL_ASSERT(mask != 0);
+    return GetInstructionBits() & mask;
+  }
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                  \
+  int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
+  VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
+  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+#undef DEFINE_GETTER
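The getters above all reduce to shifted masks over the 32-bit instruction word. A standalone sketch of the same bitfield arithmetic, where the free function restates Instruction::ExtractBits and the encoding used is a real ADD immediate:

```cpp
#include <cstdint>
#include <cstdio>

// Restates the arithmetic behind Instruction::ExtractBits for illustration.
uint32_t ExtractBits(uint32_t instr, int msb, int lsb) {
  return (instr >> lsb) & ((UINT32_C(1) << (msb - lsb + 1)) - 1);
}

int main() {
  uint32_t instr = 0x91001022;  // add x2, x1, #4
  // For ADD (immediate): Rd is bits 4:0, Rn is bits 9:5, imm12 is bits 21:10.
  std::printf("rd=%u rn=%u imm=%u\n",
              ExtractBits(instr, 4, 0),     // 2
              ExtractBits(instr, 9, 5),     // 1
              ExtractBits(instr, 21, 10));  // 4
  return 0;
}
```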
+
+  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+  // formed from ImmPCRelLo and ImmPCRelHi.
+  int GetImmPCRel() const {
+    uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
+    uint32_t lo = GetImmPCRelLo();
+    uint32_t offset = (hi << ImmPCRelLo_width) | lo;
+    int width = ImmPCRelLo_width + ImmPCRelHi_width;
+    return ExtractSignedBitfield32(width - 1, 0, offset);
+  }
+  VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
+
+  uint64_t GetImmLogical() const;
+  VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
+    return GetImmLogical();
+  }
+
+  unsigned GetImmNEONabcdefgh() const;
+  VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
+    return GetImmNEONabcdefgh();
+  }
+
+  Float16 GetImmFP16() const;
+
+  float GetImmFP32() const;
+  VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }
+
+  double GetImmFP64() const;
+  VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }
+
+  Float16 GetImmNEONFP16() const;
+
+  float GetImmNEONFP32() const;
+  VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
+    return GetImmNEONFP32();
+  }
+
+  double GetImmNEONFP64() const;
+  VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
+    return GetImmNEONFP64();
+  }
+
+  unsigned GetSizeLS() const {
+    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+  }
+  VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }
+
+  unsigned GetSizeLSPair() const {
+    return CalcLSPairDataSize(
+        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+  }
+  VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
+    return GetSizeLSPair();
+  }
+
+  int GetNEONLSIndex(int access_size_shift) const {
+    int64_t q = GetNEONQ();
+    int64_t s = GetNEONS();
+    int64_t size = GetNEONLSSize();
+    int64_t index = (q << 3) | (s << 2) | size;
+    return static_cast<int>(index >> access_size_shift);
+  }
+  VIXL_DEPRECATED("GetNEONLSIndex",
+                  int NEONLSIndex(int access_size_shift) const) {
+    return GetNEONLSIndex(access_size_shift);
+  }
+
+  // Helpers.
+  bool IsCondBranchImm() const {
+    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+  }
+
+  bool IsUncondBranchImm() const {
+    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+  }
+
+  bool IsCompareBranch() const {
+    return Mask(CompareBranchFMask) == CompareBranchFixed;
+  }
+
+  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
+
+  bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }
+
+  bool IsPCRelAddressing() const {
+    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+  }
+
+  bool IsLogicalImmediate() const {
+    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+  }
+
+  bool IsAddSubImmediate() const {
+    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+  }
+
+  bool IsAddSubExtended() const {
+    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+  }
+
+  bool IsLoadOrStore() const {
+    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+  }
+
+  bool IsLoad() const;
+  bool IsStore() const;
+
+  bool IsLoadLiteral() const {
+    // This includes PRFM_lit.
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed; + } + + bool IsMovn() const { + return (Mask(MoveWideImmediateMask) == MOVN_x) || + (Mask(MoveWideImmediateMask) == MOVN_w); + } + + static int GetImmBranchRangeBitwidth(ImmBranchType branch_type); + VIXL_DEPRECATED( + "GetImmBranchRangeBitwidth", + static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) { + return GetImmBranchRangeBitwidth(branch_type); + } + + static int32_t GetImmBranchForwardRange(ImmBranchType branch_type); + VIXL_DEPRECATED( + "GetImmBranchForwardRange", + static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) { + return GetImmBranchForwardRange(branch_type); + } + + static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset); + + // Indicate whether Rd can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rd field. + Reg31Mode GetRdMode() const { + // The following instructions use sp or wsp as Rd: + // Add/sub (immediate) when not setting the flags. + // Add/sub (extended) when not setting the flags. + // Logical (immediate) when not setting the flags. + // Otherwise, r31 is the zero register. + if (IsAddSubImmediate() || IsAddSubExtended()) { + if (Mask(AddSubSetFlagsBit)) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + if (IsLogicalImmediate()) { + // Of the logical (immediate) instructions, only ANDS (and its aliases) + // can set the flags. The others can all write into sp. + // Note that some logical operations are not available to + // immediate-operand instructions, so we have to combine two masks here. + if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + return Reg31IsZeroRegister; + } + VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); } + + // Indicate whether Rn can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rn field. + Reg31Mode GetRnMode() const { + // The following instructions use sp or wsp as Rn: + // All loads and stores. + // Add/sub (immediate). + // Add/sub (extended). + // Otherwise, r31 is the zero register. + if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { + return Reg31IsStackPointer; + } + return Reg31IsZeroRegister; + } + VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); } + + ImmBranchType GetBranchType() const { + if (IsCondBranchImm()) { + return CondBranchType; + } else if (IsUncondBranchImm()) { + return UncondBranchType; + } else if (IsCompareBranch()) { + return CompareBranchType; + } else if (IsTestBranch()) { + return TestBranchType; + } else { + return UnknownBranchType; + } + } + VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) { + return GetBranchType(); + } + + // Find the target of this instruction. 'this' may be a branch or a + // PC-relative addressing instruction. + const Instruction* GetImmPCOffsetTarget() const; + VIXL_DEPRECATED("GetImmPCOffsetTarget", + const Instruction* ImmPCOffsetTarget() const) { + return GetImmPCOffsetTarget(); + } + + // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or + // a PC-relative addressing instruction. + void SetImmPCOffsetTarget(const Instruction* target); + // Patch a literal load instruction to load from 'source'. + void SetImmLLiteral(const Instruction* source); + + // The range of a load literal instruction, expressed as 'instr +- range'. 
+  // The range is actually the 'positive' range; the load instruction can
+  // target [instr - range - kInstructionSize, instr + range].
+  static const int kLoadLiteralImmBitwidth = 19;
+  static const int kLoadLiteralRange =
+      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
+
+  // Calculate the address of a literal referred to by a load-literal
+  // instruction, and return it as the specified type.
+  //
+  // The literal itself is safely mutable only if the backing buffer is safely
+  // mutable.
+  template <typename T>
+  T GetLiteralAddress() const {
+    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
+    int64_t offset = GetImmLLiteral() * static_cast<int64_t>(kLiteralEntrySize);
+    uint64_t address_raw = base_raw + offset;
+
+    // Cast the address using a C-style cast. A reinterpret_cast would be
+    // appropriate, but it can't cast one integral type to another.
+    T address = (T)(address_raw);
+
+    // Assert that the address can be represented by the specified type.
+    VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+    return address;
+  }
+  template <typename T>
+  VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
+    return GetLiteralAddress<T>();
+  }
+
+  uint32_t GetLiteral32() const {
+    uint32_t literal;
+    memcpy(&literal, GetLiteralAddress<uint32_t*>(), sizeof(literal));
+    return literal;
+  }
+  VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
+    return GetLiteral32();
+  }
+
+  uint64_t GetLiteral64() const {
+    uint64_t literal;
+    memcpy(&literal, GetLiteralAddress<uint64_t*>(), sizeof(literal));
+    return literal;
+  }
+  VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
+    return GetLiteral64();
+  }
+
+  float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
+  VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
+    return GetLiteralFP32();
+  }
+
+  double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
+  VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
+    return GetLiteralFP64();
+  }
+
+  Instruction* GetNextInstruction() { return this + kInstructionSize; }
+  const Instruction* GetNextInstruction() const {
+    return this + kInstructionSize;
+  }
+  VIXL_DEPRECATED("GetNextInstruction",
+                  const Instruction* NextInstruction() const) {
+    return GetNextInstruction();
+  }
+
+  const Instruction* GetInstructionAtOffset(int64_t offset) const {
+    VIXL_ASSERT(IsWordAligned(this + offset));
+    return this + offset;
+  }
+  VIXL_DEPRECATED("GetInstructionAtOffset",
+                  const Instruction* InstructionAtOffset(int64_t offset)
+                      const) {
+    return GetInstructionAtOffset(offset);
+  }
+
+  template <typename T>
+  static Instruction* Cast(T src) {
+    return reinterpret_cast<Instruction*>(src);
+  }
+
+  template <typename T>
+  static const Instruction* CastConst(T src) {
+    return reinterpret_cast<const Instruction*>(src);
+  }
+
+ private:
+  int GetImmBranch() const;
+
+  static Float16 Imm8ToFloat16(uint32_t imm8);
+  static float Imm8ToFP32(uint32_t imm8);
+  static double Imm8ToFP64(uint32_t imm8);
+
+  void SetPCRelImmTarget(const Instruction* target);
+  void SetBranchImmTarget(const Instruction* target);
+};
+
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+  kFormatUndefined = 0xffffffff,
+  kFormat8B = NEON_8B,
+  kFormat16B = NEON_16B,
+  kFormat4H = NEON_4H,
+  kFormat8H = NEON_8H,
+  kFormat2S = NEON_2S,
+  kFormat4S = NEON_4S,
+  kFormat1D = NEON_1D,
+  kFormat2D = NEON_2D,
+
+  // Scalar formats. We add the scalar bit to distinguish between scalar and
+  // vector enumerations; the bit is always set in the encoding of scalar ops
+  // and always clear for vector ops.
+  // Although kFormatD and kFormat1D appear to be the same, their meaning is
+  // subtly different. The first is a scalar operation, the second a vector
+  // operation that only affects one lane.
+  kFormatB = NEON_B | NEONScalar,
+  kFormatH = NEON_H | NEONScalar,
+  kFormatS = NEON_S | NEONScalar,
+  kFormatD = NEON_D | NEONScalar,
+
+  // A value invented solely for FP16 scalar pairwise simulator trace tests.
+  kFormat2H = 0xfffffffe
+};
+
+const int kMaxLanesPerVector = 16;
+
+VectorFormat VectorFormatHalfWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatFillQ(VectorFormat vform);
+VectorFormat ScalarFormatFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+// TODO: Make the return types of these functions consistent.
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
+
+// clang-format off
+enum NEONFormat {
+  NF_UNDEF = 0,
+  NF_8B    = 1,
+  NF_16B   = 2,
+  NF_4H    = 3,
+  NF_8H    = 4,
+  NF_2S    = 5,
+  NF_4S    = 6,
+  NF_1D    = 7,
+  NF_2D    = 8,
+  NF_B     = 9,
+  NF_H     = 10,
+  NF_S     = 11,
+  NF_D     = 12
+};
+// clang-format on
+
+static const unsigned kNEONFormatMaxBits = 6;
+
+struct NEONFormatMap {
+  // The bit positions in the instruction to consider.
+  uint8_t bits[kNEONFormatMaxBits];
+
+  // Mapping from concatenated bits to format.
+  NEONFormat map[1 << kNEONFormatMaxBits];
+};
+
+class NEONFormatDecoder {
+ public:
+  enum SubstitutionMode { kPlaceholder, kFormat };
+
+  // Construct a format decoder with increasingly specific format maps for
+  // each substitution. If no format map is specified, the default is the
+  // integer format map.
+  explicit NEONFormatDecoder(const Instruction* instr) {
+    instrbits_ = instr->GetInstructionBits();
+    SetFormatMaps(IntegerFormatMap());
+  }
+  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
+    instrbits_ = instr->GetInstructionBits();
+    SetFormatMaps(format);
+  }
+  NEONFormatDecoder(const Instruction* instr,
+                    const NEONFormatMap* format0,
+                    const NEONFormatMap* format1) {
+    instrbits_ = instr->GetInstructionBits();
+    SetFormatMaps(format0, format1);
+  }
+  NEONFormatDecoder(const Instruction* instr,
+                    const NEONFormatMap* format0,
+                    const NEONFormatMap* format1,
+                    const NEONFormatMap* format2) {
+    instrbits_ = instr->GetInstructionBits();
+    SetFormatMaps(format0, format1, format2);
+  }
+
+  // Set the format mapping for all or individual substitutions.
+  void SetFormatMaps(const NEONFormatMap* format0,
+                     const NEONFormatMap* format1 = NULL,
+                     const NEONFormatMap* format2 = NULL) {
+    VIXL_ASSERT(format0 != NULL);
+    formats_[0] = format0;
+    formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+    formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+  }
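The NULL fallbacks above mean an omitted map inherits the previous slot's map, so a decoder built with a single map substitutes it for all three operands. A standalone restatement of just that rule, using invented names:

```cpp
#include <cstdio>

// A standalone sketch of the fallback rule in SetFormatMaps: slots without an
// explicit map inherit the map of the previous slot.
struct Map { const char* name; };

void SetFormatMaps(const Map* maps[3], const Map* m0,
                   const Map* m1 = nullptr, const Map* m2 = nullptr) {
  maps[0] = m0;
  maps[1] = (m1 == nullptr) ? maps[0] : m1;  // Inherit slot 0.
  maps[2] = (m2 == nullptr) ? maps[1] : m2;  // Inherit slot 1.
}

int main() {
  const Map integer = {"integer"}, fp = {"fp"};
  const Map* slots[3];
  SetFormatMaps(slots, &integer, &fp);  // Slot 2 not given.
  std::printf("%s %s %s\n", slots[0]->name, slots[1]->name, slots[2]->name);
  // Prints: integer fp fp
  return 0;
}
```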
+  void SetFormatMap(unsigned index, const NEONFormatMap* format) {
+    VIXL_ASSERT(index <= ArrayLength(formats_));
+    VIXL_ASSERT(format != NULL);
+    formats_[index] = format;
+  }
+
+  // Substitute %s in the input string with the placeholder string for each
+  // register, i.e. "'B", "'H", etc.
+  const char* SubstitutePlaceholders(const char* string) {
+    return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+  }
+
+  // Substitute %s in the input string with a new string based on the
+  // substitution mode.
+  const char* Substitute(const char* string,
+                         SubstitutionMode mode0 = kFormat,
+                         SubstitutionMode mode1 = kFormat,
+                         SubstitutionMode mode2 = kFormat) {
+    snprintf(form_buffer_,
+             sizeof(form_buffer_),
+             string,
+             GetSubstitute(0, mode0),
+             GetSubstitute(1, mode1),
+             GetSubstitute(2, mode2));
+    return form_buffer_;
+  }
+
+  // Append a "2" to a mnemonic string based on the state of the Q bit.
+  const char* Mnemonic(const char* mnemonic) {
+    if ((instrbits_ & NEON_Q) != 0) {
+      snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+      return mne_buffer_;
+    }
+    return mnemonic;
+  }
+
+  VectorFormat GetVectorFormat(int format_index = 0) {
+    return GetVectorFormat(formats_[format_index]);
+  }
+
+  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
+    static const VectorFormat vform[] = {kFormatUndefined,
+                                         kFormat8B,
+                                         kFormat16B,
+                                         kFormat4H,
+                                         kFormat8H,
+                                         kFormat2S,
+                                         kFormat4S,
+                                         kFormat1D,
+                                         kFormat2D,
+                                         kFormatB,
+                                         kFormatH,
+                                         kFormatS,
+                                         kFormatD};
+    VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
+    return vform[GetNEONFormat(format_map)];
+  }
+
+  // Built in mappings for common cases.
+
+  // The integer format map uses three bits (Q, size<1:0>) to encode the
+  // "standard" set of NEON integer vector formats.
+  static const NEONFormatMap* IntegerFormatMap() {
+    static const NEONFormatMap map =
+        {{23, 22, 30},
+         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+    return &map;
+  }
+
+  // The long integer format map uses two bits (size<1:0>) to encode the
+  // long set of NEON integer vector formats. These are used in narrow, wide
+  // and long operations.
+  static const NEONFormatMap* LongIntegerFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
+    return &map;
+  }
+
+  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+  // formats: NF_2S, NF_4S, NF_2D.
+  static const NEONFormatMap* FPFormatMap() {
+    static const NEONFormatMap map = {{22, 30},
+                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+    return &map;
+  }
+
+  // The FP16 format map uses one bit (Q) to encode the NEON vector format:
+  // NF_4H, NF_8H.
+  static const NEONFormatMap* FP16FormatMap() {
+    static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
+    return &map;
+  }
+
+  // The load/store format map uses three bits (Q, 11, 10) to encode the
+  // set of NEON vector formats.
+  static const NEONFormatMap* LoadStoreFormatMap() {
+    static const NEONFormatMap map =
+        {{11, 10, 30},
+         {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
+    return &map;
+  }
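How one of these maps is applied, restated standalone: the listed bit positions are read from the instruction word and concatenated (first listed bit ends up most significant) to index the map table. This mirrors NEONFormatDecoder::PickBits, defined later in this header; the helper below takes an explicit count instead of a zero terminator:

```cpp
#include <cstdint>
#include <cstdio>

// Concatenate the selected instruction bits into an index into a format map.
uint8_t PickBits(uint32_t instrbits, const uint8_t bits[], unsigned n) {
  uint8_t result = 0;
  for (unsigned b = 0; b < n; b++) {
    result <<= 1;
    result |= ((instrbits & (UINT32_C(1) << bits[b])) == 0) ? 0 : 1;
  }
  return result;
}

int main() {
  // IntegerFormatMap reads {23, 22, 30}: size<1:0> and Q.
  const uint8_t bits[] = {23, 22, 30};
  const char* map[] = {"8b", "16b", "4h", "8h", "2s", "4s", "undef", "2d"};
  uint32_t instr = (1u << 30) | (1u << 23);  // Q = 1, size = 10.
  std::printf("%s\n", map[PickBits(instr, bits, 3)]);  // Prints: 4s
  return 0;
}
```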
+
+  // The logical format map uses one bit (Q) to encode the NEON vector format:
+  // NF_8B, NF_16B.
+  static const NEONFormatMap* LogicalFormatMap() {
+    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
+    return &map;
+  }
+
+  // The triangular format map uses between two and five bits to encode the
+  // NEON vector format:
+  //   xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+  //   x1000->2S, x1001->4S, 10001->2D, all others undefined.
+  static const NEONFormatMap* TriangularFormatMap() {
+    static const NEONFormatMap map =
+        {{19, 18, 17, 16, 30},
+         {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+          NF_UNDEF, NF_2D,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+          NF_2S,    NF_4S,    NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
+    return &map;
+  }
+
+  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+  // formats: NF_B, NF_H, NF_S, NF_D.
+  static const NEONFormatMap* ScalarFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
+    return &map;
+  }
+
+  // The long scalar format map uses two bits (size<1:0>) to encode the longer
+  // NEON scalar formats: NF_H, NF_S, NF_D.
+  static const NEONFormatMap* LongScalarFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
+    return &map;
+  }
+
+  // The FP scalar format map assumes one bit (size<0>) is used to encode the
+  // NEON FP scalar formats: NF_S, NF_D.
+  static const NEONFormatMap* FPScalarFormatMap() {
+    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
+    return &map;
+  }
+
+  // The FP scalar pairwise format map assumes two bits (U, size<0>) are used
+  // to encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
+  static const NEONFormatMap* FPScalarPairwiseFormatMap() {
+    static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
+    return &map;
+  }
+
+  // The triangular scalar format map uses between one and four bits to encode
+  // the NEON FP scalar formats:
+  //   xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+  static const NEONFormatMap* TriangularScalarFormatMap() {
+    static const NEONFormatMap map = {{19, 18, 17, 16},
+                                      {NF_UNDEF,
+                                       NF_B,
+                                       NF_H,
+                                       NF_B,
+                                       NF_S,
+                                       NF_B,
+                                       NF_H,
+                                       NF_B,
+                                       NF_D,
+                                       NF_B,
+                                       NF_H,
+                                       NF_B,
+                                       NF_S,
+                                       NF_B,
+                                       NF_H,
+                                       NF_B}};
+    return &map;
+  }
+
+ private:
+  // Get a pointer to a string that represents the format or placeholder for
+  // the specified substitution index, based on the format map and instruction.
+  const char* GetSubstitute(int index, SubstitutionMode mode) {
+    if (mode == kFormat) {
+      return NEONFormatAsString(GetNEONFormat(formats_[index]));
+    }
+    VIXL_ASSERT(mode == kPlaceholder);
+    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+  }
+
+  // Get the NEONFormat enumerated value for bits obtained from the
+  // instruction based on the specified format mapping.
+  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
+    return format_map->map[PickBits(format_map->bits)];
+  }
+
+  // Convert a NEONFormat into a string.
+  static const char* NEONFormatAsString(NEONFormat format) {
+    // clang-format off
+    static const char* formats[] = {
+      "undefined",
+      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
+      "b", "h", "s", "d"
+    };
+    // clang-format on
+    VIXL_ASSERT(format < ArrayLength(formats));
+    return formats[format];
+  }
+
+  // Convert a NEONFormat into a register placeholder string.
+ static const char* NEONFormatAsPlaceholder(NEONFormat format) { + VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) || + (format == NF_D) || (format == NF_UNDEF)); + // clang-format off + static const char* formats[] = { + "undefined", + "undefined", "undefined", "undefined", "undefined", + "undefined", "undefined", "undefined", "undefined", + "'B", "'H", "'S", "'D" + }; + // clang-format on + return formats[format]; + } + + // Select bits from instrbits_ defined by the bits array, concatenate them, + // and return the value. + uint8_t PickBits(const uint8_t bits[]) { + uint8_t result = 0; + for (unsigned b = 0; b < kNEONFormatMaxBits; b++) { + if (bits[b] == 0) break; + result <<= 1; + result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1; + } + return result; + } + + Instr instrbits_; + const NEONFormatMap* formats_[3]; + char form_buffer_[64]; + char mne_buffer_[16]; +}; +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/instrument-aarch64.h b/dep/vixl/include/vixl/aarch64/instrument-aarch64.h new file mode 100644 index 000000000..4401b3eac --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/instrument-aarch64.h @@ -0,0 +1,117 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_ +#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_ + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "constants-aarch64.h" +#include "decoder-aarch64.h" +#include "instrument-aarch64.h" + +namespace vixl { +namespace aarch64 { + +const int kCounterNameMaxLength = 256; +const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22; + + +enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 }; + + +enum CounterType { + Gauge = 0, // Gauge counters reset themselves after reading. + Cumulative = 1 // Cumulative counters keep their value after reading. 
+}; + + +class Counter { + public: + explicit Counter(const char* name, CounterType type = Gauge); + + void Increment(); + void Enable(); + void Disable(); + bool IsEnabled(); + uint64_t GetCount(); + VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); } + + const char* GetName(); + VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); } + + CounterType GetType(); + VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); } + + private: + char name_[kCounterNameMaxLength]; + uint64_t count_; + bool enabled_; + CounterType type_; +}; + + +class Instrument : public DecoderVisitor { + public: + explicit Instrument( + const char* datafile = NULL, + uint64_t sample_period = kDefaultInstrumentationSamplingPeriod); + ~Instrument(); + + void Enable(); + void Disable(); + +// Declare all Visitor functions. +#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + private: + void Update(); + void DumpCounters(); + void DumpCounterNames(); + void DumpEventMarker(unsigned marker); + void HandleInstrumentationEvent(unsigned event); + Counter* GetCounter(const char* name); + + void InstrumentLoadStore(const Instruction* instr); + void InstrumentLoadStorePair(const Instruction* instr); + + std::list counters_; + + FILE* output_stream_; + + // Counter information is dumped every sample_period_ instructions decoded. + // For a sample_period_ = 0 a final counter value is only produced when the + // Instrumentation class is destroyed. + uint64_t sample_period_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h b/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h new file mode 100644 index 000000000..88ed55770 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h @@ -0,0 +1,3965 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_ +#define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_ + +#include <algorithm> +#include <limits> + +#include "../code-generation-scopes-vixl.h" +#include "../globals-vixl.h" +#include "../macro-assembler-interface.h" + +#include "assembler-aarch64.h" +#include "instrument-aarch64.h" +// Required for runtime call support. +// TODO: Break this dependency. We should be able to separate out the necessary +// parts so that we don't need to include the whole simulator header. +#include "simulator-aarch64.h" +// Required in order to generate debugging instructions for the simulator. This +// is needed regardless of whether the simulator is included or not, since +// generating simulator specific instructions is controlled at runtime. +#include "simulator-constants-aarch64.h" + + +#define LS_MACRO_LIST(V) \ + V(Ldrb, Register&, rt, LDRB_w) \ + V(Strb, Register&, rt, STRB_w) \ + V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ + V(Ldrh, Register&, rt, LDRH_w) \ + V(Strh, Register&, rt, STRH_w) \ + V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \ + V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ + V(Str, CPURegister&, rt, StoreOpFor(rt)) \ + V(Ldrsw, Register&, rt, LDRSW_x) + + +#define LSPAIR_MACRO_LIST(V) \ + V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ + V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ + V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) + +namespace vixl { +namespace aarch64 { + +// Forward declaration +class MacroAssembler; +class UseScratchRegisterScope; + +class Pool { + public: + explicit Pool(MacroAssembler* masm) + : checkpoint_(kNoCheckpointRequired), masm_(masm) { + Reset(); + } + + void Reset() { + checkpoint_ = kNoCheckpointRequired; + monitor_ = 0; + } + + void Block() { monitor_++; } + void Release(); + bool IsBlocked() const { return monitor_ != 0; } + + static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX; + + void SetNextCheckpoint(ptrdiff_t checkpoint); + ptrdiff_t GetCheckpoint() const { return checkpoint_; } + VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) { + return GetCheckpoint(); + } + + enum EmitOption { kBranchRequired, kNoBranchRequired }; + + protected: + // Next buffer offset at which a check is required for this pool. + ptrdiff_t checkpoint_; + // Indicates whether the emission of this pool is blocked. + int monitor_; + // The MacroAssembler using this pool. + MacroAssembler* masm_; +}; + + +class LiteralPool : public Pool { + public: + explicit LiteralPool(MacroAssembler* masm); + ~LiteralPool(); + void Reset(); + + void AddEntry(RawLiteral* literal); + bool IsEmpty() const { return entries_.empty(); } + size_t GetSize() const; + VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); } + + size_t GetMaxSize() const; + VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); } + + size_t GetOtherPoolsMaxSize() const; + VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) { + return GetOtherPoolsMaxSize(); + } + + void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired); + // Check whether we need to emit the literal pool in order to be able to + // safely emit a branch with a given range.
+ void CheckEmitForBranch(size_t range); + void Emit(EmitOption option = kNoBranchRequired); + + void SetNextRecommendedCheckpoint(ptrdiff_t offset); + ptrdiff_t GetNextRecommendedCheckpoint(); + VIXL_DEPRECATED("GetNextRecommendedCheckpoint", + ptrdiff_t NextRecommendedCheckpoint()) { + return GetNextRecommendedCheckpoint(); + } + + void UpdateFirstUse(ptrdiff_t use_position); + + void DeleteOnDestruction(RawLiteral* literal) { + deleted_on_destruction_.push_back(literal); + } + + // Recommended not exact since the pool can be blocked for short periods. + static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes; + + private: + std::vector<RawLiteral*> entries_; + size_t size_; + ptrdiff_t first_use_; + // The parent class `Pool` provides a `checkpoint_`, which is the buffer + // offset before which a check *must* occur. This recommended checkpoint + // indicates when we would like to start emitting the constant pool. The + // MacroAssembler can, but does not have to, check the buffer when the + // checkpoint is reached. + ptrdiff_t recommended_checkpoint_; + + std::vector<RawLiteral*> deleted_on_destruction_; +}; + + +inline size_t LiteralPool::GetSize() const { + // Account for the pool header. + return size_ + kInstructionSize; +} + + +inline size_t LiteralPool::GetMaxSize() const { + // Account for the potential branch over the pool. + return GetSize() + kInstructionSize; +} + + +inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() { + return first_use_ + kRecommendedLiteralPoolRange; +} + + +class VeneerPool : public Pool { + public: + explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {} + + void Reset(); + + void Block() { monitor_++; } + void Release(); + bool IsBlocked() const { return monitor_ != 0; } + bool IsEmpty() const { return unresolved_branches_.IsEmpty(); } + + class BranchInfo { + public: + BranchInfo() + : first_unreacheable_pc_(0), + pc_offset_(0), + label_(NULL), + branch_type_(UnknownBranchType) {} + BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type) + : pc_offset_(offset), label_(label), branch_type_(branch_type) { + first_unreacheable_pc_ = + pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_); + } + + static bool IsValidComparison(const BranchInfo& branch_1, + const BranchInfo& branch_2) { + // BranchInfo are always compared against other objects with + // the same branch type. + if (branch_1.branch_type_ != branch_2.branch_type_) { + return false; + } + // Since we should never have two branch infos with the same offsets, it + // first looks like we should check that offsets are different. However + // the operators may also be used to *search* for a branch info in the + // set. + bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_); + return (!same_offsets || ((branch_1.label_ == branch_2.label_) && + (branch_1.first_unreacheable_pc_ == + branch_2.first_unreacheable_pc_))); + } + + // We must provide comparison operators to work with InvalSet.
+ bool operator==(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ == other.pc_offset_; + } + bool operator<(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ < other.pc_offset_; + } + bool operator<=(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ <= other.pc_offset_; + } + bool operator>(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ > other.pc_offset_; + } + + // First instruction position that is not reachable by the branch using a + // positive branch offset. + ptrdiff_t first_unreacheable_pc_; + // Offset of the branch in the code generation buffer. + ptrdiff_t pc_offset_; + // The label branched to. + Label* label_; + ImmBranchType branch_type_; + }; + + bool BranchTypeUsesVeneers(ImmBranchType type) { + return (type != UnknownBranchType) && (type != UncondBranchType); + } + + void RegisterUnresolvedBranch(ptrdiff_t branch_pos, + Label* label, + ImmBranchType branch_type); + void DeleteUnresolvedBranchInfoForLabel(Label* label); + + bool ShouldEmitVeneer(int64_t first_unreacheable_pc, size_t amount); + bool ShouldEmitVeneers(size_t amount) { + return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount); + } + + void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired); + void Emit(EmitOption option, size_t margin); + + // The code size generated for a veneer. Currently one branch instruction. + // This is for code size checking purposes, and can be extended in the future + // for example if we decide to add nops between the veneers. + static const int kVeneerCodeSize = 1 * kInstructionSize; + // The maximum size of code other than veneers that can be generated when + // emitting a veneer pool. Currently there can be an additional branch to jump + // over the pool. 
+ static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize; + + void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); } + + int GetNumberOfPotentialVeneers() const { + return static_cast<int>(unresolved_branches_.GetSize()); + } + VIXL_DEPRECATED("GetNumberOfPotentialVeneers", + int NumberOfPotentialVeneers() const) { + return GetNumberOfPotentialVeneers(); + } + + size_t GetMaxSize() const { + return kPoolNonVeneerCodeSize + + unresolved_branches_.GetSize() * kVeneerCodeSize; + } + VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); } + + size_t GetOtherPoolsMaxSize() const; + VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) { + return GetOtherPoolsMaxSize(); + } + + static const int kNPreallocatedInfos = 4; + static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX; + static const size_t kReclaimFrom = 128; + static const size_t kReclaimFactor = 16; + + private: + typedef InvalSet<BranchInfo, kNPreallocatedInfos, ptrdiff_t, kInvalidOffset, + kReclaimFrom, kReclaimFactor> + BranchInfoTypedSetBase; + typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase; + + class BranchInfoTypedSet : public BranchInfoTypedSetBase { + public: + BranchInfoTypedSet() : BranchInfoTypedSetBase() {} + + ptrdiff_t GetFirstLimit() { + if (empty()) { + return kInvalidOffset; + } + return GetMinElementKey(); + } + VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) { + return GetFirstLimit(); + } + }; + + class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase { + public: + BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {} + explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set) + : BranchInfoTypedSetIterBase(typed_set) {} + + // TODO: Remove these and use the STL-like interface instead. + using BranchInfoTypedSetIterBase::Advance; + using BranchInfoTypedSetIterBase::Current; + }; + + class BranchInfoSet { + public: + void insert(BranchInfo branch_info) { + ImmBranchType type = branch_info.branch_type_; + VIXL_ASSERT(IsValidBranchType(type)); + typed_set_[BranchIndexFromType(type)].insert(branch_info); + } + + void erase(BranchInfo branch_info) { + if (IsValidBranchType(branch_info.branch_type_)) { + int index = + BranchInfoSet::BranchIndexFromType(branch_info.branch_type_); + typed_set_[index].erase(branch_info); + } + } + + size_t GetSize() const { + size_t res = 0; + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + res += typed_set_[i].size(); + } + return res; + } + VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); } + + bool IsEmpty() const { + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + if (!typed_set_[i].empty()) { + return false; + } + } + return true; + } + VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); } + + ptrdiff_t GetFirstLimit() { + ptrdiff_t res = kInvalidOffset; + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + res = std::min(res, typed_set_[i].GetFirstLimit()); + } + return res; + } + VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) { + return GetFirstLimit(); + } + + void Reset() { + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + typed_set_[i].clear(); + } + } + + static ImmBranchType BranchTypeFromIndex(int index) { + switch (index) { + case 0: + return CondBranchType; + case 1: + return CompareBranchType; + case 2: + return TestBranchType; + default: + VIXL_UNREACHABLE(); + return UnknownBranchType; + } + } + static int BranchIndexFromType(ImmBranchType branch_type) { + switch (branch_type) { + case CondBranchType: + return 0; + case CompareBranchType: + return 1; + case TestBranchType: +
return 2; + default: + VIXL_UNREACHABLE(); + return 0; + } + } + + bool IsValidBranchType(ImmBranchType branch_type) { + return (branch_type != UnknownBranchType) && + (branch_type != UncondBranchType); + } + + private: + static const int kNumberOfTrackedBranchTypes = 3; + BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes]; + + friend class VeneerPool; + friend class BranchInfoSetIterator; + }; + + class BranchInfoSetIterator { + public: + explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + new (&sub_iterator_[i]) + BranchInfoTypedSetIterator(&(set_->typed_set_[i])); + } + } + + VeneerPool::BranchInfo* Current() { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + return sub_iterator_[i].Current(); + } + } + VIXL_UNREACHABLE(); + return NULL; + } + + void Advance() { + VIXL_ASSERT(!Done()); + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].Advance(); + return; + } + } + VIXL_UNREACHABLE(); + } + + bool Done() const { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) return false; + } + return true; + } + + void AdvanceToNextType() { + VIXL_ASSERT(!Done()); + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].Finish(); + return; + } + } + VIXL_UNREACHABLE(); + } + + void DeleteCurrentAndAdvance() { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].DeleteCurrentAndAdvance(); + return; + } + } + } + + private: + BranchInfoSet* set_; + BranchInfoTypedSetIterator + sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes]; + }; + + ptrdiff_t GetNextCheckPoint() { + if (unresolved_branches_.IsEmpty()) { + return kNoCheckpointRequired; + } else { + return unresolved_branches_.GetFirstLimit(); + } + } + VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) { + return GetNextCheckPoint(); + } + + // Information about unresolved (forward) branches. + BranchInfoSet unresolved_branches_; +}; + + +// Helper for common Emission checks. +// The macro-instruction maps to a single instruction. +class SingleEmissionCheckScope : public EmissionCheckScope { + public: + explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm) + : EmissionCheckScope(masm, kInstructionSize) {} +}; + + +// The macro instruction is a "typical" macro-instruction. Typical macro- +// instruction only emit a few instructions, a few being defined as 8 here. +class MacroEmissionCheckScope : public EmissionCheckScope { + public: + explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm) + : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {} + + private: + static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize; +}; + + +enum BranchType { + // Copies of architectural conditions. + // The associated conditions can be used in place of those, the code will + // take care of reinterpreting them with the correct type. 
+ integer_eq = eq, + integer_ne = ne, + integer_hs = hs, + integer_lo = lo, + integer_mi = mi, + integer_pl = pl, + integer_vs = vs, + integer_vc = vc, + integer_hi = hi, + integer_ls = ls, + integer_ge = ge, + integer_lt = lt, + integer_gt = gt, + integer_le = le, + integer_al = al, + integer_nv = nv, + + // These two are *different* from the architectural codes al and nv. + // 'always' is used to generate unconditional branches. + // 'never' is used to not generate a branch (generally as the inverse + // branch type of 'always). + always, + never, + // cbz and cbnz + reg_zero, + reg_not_zero, + // tbz and tbnz + reg_bit_clear, + reg_bit_set, + + // Aliases. + kBranchTypeFirstCondition = eq, + kBranchTypeLastCondition = nv, + kBranchTypeFirstUsingReg = reg_zero, + kBranchTypeFirstUsingBit = reg_bit_clear +}; + + +enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg }; + +// The macro assembler supports moving automatically pre-shifted immediates for +// arithmetic and logical instructions, and then applying a post shift in the +// instruction to undo the modification, in order to reduce the code emitted for +// an operation. For example: +// +// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1. +// +// This optimisation can be only partially applied when the stack pointer is an +// operand or destination, so this enumeration is used to control the shift. +enum PreShiftImmMode { + kNoShift, // Don't pre-shift. + kLimitShiftForSP, // Limit pre-shift for add/sub extend use. + kAnyShift // Allow any pre-shift. +}; + + +class MacroAssembler : public Assembler, public MacroAssemblerInterface { + public: + explicit MacroAssembler( + PositionIndependentCodeOption pic = PositionIndependentCode); + MacroAssembler(size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + MacroAssembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + ~MacroAssembler(); + + enum FinalizeOption { + kFallThrough, // There may be more code to execute after calling Finalize. + kUnreachable // Anything generated after calling Finalize is unreachable. + }; + + virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE { + return this; + } + + // TODO(pools): implement these functions. + virtual void EmitPoolHeader() VIXL_OVERRIDE {} + virtual void EmitPoolFooter() VIXL_OVERRIDE {} + virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); } + virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); } + + // Start generating code from the beginning of the buffer, discarding any code + // and data that has already been emitted into the buffer. + // + // In order to avoid any accidental transfer of state, Reset ASSERTs that the + // constant pool is not blocked. + void Reset(); + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. By default, + // anything generated after this should not be reachable (the last instruction + // generated is an unconditional branch). If you need to generate more code, + // then set `option` to kFallThrough. + void FinalizeCode(FinalizeOption option = kUnreachable); + + + // Constant generation helpers. + // These functions return the number of instructions required to move the + // immediate into the destination register. Also, if the masm pointer is + // non-null, it generates the code to do so. 
+ // The two features are implemented using one function to avoid duplication of + // the logic. + // The function can be used to evaluate the cost of synthesizing an + // instruction using 'mov immediate' instructions. A user might prefer loading + // a constant using the literal pool instead of using multiple 'mov immediate' + // instructions. + static int MoveImmediateHelper(MacroAssembler* masm, + const Register& rd, + uint64_t imm); + static bool OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm); + + + // Logical macros. + void And(const Register& rd, const Register& rn, const Operand& operand); + void Ands(const Register& rd, const Register& rn, const Operand& operand); + void Bic(const Register& rd, const Register& rn, const Operand& operand); + void Bics(const Register& rd, const Register& rn, const Operand& operand); + void Orr(const Register& rd, const Register& rn, const Operand& operand); + void Orn(const Register& rd, const Register& rn, const Operand& operand); + void Eor(const Register& rd, const Register& rn, const Operand& operand); + void Eon(const Register& rd, const Register& rn, const Operand& operand); + void Tst(const Register& rn, const Operand& operand); + void LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op); + + // Add and sub macros. + void Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Adds(const Register& rd, const Register& rn, const Operand& operand); + void Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Subs(const Register& rd, const Register& rn, const Operand& operand); + void Cmn(const Register& rn, const Operand& operand); + void Cmp(const Register& rn, const Operand& operand); + void Neg(const Register& rd, const Operand& operand); + void Negs(const Register& rd, const Operand& operand); + + void AddSubMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + // Add/sub with carry macros. + void Adc(const Register& rd, const Register& rn, const Operand& operand); + void Adcs(const Register& rd, const Register& rn, const Operand& operand); + void Sbc(const Register& rd, const Register& rn, const Operand& operand); + void Sbcs(const Register& rd, const Register& rn, const Operand& operand); + void Ngc(const Register& rd, const Operand& operand); + void Ngcs(const Register& rd, const Operand& operand); + void AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + // Move macros. + void Mov(const Register& rd, uint64_t imm); + void Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + void Mvn(const Register& rd, uint64_t imm) { + Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask)); + } + void Mvn(const Register& rd, const Operand& operand); + + // Try to move an immediate into the destination register in a single + // instruction. Returns true for success, and updates the contents of dst. + // Returns false, otherwise. + bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + + // Move an immediate into register dst, and return an Operand object for + // use with a subsequent instruction that accepts a shift. 
The value moved + // into dst is not necessarily equal to imm; it may have had a shifting + // operation applied to it that will be subsequently undone by the shift + // applied in the Operand. + Operand MoveImmediateForShiftedOp(const Register& dst, + int64_t imm, + PreShiftImmMode mode); + + void Move(const GenericOperand& dst, const GenericOperand& src); + + // Synthesises the address represented by a MemOperand into a register. + void ComputeAddress(const Register& dst, const MemOperand& mem_op); + + // Conditional macros. + void Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + // On return, the boolean values pointed to will indicate whether `left` and + // `right` should be synthesised in a temporary register. + static void GetCselSynthesisInformation(const Register& rd, + const Operand& left, + const Operand& right, + bool* should_synthesise_left, + bool* should_synthesise_right) { + // Note that the helper does not need to look at the condition. + CselHelper(NULL, + rd, + left, + right, + eq, + should_synthesise_left, + should_synthesise_right); + } + + void Csel(const Register& rd, + const Operand& left, + const Operand& right, + Condition cond) { + CselHelper(this, rd, left, right, cond); + } + +// Load/store macros. +#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ + void FN(const REGTYPE REG, const MemOperand& addr); + LS_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op); + +#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); + LSPAIR_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + + void Prfm(PrefetchOperation op, const MemOperand& addr); + + // Push or pop up to 4 registers of the same width to or from the stack, + // using the current stack pointer as set by SetStackPointer. + // + // If an argument register is 'NoReg', all further arguments are also assumed + // to be 'NoReg', and are thus not pushed or popped. + // + // Arguments are ordered such that "Push(a, b);" is functionally equivalent + // to "Push(a); Push(b);". + // + // It is valid to push the same register more than once, and there is no + // restriction on the order in which registers are specified. + // + // It is not valid to pop into the same register more than once in one + // operation, not even into the zero register. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes on entry and the total size of the specified + // registers must also be a multiple of 16 bytes. + // + // Even if the current stack pointer is not the system stack pointer (sp), + // Push (and derived methods) will still modify the system stack pointer in + // order to comply with ABI rules about accessing memory below the system + // stack pointer. + // + // Other than the registers passed into Pop, the stack pointer and (possibly) + // the system stack pointer, these methods do not modify any other registers. 
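+ // + // As an illustrative sketch (assuming the current stack pointer is sp and + // the alignment rules above hold), a balanced save and restore of two X + // registers could look like this: + // + //   __ Push(x0, x1);  // Equivalent to Push(x0); Push(x1); x1 ends on top. + //   ... + //   __ Pop(x1, x0);   // Pops the top entry into x1, then the next into x0.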
+ void Push(const CPURegister& src0, + const CPURegister& src1 = NoReg, + const CPURegister& src2 = NoReg, + const CPURegister& src3 = NoReg); + void Pop(const CPURegister& dst0, + const CPURegister& dst1 = NoReg, + const CPURegister& dst2 = NoReg, + const CPURegister& dst3 = NoReg); + + // Alternative forms of Push and Pop, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses (as in the A32 push + // and pop instructions). + // + // (Push|Pop)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. + void PushCPURegList(CPURegList registers); + void PopCPURegList(CPURegList registers); + + void PushSizeRegList( + RegList registers, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PushCPURegList(CPURegList(type, reg_size, registers)); + } + void PopSizeRegList(RegList registers, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PopCPURegList(CPURegList(type, reg_size, registers)); + } + void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); } + void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); } + void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); } + void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); } + void PushDRegList(RegList regs) { + PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PopDRegList(RegList regs) { + PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PushSRegList(RegList regs) { + PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + void PopSRegList(RegList regs) { + PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + + // Push the specified register 'count' times. + void PushMultipleTimes(int count, Register src); + + // Poke 'src' onto the stack. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Poke(const Register& src, const Operand& offset); + + // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Peek(const Register& dst, const Operand& offset); + + // Alternative forms of Peek and Poke, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses. + // + // (Peek|Poke)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred. 
+ void PeekCPURegList(CPURegList registers, int64_t offset) { + LoadCPURegList(registers, MemOperand(StackPointer(), offset)); + } + void PokeCPURegList(CPURegList registers, int64_t offset) { + StoreCPURegList(registers, MemOperand(StackPointer(), offset)); + } + + void PeekSizeRegList( + RegList registers, + int64_t offset, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PeekCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PokeSizeRegList( + RegList registers, + int64_t offset, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PokeCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PeekXRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kXRegSize); + } + void PokeXRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kXRegSize); + } + void PeekWRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kWRegSize); + } + void PokeWRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kWRegSize); + } + void PeekDRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PokeDRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PeekSRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + void PokeSRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + + + // Claim or drop stack space without actually accessing memory. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes and the size claimed or dropped must be a + // multiple of 16 bytes. + void Claim(const Operand& size); + void Drop(const Operand& size); + + // Preserve the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are pushed before lower-numbered registers, and + // thus get higher addresses. + // Floating-point registers are pushed before general-purpose registers, and + // thus get higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PushCalleeSavedRegisters(); + + // Restore the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are popped after lower-numbered registers, and + // thus come from higher addresses. + // Floating-point registers are popped after general-purpose registers, and + // thus come from higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PopCalleeSavedRegisters(); + + void LoadCPURegList(CPURegList registers, const MemOperand& src); + void StoreCPURegList(CPURegList registers, const MemOperand& dst); + + // Remaining instructions are simple pass-through calls to the assembler. 
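+ // + // Each wrapper follows the same pattern: assert that macro instructions are + // allowed, open a SingleEmissionCheckScope for exactly one instruction, and + // forward to the corresponding assembler call. A hypothetical usage sketch + // (the register choices are illustrative only): + // + //   __ Adr(x0, &label);  // Emits a single adr via the Adr wrapper below. + //   __ Asr(x1, x2, 4);   // Emits a single asr via the Asr wrapper below.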
+ void Adr(const Register& rd, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adr(rd, label); + } + void Adrp(const Register& rd, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adrp(rd, label); + } + void Asr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + asr(rd, rn, shift); + } + void Asr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + asrv(rd, rn, rm); + } + + // Branch type inversion relies on these relations. + VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && + (reg_bit_clear == (reg_bit_set ^ 1)) && + (always == (never ^ 1))); + + BranchType InvertBranchType(BranchType type) { + if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { + return static_cast<BranchType>( + InvertCondition(static_cast<Condition>(type))); + } else { + return static_cast<BranchType>(type ^ 1); + } + } + + void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); + + void B(Label* label); + void B(Label* label, Condition cond); + void B(Condition cond, Label* label) { B(label, cond); } + void Bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfm(rd, rn, immr, imms); + } + void Bfi(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfi(rd, rn, lsb, width); + } + void Bfc(const Register& rd, unsigned lsb, unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + bfc(rd, lsb, width); + } + void Bfxil(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfxil(rd, rn, lsb, width); + } + void Bind(Label* label); + // Bind a label to a specified offset from the start of the buffer.
+ void BindToOffset(Label* label, ptrdiff_t offset); + void Bl(Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bl(label); + } + void Blr(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + blr(xn); + } + void Br(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + br(xn); + } + void Braaz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + braaz(xn); + } + void Brabz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brabz(xn); + } + void Blraaz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blraaz(xn); + } + void Blrabz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blrabz(xn); + } + void Retaa() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + retaa(); + } + void Retab() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + retab(); + } + void Braa(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + braa(xn, xm); + } + void Brab(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brab(xn, xm); + } + void Blraa(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blraa(xn, xm); + } + void Blrab(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blrab(xn, xm); + } + void Brk(int code = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brk(code); + } + void Cbnz(const Register& rt, Label* label); + void Cbz(const Register& rt, Label* label); + void Cinc(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinc(rd, rn, cond); + } + void Cinv(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinv(rd, rn, cond); + } + +#define PAUTH_SYSTEM_MODES(V) \ + V(az) \ + V(bz) \ + V(asp) \ + V(bsp) + +#define DEFINE_MACRO_ASM_FUNCS(SUFFIX) \ + void Paci##SUFFIX() { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + paci##SUFFIX(); \ + } \ + void Auti##SUFFIX() { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + auti##SUFFIX(); \ + } + + PAUTH_SYSTEM_MODES(DEFINE_MACRO_ASM_FUNCS) +#undef DEFINE_MACRO_ASM_FUNCS + + // The 1716 pac and aut instructions encourage people to use x16 and x17 + // directly, perhaps without realising that this is forbidden. For example: + // + // UseScratchRegisterScope temps(&masm); + // Register temp = temps.AcquireX(); // temp will be x16 + // __ Mov(x17, ptr); + // __ Mov(x16, modifier); // Will override temp! + // __ Pacia1716(); + // + // To work around this issue, you must exclude x16 and x17 from the scratch + // register list. 
You may need to replace them with other registers: + // + // UseScratchRegisterScope temps(&masm); + // temps.Exclude(x16, x17); + // temps.Include(x10, x11); + // __ Mov(x17, ptr); + // __ Mov(x16, modifier); + // __ Pacia1716(); + void Pacia1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + pacia1716(); + } + void Pacib1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + pacib1716(); + } + void Autia1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + autia1716(); + } + void Autib1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + autib1716(); + } + void Xpaclri() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpaclri(); + } + void Clrex() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clrex(); + } + void Cls(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cls(rd, rn); + } + void Clz(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + clz(rd, rn); + } + void Cneg(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cneg(rd, rn, cond); + } + void Esb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + esb(); + } + void Csdb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + csdb(); + } + void Cset(const Register& rd, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + cset(rd, cond); + } + void Csetm(const Register& rd, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + csetm(rd, cond); + } + void Csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinc(rd, rn, rm, cond); + } + void Csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinv(rd, rn, rm, cond); + } + void Csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csneg(rd, rn, rm, cond); + } + void Dmb(BarrierDomain domain, BarrierType type) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dmb(domain, type); + } + void Dsb(BarrierDomain domain, BarrierType type) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dsb(domain, type); + } + void Extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + extr(rd, rn, rm, lsb); + } + void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fadd(vd, vn, vm); + } + void Fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap = DisableTrap) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + FPCCompareMacro(vn, vm, nzcv, cond, trap); + } + void Fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + Fccmp(vn, vm, nzcv, cond, EnableTrap); + } + void Fcmp(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap = DisableTrap) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + FPCompareMacro(vn, vm, trap); + } + void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap); + void Fcmpe(const VRegister& vn, double value); + void Fcmpe(const VRegister& vn, const VRegister& vm) { + Fcmp(vn, vm, EnableTrap); + } + void Fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + fcsel(vd, vn, vm, cond); + } + void Fcvt(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvt(vd, vn); + } + void Fcvtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtl(vd, vn); + } + void Fcvtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtl2(vd, vn); + } + void Fcvtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtn(vd, vn); + } + void Fcvtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtn2(vd, vn); + } + void Fcvtxn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtxn(vd, vn); + } + void Fcvtxn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtxn2(vd, vn); + } + void Fcvtas(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtas(rd, vn); + } + void Fcvtau(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtau(rd, vn); + } + void Fcvtms(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtms(rd, vn); + } + void Fcvtmu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtmu(rd, vn); + } + void Fcvtns(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtns(rd, vn); + } + void Fcvtnu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtnu(rd, vn); + } + void Fcvtps(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtps(rd, vn); + } + void Fcvtpu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtpu(rd, vn); + } + void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzs(rd, vn, fbits); + } + void Fjcvtzs(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fjcvtzs(rd, vn); + } + void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzu(rd, vn, fbits); + } + void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fdiv(vd, vn, vm); + } + void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmax(vd, vn, vm); + } + void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmaxnm(vd, vn, vm); + } + void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmin(vd, vn, vm); + } + void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fminnm(vd, vn, vm); + } + void Fmov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + // Only emit an instruction if vd and vn are different, and they are both D + // registers. fmov(s0, s0) is not a no-op because it clears the top word of + // d0. Technically, fmov(d0, d0) is not a no-op either because it clears + // the top of q0, but VRegister does not currently support Q registers. 
+ if (!vd.Is(vn) || !vd.Is64Bits()) { + fmov(vd, vn); + } + } + void Fmov(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(vd, rn); + } + void Fmov(const VRegister& vd, const XRegister& xn) { + Fmov(vd, Register(xn)); + } + void Fmov(const VRegister& vd, const WRegister& wn) { + Fmov(vd, Register(wn)); + } + void Fmov(const VRegister& vd, int index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmov(vd, index, rn); + } + void Fmov(const Register& rd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmov(rd, vn, index); + } + + // Provide explicit double and float interfaces for FP immediate moves, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of vd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. + void Fmov(VRegister vd, double imm); + void Fmov(VRegister vd, float imm); + void Fmov(VRegister vd, const Float16 imm); + // Provide a template to allow other types to be converted automatically. + template <typename T> + void Fmov(VRegister vd, T imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fmov(vd, static_cast<double>(imm)); + } + void Fmov(Register rd, VRegister vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(rd, vn); + } + void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmul(vd, vn, vm); + } + void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmul(vd, vn, vm); + } + void Fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmadd(vd, vn, vm, va); + } + void Fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmsub(vd, vn, vm, va); + } + void Fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmadd(vd, vn, vm, va); + } + void Fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmsub(vd, vn, vm, va); + } + void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fsub(vd, vn, vm); + } + void Hint(SystemHint code) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hint(code); + } + void Hint(int imm7) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hint(imm7); + } + void Hlt(int code) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hlt(code); + } + void Isb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + isb(); + } + void Ldar(const Register& rt, const MemOperand& src) { +
VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldar(rt, src); + } + void Ldarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldarb(rt, src); + } + void Ldarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldarh(rt, src); + } + void Ldlar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlar(rt, src); + } + void Ldlarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlarb(rt, src); + } + void Ldlarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlarh(rt, src); + } + void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + ldaxp(rt, rt2, src); + } + void Ldaxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxr(rt, src); + } + void Ldaxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxrb(rt, src); + } + void Ldaxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxrh(rt, src); + } + +// clang-format off +#define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \ + V(cas, Cas) \ + V(casa, Casa) \ + V(casl, Casl) \ + V(casal, Casal) \ + V(casb, Casb) \ + V(casab, Casab) \ + V(caslb, Caslb) \ + V(casalb, Casalb) \ + V(cash, Cash) \ + V(casah, Casah) \ + V(caslh, Caslh) \ + V(casalh, Casalh) +// clang-format on + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } + COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + +// clang-format off +#define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \ + V(casp, Casp) \ + V(caspa, Caspa) \ + V(caspl, Caspl) \ + V(caspal, Caspal) +// clang-format on + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const Register& rs, \ + const Register& rs2, \ + const Register& rt, \ + const Register& rt2, \ + const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rs2, rt, rt2, src); \ + } + COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// These macros generate all the variations of the atomic memory operations, +// e.g. ldadd, ldadda, ldaddb, staddl, etc. 
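+// For example, instantiating the load list below for the add operation +// generates methods such as Ldadd, Ldadda, Ldaddal and Ldaddb, so a call like +// Ldaddal(w0, w1, MemOperand(x2)) simply forwards to the ldaddal assembler +// instruction (an illustrative sketch; the register choices are arbitrary).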
+ +// clang-format off +#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \ + V(DEF, MASM_PRE##add, ASM_PRE##add) \ + V(DEF, MASM_PRE##clr, ASM_PRE##clr) \ + V(DEF, MASM_PRE##eor, ASM_PRE##eor) \ + V(DEF, MASM_PRE##set, ASM_PRE##set) \ + V(DEF, MASM_PRE##smax, ASM_PRE##smax) \ + V(DEF, MASM_PRE##smin, ASM_PRE##smin) \ + V(DEF, MASM_PRE##umax, ASM_PRE##umax) \ + V(DEF, MASM_PRE##umin, ASM_PRE##umin) + +#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \ + V(MASM, ASM) \ + V(MASM##l, ASM##l) \ + V(MASM##b, ASM##b) \ + V(MASM##lb, ASM##lb) \ + V(MASM##h, ASM##h) \ + V(MASM##lh, ASM##lh) + +#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \ + ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \ + V(MASM##a, ASM##a) \ + V(MASM##al, ASM##al) \ + V(MASM##ab, ASM##ab) \ + V(MASM##alb, ASM##alb) \ + V(MASM##ah, ASM##ah) \ + V(MASM##alh, ASM##alh) +// clang-format on + +#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } +#define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, src); \ + } + + ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES, + DEFINE_MACRO_LOAD_ASM_FUNC, + Ld, + ld) + ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES, + DEFINE_MACRO_STORE_ASM_FUNC, + St, + st) + +#define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } + + ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp) + +#undef DEFINE_MACRO_LOAD_ASM_FUNC +#undef DEFINE_MACRO_STORE_ASM_FUNC +#undef DEFINE_MACRO_SWP_ASM_FUNC + + void Ldaprb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaprb(rt, src); + } + + void Ldaprh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaprh(rt, src); + } + + void Ldapr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapr(rt, src); + } + + void Ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnp(rt, rt2, src); + } + // Provide both double and float interfaces for FP immediate loads, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of fd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. 
+  void Ldr(const VRegister& vt, double imm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    RawLiteral* literal;
+    if (vt.IsD()) {
+      literal = new Literal<double>(imm,
+                                    &literal_pool_,
+                                    RawLiteral::kDeletedOnPlacementByPool);
+    } else {
+      literal = new Literal<float>(static_cast<float>(imm),
+                                   &literal_pool_,
+                                   RawLiteral::kDeletedOnPlacementByPool);
+    }
+    ldr(vt, literal);
+  }
+  void Ldr(const VRegister& vt, float imm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    RawLiteral* literal;
+    if (vt.IsS()) {
+      literal = new Literal<float>(imm,
+                                   &literal_pool_,
+                                   RawLiteral::kDeletedOnPlacementByPool);
+    } else {
+      literal = new Literal<double>(static_cast<double>(imm),
+                                    &literal_pool_,
+                                    RawLiteral::kDeletedOnPlacementByPool);
+    }
+    ldr(vt, literal);
+  }
+  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(vt.IsQ());
+    SingleEmissionCheckScope guard(this);
+    ldr(vt,
+        new Literal<uint64_t>(high64,
+                              low64,
+                              &literal_pool_,
+                              RawLiteral::kDeletedOnPlacementByPool));
+  }
+  void Ldr(const Register& rt, uint64_t imm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rt.IsZero());
+    SingleEmissionCheckScope guard(this);
+    RawLiteral* literal;
+    if (rt.Is64Bits()) {
+      literal = new Literal<uint64_t>(imm,
+                                      &literal_pool_,
+                                      RawLiteral::kDeletedOnPlacementByPool);
+    } else {
+      VIXL_ASSERT(rt.Is32Bits());
+      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
+      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
+                                      &literal_pool_,
+                                      RawLiteral::kDeletedOnPlacementByPool);
+    }
+    ldr(rt, literal);
+  }
+  void Ldrsw(const Register& rt, uint32_t imm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rt.IsZero());
+    SingleEmissionCheckScope guard(this);
+    ldrsw(rt,
+          new Literal<uint32_t>(imm,
+                                &literal_pool_,
+                                RawLiteral::kDeletedOnPlacementByPool));
+  }
+  void Ldr(const CPURegister& rt, RawLiteral* literal) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ldr(rt, literal);
+  }
+  void Ldrsw(const Register& rt, RawLiteral* literal) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ldrsw(rt, literal);
+  }
+  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rt.Aliases(rt2));
+    SingleEmissionCheckScope guard(this);
+    ldxp(rt, rt2, src);
+  }
+  void Ldxr(const Register& rt, const MemOperand& src) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ldxr(rt, src);
+  }
+  void Ldxrb(const Register& rt, const MemOperand& src) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ldxrb(rt, src);
+  }
+  void Ldxrh(const Register& rt, const MemOperand& src) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    SingleEmissionCheckScope guard(this);
+    ldxrh(rt, src);
+  }
+  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rd.IsZero());
+    VIXL_ASSERT(!rn.IsZero());
+    SingleEmissionCheckScope guard(this);
+    lsl(rd, rn, shift);
+  }
+  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rd.IsZero());
+    VIXL_ASSERT(!rn.IsZero());
+    VIXL_ASSERT(!rm.IsZero());
+    SingleEmissionCheckScope guard(this);
+    lslv(rd, rn, rm);
+  }
+  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(allow_macro_instructions_);
+    VIXL_ASSERT(!rd.IsZero());
+    VIXL_ASSERT(!rn.IsZero());
SingleEmissionCheckScope guard(this); + lsr(rd, rn, shift); + } + void Lsr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + lsrv(rd, rn, rm); + } + void Madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + madd(rd, rn, rm, ra); + } + void Mneg(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mneg(rd, rn, rm); + } + void Mov(const Register& rd, + const Register& rn, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg) { + VIXL_ASSERT(allow_macro_instructions_); + // Emit a register move only if the registers are distinct, or if they are + // not X registers. + // + // Note that mov(w0, w0) is not a no-op because it clears the top word of + // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W + // registers is not required to clear the top word of the X register. In + // this case, the instruction is discarded. + // + // If the sp is an operand, add #0 is emitted, otherwise, orr #0. + if (!rd.Is(rn) || + (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) { + SingleEmissionCheckScope guard(this); + mov(rd, rn); + } + } + void Movk(const Register& rd, uint64_t imm, int shift = -1) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + movk(rd, imm, shift); + } + void Mrs(const Register& rt, SystemRegister sysreg) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + mrs(rt, sysreg); + } + void Msr(SystemRegister sysreg, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + msr(sysreg, rt); + } + void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sys(op1, crn, crm, op2, rt); + } + void Dc(DataCacheOp op, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dc(op, rt); + } + void Ic(InstructionCacheOp op, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ic(op, rt); + } + void Msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + msub(rd, rn, rm, ra); + } + void Mul(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mul(rd, rn, rm); + } + void Nop() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nop(); + } + void Rbit(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); 
+ VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rbit(rd, rn); + } + void Ret(const Register& xn = lr) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + ret(xn); + } + void Rev(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev(rd, rn); + } + void Rev16(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev16(rd, rn); + } + void Rev32(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev32(rd, rn); + } + void Rev64(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev64(rd, rn); + } + +#define PAUTH_MASM_VARIATIONS(V) \ + V(Paci, paci) \ + V(Pacd, pacd) \ + V(Auti, auti) \ + V(Autd, autd) + +#define DEFINE_MACRO_ASM_FUNCS(MASM_PRE, ASM_PRE) \ + void MASM_PRE##a(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##a(xd, xn); \ + } \ + void MASM_PRE##za(const Register& xd) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##za(xd); \ + } \ + void MASM_PRE##b(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##b(xd, xn); \ + } \ + void MASM_PRE##zb(const Register& xd) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##zb(xd); \ + } + + PAUTH_MASM_VARIATIONS(DEFINE_MACRO_ASM_FUNCS) +#undef DEFINE_MACRO_ASM_FUNCS + + void Pacga(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + pacga(xd, xn, xm); + } + + void Xpaci(const Register& xd) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpaci(xd); + } + + void Xpacd(const Register& xd) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpacd(xd); + } + void Ror(const Register& rd, const Register& rs, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rs.IsZero()); + SingleEmissionCheckScope guard(this); + ror(rd, rs, shift); + } + void Ror(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + rorv(rd, rn, rm); + } + void Sbfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfiz(rd, rn, lsb, width); + } + void Sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfm(rd, rn, immr, imms); + } + void 
Sbfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfx(rd, rn, lsb, width); + } + void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + scvtf(vd, rn, fbits); + } + void Sdiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + sdiv(rd, rn, rm); + } + void Smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smaddl(rd, rn, rm, ra); + } + void Smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smsubl(rd, rn, rm, ra); + } + void Smull(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + smull(rd, rn, rm); + } + void Smulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + smulh(xd, xn, xm); + } + void Stlr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stlr(rt, dst); + } + void Stlrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stlrb(rt, dst); + } + void Stlrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stlrh(rt, dst); + } + void Stllr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllr(rt, dst); + } + void Stllrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllrb(rt, dst); + } + void Stllrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllrh(rt, dst); + } + void Stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stlxp(rs, rt, rt2, dst); + } + void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxr(rs, rt, dst); + } + void Stlxrb(const Register& rs, const 
Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxrb(rs, rt, dst); + } + void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxrh(rs, rt, dst); + } + void Stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stnp(rt, rt2, dst); + } + void Stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stxp(rs, rt, rt2, dst); + } + void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxr(rs, rt, dst); + } + void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrb(rs, rt, dst); + } + void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrh(rs, rt, dst); + } + void Svc(int code) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + svc(code); + } + void Sxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtb(rd, rn); + } + void Sxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxth(rd, rn); + } + void Sxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtw(rd, rn); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vn4, vm); + } + void Tbx(const VRegister& vd, const 
VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vn4, vm); + } + void Tbnz(const Register& rt, unsigned bit_pos, Label* label); + void Tbz(const Register& rt, unsigned bit_pos, Label* label); + void Ubfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfiz(rd, rn, lsb, width); + } + void Ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfm(rd, rn, immr, imms); + } + void Ubfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfx(rd, rn, lsb, width); + } + void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ucvtf(vd, rn, fbits); + } + void Udiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + udiv(rd, rn, rm); + } + void Umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umaddl(rd, rn, rm, ra); + } + void Umull(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + umull(rd, rn, rm); + } + void Umulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + umulh(xd, xn, xm); + } + void Umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umsubl(rd, rn, rm, ra); + } + void Unreachable() { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + if (generate_simulator_code_) { + hlt(kUnreachableOpcode); + } else { + // Branch to 0 to generate a segfault. + // lr - kInstructionSize is the address of the offending instruction. + blr(xzr); + } + } + void Uxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtb(rd, rn); + } + void Uxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxth(rd, rn); + } + void Uxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtw(rd, rn); + } + +// NEON 3 vector register instructions. +#define NEON_3VREG_MACRO_LIST(V) \ + V(add, Add) \ + V(addhn, Addhn) \ + V(addhn2, Addhn2) \ + V(addp, Addp) \ + V(and_, And) \ + V(bic, Bic) \ + V(bif, Bif) \ + V(bit, Bit) \ + V(bsl, Bsl) \ + V(cmeq, Cmeq) \ + V(cmge, Cmge) \ + V(cmgt, Cmgt) \ + V(cmhi, Cmhi) \ + V(cmhs, Cmhs) \ + V(cmtst, Cmtst) \ + V(eor, Eor) \ + V(fabd, Fabd) \ + V(facge, Facge) \ + V(facgt, Facgt) \ + V(faddp, Faddp) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxp, Fmaxp) \ + V(fminnmp, Fminnmp) \ + V(fminp, Fminp) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmulx, Fmulx) \ + V(frecps, Frecps) \ + V(frsqrts, Frsqrts) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(mul, Mul) \ + V(orn, Orn) \ + V(orr, Orr) \ + V(pmul, Pmul) \ + V(pmull, Pmull) \ + V(pmull2, Pmull2) \ + V(raddhn, Raddhn) \ + V(raddhn2, Raddhn2) \ + V(rsubhn, Rsubhn) \ + V(rsubhn2, Rsubhn2) \ + V(saba, Saba) \ + V(sabal, Sabal) \ + V(sabal2, Sabal2) \ + V(sabd, Sabd) \ + V(sabdl, Sabdl) \ + V(sabdl2, Sabdl2) \ + V(saddl, Saddl) \ + V(saddl2, Saddl2) \ + V(saddw, Saddw) \ + V(saddw2, Saddw2) \ + V(shadd, Shadd) \ + V(shsub, Shsub) \ + V(smax, Smax) \ + V(smaxp, Smaxp) \ + V(smin, Smin) \ + V(sminp, Sminp) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(sqadd, Sqadd) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(sqdmulh, Sqdmulh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqrdmulh, Sqrdmulh) \ + V(sdot, Sdot) \ + V(sqrdmlah, Sqrdmlah) \ + V(udot, Udot) \ + V(sqrdmlsh, Sqrdmlsh) \ + V(sqrshl, Sqrshl) \ + V(sqshl, Sqshl) \ + V(sqsub, Sqsub) \ + V(srhadd, Srhadd) \ + V(srshl, Srshl) \ + V(sshl, Sshl) \ + V(ssubl, Ssubl) \ + V(ssubl2, Ssubl2) \ + V(ssubw, Ssubw) \ + V(ssubw2, Ssubw2) \ + V(sub, Sub) \ + V(subhn, Subhn) \ + V(subhn2, Subhn2) \ + V(trn1, Trn1) \ + V(trn2, Trn2) \ + V(uaba, Uaba) \ + V(uabal, Uabal) \ + V(uabal2, Uabal2) \ + V(uabd, Uabd) \ + V(uabdl, Uabdl) \ + V(uabdl2, Uabdl2) \ + V(uaddl, Uaddl) \ + V(uaddl2, Uaddl2) \ + V(uaddw, Uaddw) \ + V(uaddw2, Uaddw2) \ + V(uhadd, Uhadd) \ + V(uhsub, Uhsub) \ + V(umax, Umax) \ + V(umaxp, Umaxp) \ + V(umin, Umin) \ + V(uminp, Uminp) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(uqadd, Uqadd) \ + V(uqrshl, Uqrshl) \ + V(uqshl, Uqshl) \ + V(uqsub, Uqsub) \ + V(urhadd, Urhadd) \ + V(urshl, Urshl) \ + V(ushl, Ushl) \ + V(usubl, Usubl) \ + V(usubl2, Usubl2) \ + V(usubw, Usubw) \ + V(usubw2, Usubw2) \ + V(uzp1, Uzp1) \ + 
V(uzp2, Uzp2) \ + V(zip1, Zip1) \ + V(zip2, Zip2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm); \ + } + NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 2 vector register instructions. +#define NEON_2VREG_MACRO_LIST(V) \ + V(abs, Abs) \ + V(addp, Addp) \ + V(addv, Addv) \ + V(cls, Cls) \ + V(clz, Clz) \ + V(cnt, Cnt) \ + V(fabs, Fabs) \ + V(faddp, Faddp) \ + V(fcvtas, Fcvtas) \ + V(fcvtau, Fcvtau) \ + V(fcvtms, Fcvtms) \ + V(fcvtmu, Fcvtmu) \ + V(fcvtns, Fcvtns) \ + V(fcvtnu, Fcvtnu) \ + V(fcvtps, Fcvtps) \ + V(fcvtpu, Fcvtpu) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxnmv, Fmaxnmv) \ + V(fmaxp, Fmaxp) \ + V(fmaxv, Fmaxv) \ + V(fminnmp, Fminnmp) \ + V(fminnmv, Fminnmv) \ + V(fminp, Fminp) \ + V(fminv, Fminv) \ + V(fneg, Fneg) \ + V(frecpe, Frecpe) \ + V(frecpx, Frecpx) \ + V(frinta, Frinta) \ + V(frinti, Frinti) \ + V(frintm, Frintm) \ + V(frintn, Frintn) \ + V(frintp, Frintp) \ + V(frintx, Frintx) \ + V(frintz, Frintz) \ + V(frsqrte, Frsqrte) \ + V(fsqrt, Fsqrt) \ + V(mov, Mov) \ + V(mvn, Mvn) \ + V(neg, Neg) \ + V(not_, Not) \ + V(rbit, Rbit) \ + V(rev16, Rev16) \ + V(rev32, Rev32) \ + V(rev64, Rev64) \ + V(sadalp, Sadalp) \ + V(saddlp, Saddlp) \ + V(saddlv, Saddlv) \ + V(smaxv, Smaxv) \ + V(sminv, Sminv) \ + V(sqabs, Sqabs) \ + V(sqneg, Sqneg) \ + V(sqxtn, Sqxtn) \ + V(sqxtn2, Sqxtn2) \ + V(sqxtun, Sqxtun) \ + V(sqxtun2, Sqxtun2) \ + V(suqadd, Suqadd) \ + V(sxtl, Sxtl) \ + V(sxtl2, Sxtl2) \ + V(uadalp, Uadalp) \ + V(uaddlp, Uaddlp) \ + V(uaddlv, Uaddlv) \ + V(umaxv, Umaxv) \ + V(uminv, Uminv) \ + V(uqxtn, Uqxtn) \ + V(uqxtn2, Uqxtn2) \ + V(urecpe, Urecpe) \ + V(ursqrte, Ursqrte) \ + V(usqadd, Usqadd) \ + V(uxtl, Uxtl) \ + V(uxtl2, Uxtl2) \ + V(xtn, Xtn) \ + V(xtn2, Xtn2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn); \ + } + NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 2 vector register with immediate instructions. +#define NEON_2VREG_FPIMM_MACRO_LIST(V) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fcmle, Fcmle) \ + V(fcmlt, Fcmlt) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, double imm) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, imm); \ + } + NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON by element instructions. 
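+// For example (illustrative, not upstream text): a by-element macro takes the
+// lane as a trailing index, so Mul(vd, vn, vm, 1) emits 'mul vd.4s, vn.4s,
+// vm.s[1]' when the operands are 4S-formatted.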
+#define NEON_BYELEMENT_MACRO_LIST(V) \ + V(fmul, Fmul) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmulx, Fmulx) \ + V(mul, Mul) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(sqdmulh, Sqdmulh) \ + V(sqrdmulh, Sqrdmulh) \ + V(sdot, Sdot) \ + V(sqrdmlah, Sqrdmlah) \ + V(udot, Udot) \ + V(sqrdmlsh, Sqrdmlsh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm, vm_index); \ + } + NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +#define NEON_2VREG_SHIFT_MACRO_LIST(V) \ + V(rshrn, Rshrn) \ + V(rshrn2, Rshrn2) \ + V(shl, Shl) \ + V(shll, Shll) \ + V(shll2, Shll2) \ + V(shrn, Shrn) \ + V(shrn2, Shrn2) \ + V(sli, Sli) \ + V(sqrshrn, Sqrshrn) \ + V(sqrshrn2, Sqrshrn2) \ + V(sqrshrun, Sqrshrun) \ + V(sqrshrun2, Sqrshrun2) \ + V(sqshl, Sqshl) \ + V(sqshlu, Sqshlu) \ + V(sqshrn, Sqshrn) \ + V(sqshrn2, Sqshrn2) \ + V(sqshrun, Sqshrun) \ + V(sqshrun2, Sqshrun2) \ + V(sri, Sri) \ + V(srshr, Srshr) \ + V(srsra, Srsra) \ + V(sshll, Sshll) \ + V(sshll2, Sshll2) \ + V(sshr, Sshr) \ + V(ssra, Ssra) \ + V(uqrshrn, Uqrshrn) \ + V(uqrshrn2, Uqrshrn2) \ + V(uqshl, Uqshl) \ + V(uqshrn, Uqshrn) \ + V(uqshrn2, Uqshrn2) \ + V(urshr, Urshr) \ + V(ursra, Ursra) \ + V(ushll, Ushll) \ + V(ushll2, Ushll2) \ + V(ushr, Ushr) \ + V(usra, Usra) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, int shift) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, shift); \ + } + NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bic(vd, imm8, left_shift); + } + void Cmeq(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmeq(vd, vn, imm); + } + void Cmge(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmge(vd, vn, imm); + } + void Cmgt(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmgt(vd, vn, imm); + } + void Cmle(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmle(vd, vn, imm); + } + void Cmlt(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmlt(vd, vn, imm); + } + void Dup(const VRegister& vd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(vd, vn, index); + } + void Dup(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(vd, rn); + } + 
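+  // Illustrative note (not part of the upstream header): the two Dup
+  // overloads above broadcast either a vector lane or a general-purpose
+  // register, emitting e.g. 'dup vd.4s, vn.s[2]' and 'dup vd.4s, wn'
+  // respectively.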
void Ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ext(vd, vn, vm, index); + } + void Fcadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcadd(vd, vn, vm, rot); + } + void Fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmla(vd, vn, vm, vm_index, rot); + } + void Fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmla(vd, vn, vm, rot); + } + void Ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, vn, vn_index); + } + void Ins(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, rn); + } + void Ld1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, src); + } + void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, vt4, src); + } + void Ld1(const VRegister& vt, int lane, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, lane, src); + } + void Ld1r(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1r(vt, src); + } + void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, src); + } + void Ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, lane, src); + } + void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2r(vt, vt2, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, lane, src); + } + void Ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + 
VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3r(vt, vt2, vt3, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, vt3, vt4, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, vt3, vt4, lane, src); + } + void Ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4r(vt, vt2, vt3, vt4, src); + } + void Mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, vn, vn_index); + } + void Mov(const VRegister& vd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vn, index); + } + void Mov(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, rn); + } + void Mov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(rd, vn, vn_index); + } + void Movi(const VRegister& vd, + uint64_t imm, + Shift shift = LSL, + int shift_amount = 0); + void Movi(const VRegister& vd, uint64_t hi, uint64_t lo); + void Mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mvni(vd, imm8, shift, shift_amount); + } + void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orr(vd, imm8, left_shift); + } + void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + scvtf(vd, vn, fbits); + } + void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ucvtf(vd, vn, fbits); + } + void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzs(vd, vn, fbits); + } + void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzu(vd, vn, fbits); + } + void St1(const VRegister& vt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, dst); + } + void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const 
VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, vt4, dst); + } + void St1(const VRegister& vt, int lane, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, lane, dst); + } + void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2(vt, vt2, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, dst); + } + void St4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, dst); + } + void St2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2(vt, vt2, lane, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, lane, dst); + } + void St4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, lane, dst); + } + void Smov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smov(rd, vn, vn_index); + } + void Umov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umov(rd, vn, vn_index); + } + void Crc32b(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32b(rd, rn, rm); + } + void Crc32h(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32h(rd, rn, rm); + } + void Crc32w(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32w(rd, rn, rm); + } + void Crc32x(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32x(rd, rn, rm); + } + void Crc32cb(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cb(rd, rn, rm); + } + void Crc32ch(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32ch(rd, rn, rm); + } + void Crc32cw(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cw(rd, rn, rm); + } + void Crc32cx(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cx(rd, 
            rn, rm);
+  }
+
+  template <typename T>
+  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
+    return new Literal<T>(value,
+                          &literal_pool_,
+                          RawLiteral::kDeletedOnPoolDestruction);
+  }
+
+  template <typename T>
+  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
+    return new Literal<T>(high64,
+                          low64,
+                          &literal_pool_,
+                          RawLiteral::kDeletedOnPoolDestruction);
+  }
+
+  // Push the system stack pointer (sp) down to allow the same to be done to
+  // the current stack pointer (according to StackPointer()). This must be
+  // called _before_ accessing the memory.
+  //
+  // This is necessary when pushing or otherwise adding things to the stack, to
+  // satisfy the AAPCS64 constraint that the memory below the system stack
+  // pointer is not accessed.
+  //
+  // This method asserts that StackPointer() is not sp, since the call does
+  // not make sense in that context.
+  //
+  // TODO: This method can only accept values of 'space' that can be encoded in
+  // one instruction. Refer to the implementation for details.
+  void BumpSystemStackPointer(const Operand& space);
+
+  virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
+    return allow_macro_instructions_;
+  }
+
+  virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
+    return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
+  }
+
+  void SetGenerateSimulatorCode(bool value) {
+    generate_simulator_code_ = value;
+  }
+
+  bool GenerateSimulatorCode() const { return generate_simulator_code_; }
+
+  size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
+  VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
+    return GetLiteralPoolSize();
+  }
+
+  size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
+  VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
+    return GetLiteralPoolMaxSize();
+  }
+
+  size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
+  VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
+    return GetVeneerPoolMaxSize();
+  }
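+  // Illustrative sketch (not part of the upstream header): a literal created
+  // with CreateLiteralDestroyedWithPool() above stays valid until the literal
+  // pool itself is destroyed, e.g.:
+  //
+  //   Literal<uint64_t>* lit =
+  //       masm.CreateLiteralDestroyedWithPool<uint64_t>(UINT64_C(0x1234));
+  //   __ Ldr(x0, lit);  // Loads 0x1234 from the literal pool.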
+  // The number of unresolved branches that may require a veneer.
+  int GetNumberOfPotentialVeneers() const {
+    return veneer_pool_.GetNumberOfPotentialVeneers();
+  }
+  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
+                  int NumberOfPotentialVeneers() const) {
+    return GetNumberOfPotentialVeneers();
+  }
+
+  ptrdiff_t GetNextCheckPoint() const {
+    ptrdiff_t next_checkpoint_for_pools =
+        std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
+    return std::min(next_checkpoint_for_pools,
+                    static_cast<ptrdiff_t>(GetBuffer().GetCapacity()));
+  }
+  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
+    return GetNextCheckPoint();
+  }
+
+  void EmitLiteralPool(LiteralPool::EmitOption option) {
+    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);
+
+    checkpoint_ = GetNextCheckPoint();
+    recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
+  }
+
+  void CheckEmitFor(size_t amount);
+  void EnsureEmitFor(size_t amount) {
+    ptrdiff_t offset = amount;
+    ptrdiff_t max_pools_size =
+        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
+    ptrdiff_t cursor = GetCursorOffset();
+    if ((cursor >= recommended_checkpoint_) ||
+        ((cursor + offset + max_pools_size) >= checkpoint_)) {
+      CheckEmitFor(amount);
+    }
+  }
+
+  void CheckEmitPoolsFor(size_t amount);
+  virtual void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE {
+    ptrdiff_t offset = amount;
+    ptrdiff_t max_pools_size =
+        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
+    ptrdiff_t cursor = GetCursorOffset();
+    if ((cursor >= recommended_checkpoint_) ||
+        ((cursor + offset + max_pools_size) >= checkpoint_)) {
+      CheckEmitPoolsFor(amount);
+    }
+  }
+
+  // Set the current stack pointer, but don't generate any code.
+  void SetStackPointer(const Register& stack_pointer) {
+    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
+    sp_ = stack_pointer;
+  }
+
+  // Return the current stack pointer, as set by SetStackPointer.
+  const Register& StackPointer() const { return sp_; }
+
+  CPURegList* GetScratchRegisterList() { return &tmp_list_; }
+  VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
+    return GetScratchRegisterList();
+  }
+
+  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
+  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
+    return GetScratchFPRegisterList();
+  }
+
+  // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
+  void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) {
+    current_scratch_scope_ = scope;
+  }
+  UseScratchRegisterScope* GetCurrentScratchRegisterScope() {
+    return current_scratch_scope_;
+  }
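+  // Illustrative sketch (not part of the upstream header): code normally
+  // borrows scratch registers from these lists through a
+  // UseScratchRegisterScope (declared later in this file) rather than
+  // modifying them directly:
+  //
+  //   UseScratchRegisterScope temps(&masm);
+  //   Register scratch = temps.AcquireX();  // Returned when `temps` closes.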
+  // Like printf, but print at run-time from generated code.
+  //
+  // The caller must ensure that arguments for floating-point placeholders
+  // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
+  // arguments for integer placeholders are Registers.
+  //
+  // At the moment it is only possible to print the value of sp if it is the
+  // current stack pointer. Otherwise, the MacroAssembler will automatically
+  // update sp on every push (using BumpSystemStackPointer), so determining its
+  // value is difficult.
+  //
+  // Format placeholders that refer to more than one argument, or to a specific
+  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
+  //
+  // This function automatically preserves caller-saved registers so that
+  // calling code can use Printf at any point without having to worry about
+  // corruption. The preservation mechanism generates a lot of code. If this is
+  // a problem, preserve the important registers manually and then call
+  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+  // implicitly preserved.
+  void Printf(const char* format,
+              CPURegister arg0 = NoCPUReg,
+              CPURegister arg1 = NoCPUReg,
+              CPURegister arg2 = NoCPUReg,
+              CPURegister arg3 = NoCPUReg);
+
+  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+  //
+  // The return code from the system printf call will be returned in x0.
+  void PrintfNoPreserve(const char* format,
+                        const CPURegister& arg0 = NoCPUReg,
+                        const CPURegister& arg1 = NoCPUReg,
+                        const CPURegister& arg2 = NoCPUReg,
+                        const CPURegister& arg3 = NoCPUReg);
+
+  // Trace control when running the debug simulator.
+  //
+  // For example:
+  //
+  // __ Trace(LOG_REGS, TRACE_ENABLE);
+  // Will add registers to the trace if they are not already being traced.
+  //
+  // __ Trace(LOG_DISASM, TRACE_DISABLE);
+  // Will stop logging disassembly. It has no effect if the disassembly wasn't
+  // already being logged.
+  void Trace(TraceParameters parameters, TraceCommand command);
+
+  // Log the requested data independently of what is being traced.
+  //
+  // For example:
+  //
+  // __ Log(LOG_FLAGS)
+  // Will output the flags.
+  void Log(TraceParameters parameters);
+
+  // Enable or disable instrumentation when an Instrument visitor is attached
+  // to the simulator.
+  void EnableInstrumentation();
+  void DisableInstrumentation();
+
+  // Add a marker to the instrumentation data produced by an Instrument
+  // visitor. The name is a two-character string that will be attached to the
+  // marker in the output data.
+  void AnnotateInstrumentation(const char* marker_name);
+
+  // Enable or disable CPU features dynamically. This mechanism allows users to
+  // strictly check the use of CPU features in different regions of code.
+  void SetSimulatorCPUFeatures(const CPUFeatures& features);
+  void EnableSimulatorCPUFeatures(const CPUFeatures& features);
+  void DisableSimulatorCPUFeatures(const CPUFeatures& features);
+  void SaveSimulatorCPUFeatures();
+  void RestoreSimulatorCPUFeatures();
+
+  LiteralPool* GetLiteralPool() { return &literal_pool_; }
+
+// Support for simulated runtime calls.
+
+// `CallRuntime` requires variadic templating, which is only available from
+// C++11.
+#if __cplusplus >= 201103L
+#define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+#endif  // #if __cplusplus >= 201103L
+
+#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+  template <typename R, typename... P>
+  void CallRuntimeHelper(R (*function)(P...), RuntimeCallType call_type);
+
+  template <typename R, typename... P>
+  void CallRuntime(R (*function)(P...)) {
+    CallRuntimeHelper(function, kCallRuntime);
+  }
+
+  template <typename R, typename... P>
+  void TailCallRuntime(R (*function)(P...)) {
+    CallRuntimeHelper(function, kTailCallRuntime);
+  }
+#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+
+ protected:
+  void BlockLiteralPool() { literal_pool_.Block(); }
+  void ReleaseLiteralPool() { literal_pool_.Release(); }
+  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
+  void BlockVeneerPool() { veneer_pool_.Block(); }
+  void ReleaseVeneerPool() { veneer_pool_.Release(); }
+  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
+
+  virtual void BlockPools() VIXL_OVERRIDE {
+    BlockLiteralPool();
+    BlockVeneerPool();
+  }
+
+  virtual void ReleasePools() VIXL_OVERRIDE {
+    ReleaseLiteralPool();
+    ReleaseVeneerPool();
+  }
+
+  // The scopes below need to be able to block and release a particular pool.
+  // TODO: Consider removing those scopes or moving them to
+  // code-generation-scopes-vixl.h.
+  friend class BlockPoolsScope;
+  friend class BlockLiteralPoolScope;
+  friend class BlockVeneerPoolScope;
+
+  virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
+    allow_macro_instructions_ = value;
+  }
+
+  // Helper used to query information about code generation and to generate
+  // code for `csel`.
+  // Here and for the related helpers below:
+  // - Code is generated when `masm` is not `NULL`.
+  // - On return and when set, `should_synthesise_left` and
+  //   `should_synthesise_right` will indicate whether `left` and `right`
+  //   should be synthesized in a temporary register.
+  static void CselHelper(MacroAssembler* masm,
+                         const Register& rd,
+                         Operand left,
+                         Operand right,
+                         Condition cond,
+                         bool* should_synthesise_left = NULL,
+                         bool* should_synthesise_right = NULL);
+
+  // The helper returns `true` if it can handle the specified arguments.
+  // Also see comments for `CselHelper()`.
+  static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
+                                         const Register& rd,
+                                         int64_t left,
+                                         int64_t right,
+                                         Condition cond,
+                                         bool* should_synthesise_left,
+                                         bool* should_synthesise_right);
+
+  // See comments for `CselHelper()`.
+  static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
+                                                const Register& rd,
+                                                int64_t left,
+                                                int64_t right,
+                                                Condition cond);
+
+  // See comments for `CselHelper()`.
+  static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
+                                               UseScratchRegisterScope* temps,
+                                               const Register& rd,
+                                               const Operand& left,
+                                               const Operand& right,
+                                               Condition cond,
+                                               bool* should_synthesise_left);
+
+ private:
+  // The actual Push and Pop implementations. These don't generate any code
+  // other than that required for the push or pop. This allows
+  // (Push|Pop)CPURegList to bundle together setup code for a large block of
+  // registers.
+  //
+  // Note that size is per register, and is specified in bytes.
+  void PushHelper(int count,
+                  int size,
+                  const CPURegister& src0,
+                  const CPURegister& src1,
+                  const CPURegister& src2,
+                  const CPURegister& src3);
+  void PopHelper(int count,
+                 int size,
+                 const CPURegister& dst0,
+                 const CPURegister& dst1,
+                 const CPURegister& dst2,
+                 const CPURegister& dst3);
+
+  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+  // Perform necessary maintenance operations before a push or pop.
+  //
+  // Note that size is per register, and is specified in bytes.
+  void PrepareForPush(int count, int size);
+  void PrepareForPop(int count, int size);
+
+  // The actual implementation of load and store operations for CPURegList.
+  enum LoadStoreCPURegListAction { kLoad, kStore };
+  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
+                                 CPURegList registers,
+                                 const MemOperand& mem);
+  // Returns a MemOperand suitable for loading or storing a CPURegList at
+  // `mem`. This helper may allocate registers from `scratch_scope` and
+  // generate code to compute an intermediate address. The resulting MemOperand
+  // is only valid as long as `scratch_scope` remains valid.
+  MemOperand BaseMemOperandForLoadStoreCPURegList(
+      const CPURegList& registers,
+      const MemOperand& mem,
+      UseScratchRegisterScope* scratch_scope);
+
+  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
+    return !Instruction::IsValidImmPCOffset(branch_type,
+                                            label->GetLocation() -
+                                                GetCursorOffset());
+  }
+
+  void ConfigureSimulatorCPUFeaturesHelper(const CPUFeatures& features,
+                                           DebugHltOpcode action);
+
+  // Tell whether any of the macro instructions can be used. When false the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  bool allow_macro_instructions_;
+
+  // Indicates whether we should generate simulator or native code.
+  bool generate_simulator_code_;
+
+  // The register to use as a stack pointer for stack operations.
+  Register sp_;
+
+  // Scratch registers available for use by the MacroAssembler.
+  CPURegList tmp_list_;
+  CPURegList fptmp_list_;
+
+  UseScratchRegisterScope* current_scratch_scope_;
+
+  LiteralPool literal_pool_;
+  VeneerPool veneer_pool_;
+
+  ptrdiff_t checkpoint_;
+  ptrdiff_t recommended_checkpoint_;
+
+  friend class Pool;
+  friend class LiteralPool;
+};
+
+
+inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
+  return masm_->GetLiteralPoolMaxSize();
+}
+
+
+inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
+  return masm_->GetVeneerPoolMaxSize();
+}
+
+
+inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
+  masm_->recommended_checkpoint_ =
+      std::min(masm_->recommended_checkpoint_, offset);
+  recommended_checkpoint_ = offset;
+}
+
+class InstructionAccurateScope : public ExactAssemblyScope {
+ public:
+  VIXL_DEPRECATED("ExactAssemblyScope",
+                  InstructionAccurateScope(MacroAssembler* masm,
+                                           int64_t count,
+                                           SizePolicy size_policy = kExactSize))
+      : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {}
+};
+
+class BlockLiteralPoolScope {
+ public:
+  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
+    masm_->BlockLiteralPool();
+  }
+
+  ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }
+
+ private:
+  MacroAssembler* masm_;
+};
+
+
+class BlockVeneerPoolScope {
+ public:
+  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
+    masm_->BlockVeneerPool();
+  }
+
+  ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }
+
+ private:
+  MacroAssembler* masm_;
+};
+
+
+class BlockPoolsScope {
+ public:
+  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
+    masm_->BlockPools();
+  }
+
+  ~BlockPoolsScope() { masm_->ReleasePools(); }
+
+ private:
+  MacroAssembler* masm_;
+};
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
+// used as a pool of scratch registers. These registers can be allocated on
+// demand, and will be returned at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+  // This constructor implicitly calls `Open` to initialise the scope (`masm`
+  // must not be `NULL`), so it is ready to use immediately after it has been
+  // constructed.
+  explicit UseScratchRegisterScope(MacroAssembler* masm)
+      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {
+    Open(masm);
+  }
+  // This constructor does not implicitly initialise the scope.
Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + UseScratchRegisterScope() + : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {} + + // This function performs the actual initialisation work. + void Open(MacroAssembler* masm); + + // The destructor always implicitly calls the `Close` function. + ~UseScratchRegisterScope() { Close(); } + + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close(); + + + bool IsAvailable(const CPURegister& reg) const; + + + // Take a register from the appropriate temps list. It will be returned + // automatically when the scope ends. + Register AcquireW() { + return AcquireNextAvailable(masm_->GetScratchRegisterList()).W(); + } + Register AcquireX() { + return AcquireNextAvailable(masm_->GetScratchRegisterList()).X(); + } + VRegister AcquireH() { + return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).H(); + } + VRegister AcquireS() { + return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S(); + } + VRegister AcquireD() { + return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D(); + } + + + Register AcquireRegisterOfSize(int size_in_bits); + Register AcquireSameSizeAs(const Register& reg) { + return AcquireRegisterOfSize(reg.GetSizeInBits()); + } + VRegister AcquireVRegisterOfSize(int size_in_bits); + VRegister AcquireSameSizeAs(const VRegister& reg) { + return AcquireVRegisterOfSize(reg.GetSizeInBits()); + } + CPURegister AcquireCPURegisterOfSize(int size_in_bits) { + return masm_->GetScratchRegisterList()->IsEmpty() + ? CPURegister(AcquireVRegisterOfSize(size_in_bits)) + : CPURegister(AcquireRegisterOfSize(size_in_bits)); + } + + + // Explicitly release an acquired (or excluded) register, putting it back in + // the appropriate temps list. + void Release(const CPURegister& reg); + + + // Make the specified registers available as scratch registers for the + // duration of this scope. + void Include(const CPURegList& list); + void Include(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg); + void Include(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + + // Make sure that the specified registers are not available in this scope. + // This can be used to prevent helper functions from using sensitive + // registers, for example. + void Exclude(const CPURegList& list); + void Exclude(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg); + void Exclude(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + void Exclude(const CPURegister& reg1, + const CPURegister& reg2 = NoCPUReg, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + + + // Prevent any scratch registers from being used in this scope. 
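+  // For example, a helper that must not clobber any temporaries can write:
+  //   UseScratchRegisterScope temps(&masm);
+  //   temps.ExcludeAll();
+  // so that any AcquireX() inside the scope fails an assertion instead of
+  // silently allocating a register.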
+ void ExcludeAll(); + + private: + static CPURegister AcquireNextAvailable(CPURegList* available); + + static void ReleaseByCode(CPURegList* available, int code); + + static void ReleaseByRegList(CPURegList* available, RegList regs); + + static void IncludeByRegList(CPURegList* available, RegList exclude); + + static void ExcludeByRegList(CPURegList* available, RegList exclude); + + // The MacroAssembler maintains a list of available scratch registers, and + // also keeps track of the most recently-opened scope so that on destruction + // we can check that scopes do not outlive their parents. + MacroAssembler* masm_; + UseScratchRegisterScope* parent_; + + // The state of the available lists at the start of this scope. + RegList old_available_; // kRegister + RegList old_availablefp_; // kVRegister + + // Disallow copy constructor and operator=. + VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } + VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } +}; + + +// Like CPUFeaturesScope, but also generate Simulation pseudo-instructions to +// control a Simulator's CPUFeatures dynamically. +// +// One major difference from CPUFeaturesScope is that this scope cannot offer +// a writable "CPUFeatures* GetCPUFeatures()", because every write to the +// features needs a corresponding macro instruction. +class SimulationCPUFeaturesScope { + public: + explicit SimulationCPUFeaturesScope( + MacroAssembler* masm, + CPUFeatures::Feature feature0 = CPUFeatures::kNone, + CPUFeatures::Feature feature1 = CPUFeatures::kNone, + CPUFeatures::Feature feature2 = CPUFeatures::kNone, + CPUFeatures::Feature feature3 = CPUFeatures::kNone) + : masm_(masm), + cpu_features_scope_(masm, feature0, feature1, feature2, feature3) { + masm_->SaveSimulatorCPUFeatures(); + masm_->EnableSimulatorCPUFeatures( + CPUFeatures(feature0, feature1, feature2, feature3)); + } + + SimulationCPUFeaturesScope(MacroAssembler* masm, const CPUFeatures& other) + : masm_(masm), cpu_features_scope_(masm, other) { + masm_->SaveSimulatorCPUFeatures(); + masm_->EnableSimulatorCPUFeatures(other); + } + + ~SimulationCPUFeaturesScope() { masm_->RestoreSimulatorCPUFeatures(); } + + const CPUFeatures* GetCPUFeatures() const { + return cpu_features_scope_.GetCPUFeatures(); + } + + void SetCPUFeatures(const CPUFeatures& cpu_features) { + cpu_features_scope_.SetCPUFeatures(cpu_features); + masm_->SetSimulatorCPUFeatures(cpu_features); + } + + private: + MacroAssembler* masm_; + CPUFeaturesScope cpu_features_scope_; +}; + + +// Variadic templating is only available from C++11. +#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT + +// `R` stands for 'return type', and `P` for 'parameter types'. 
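+// For example, assuming a callable C function `int64_t MyAdd(int64_t, int64_t)`
+// (a placeholder name), generated code can reach it via the public
+// `CallRuntime()` wrapper:
+//   masm.CallRuntime(MyAdd);
+// which emits either a simulator trap or a literal branch, as below.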
+template <typename R, typename... P>
+void MacroAssembler::CallRuntimeHelper(R (*function)(P...),
+                                       RuntimeCallType call_type) {
+  if (generate_simulator_code_) {
+#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+    uintptr_t runtime_call_wrapper_address = reinterpret_cast<uintptr_t>(
+        &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
+    uintptr_t function_address = reinterpret_cast<uintptr_t>(function);
+
+    EmissionCheckScope guard(this,
+                             kRuntimeCallLength,
+                             CodeBufferCheckScope::kExactSize);
+    Label start;
+    bind(&start);
+    {
+      ExactAssemblyScope scope(this, kInstructionSize);
+      hlt(kRuntimeCallOpcode);
+    }
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
+                kRuntimeCallWrapperOffset);
+    dc(runtime_call_wrapper_address);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
+                kRuntimeCallFunctionOffset);
+    dc(function_address);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallTypeOffset);
+    dc32(call_type);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallLength);
+#else
+    VIXL_UNREACHABLE();
+#endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    Mov(temp, reinterpret_cast<uint64_t>(function));
+    if (call_type == kTailCallRuntime) {
+      Br(temp);
+    } else {
+      VIXL_ASSERT(call_type == kCallRuntime);
+      Blr(temp);
+    }
+  }
+}
+
+#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+
+}  // namespace aarch64
+
+// Required InvalSet template specialisations.
+// TODO: These template specialisations should not live in this file. Move
+// VeneerPool out of the aarch64 namespace in order to share its implementation
+// later.
+template <>
+inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
+                          aarch64::VeneerPool::kNPreallocatedInfos,
+                          ptrdiff_t,
+                          aarch64::VeneerPool::kInvalidOffset,
+                          aarch64::VeneerPool::kReclaimFrom,
+                          aarch64::VeneerPool::kReclaimFactor>::
+    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
+  return branch_info.first_unreacheable_pc_;
+}
+template <>
+inline void InvalSet<aarch64::VeneerPool::BranchInfo,
+                     aarch64::VeneerPool::kNPreallocatedInfos,
+                     ptrdiff_t,
+                     aarch64::VeneerPool::kInvalidOffset,
+                     aarch64::VeneerPool::kReclaimFrom,
+                     aarch64::VeneerPool::kReclaimFactor>::
+    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
+  branch_info->first_unreacheable_pc_ = key;
+}
+
+}  // namespace vixl
+
+#endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
diff --git a/dep/vixl/include/vixl/aarch64/operands-aarch64.h b/dep/vixl/include/vixl/aarch64/operands-aarch64.h
new file mode 100644
index 000000000..e3dbfa3ec
--- /dev/null
+++ b/dep/vixl/include/vixl/aarch64/operands-aarch64.h
@@ -0,0 +1,993 @@
+// Copyright 2016, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
+#define VIXL_AARCH64_OPERANDS_AARCH64_H_
+
+#include "instructions-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+
+// Registers.
+
+// Some CPURegister methods can return Register or VRegister types, so we need
+// to declare them in advance.
+class Register;
+class VRegister;
+
+class CPURegister {
+ public:
+  enum RegisterType {
+    // The kInvalid value is used to detect uninitialized static instances,
+    // which are always zero-initialized before any constructors are called.
+    kInvalid = 0,
+    kRegister,
+    kVRegister,
+    kFPRegister = kVRegister,
+    kNoRegister
+  };
+
+  CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+    VIXL_ASSERT(!IsValid());
+    VIXL_ASSERT(IsNone());
+  }
+
+  CPURegister(unsigned code, unsigned size, RegisterType type)
+      : code_(code), size_(size), type_(type) {
+    VIXL_ASSERT(IsValidOrNone());
+  }
+
+  unsigned GetCode() const {
+    VIXL_ASSERT(IsValid());
+    return code_;
+  }
+  VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); }
+
+  RegisterType GetType() const {
+    VIXL_ASSERT(IsValidOrNone());
+    return type_;
+  }
+  VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); }
+
+  RegList GetBit() const {
+    VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
+    return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+  }
+  VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); }
+
+  int GetSizeInBytes() const {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(size_ % 8 == 0);
+    return size_ / 8;
+  }
+  VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) {
+    return GetSizeInBytes();
+  }
+
+  int GetSizeInBits() const {
+    VIXL_ASSERT(IsValid());
+    return size_;
+  }
+  VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) {
+    return GetSizeInBits();
+  }
+  VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) {
+    return GetSizeInBits();
+  }
+
+  bool Is8Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 8;
+  }
+
+  bool Is16Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 16;
+  }
+
+  bool Is32Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 32;
+  }
+
+  bool Is64Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 64;
+  }
+
+  bool Is128Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 128;
+  }
+
+  bool IsValid() const {
+    if (IsValidRegister() || IsValidVRegister()) {
+      VIXL_ASSERT(!IsNone());
+      return true;
+    } else {
+      // This assert is hit when the register has not been properly initialized.
+      // One cause for this can be an initialisation order fiasco. See
+      // https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
+ VIXL_ASSERT(IsNone()); + return false; + } + } + + bool IsValidRegister() const { + return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) && + ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); + } + + bool IsValidVRegister() const { + return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) || + (size_ == kSRegSize) || (size_ == kDRegSize) || + (size_ == kQRegSize)) && + (code_ < kNumberOfVRegisters); + } + + bool IsValidFPRegister() const { + return IsFPRegister() && (code_ < kNumberOfVRegisters); + } + + bool IsNone() const { + // kNoRegister types should always have size 0 and code 0. + VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); + VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); + + return type_ == kNoRegister; + } + + bool Aliases(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return (code_ == other.code_) && (type_ == other.type_); + } + + bool Is(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return Aliases(other) && (size_ == other.size_); + } + + bool IsZero() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kZeroRegCode); + } + + bool IsSP() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kSPRegInternalCode); + } + + bool IsRegister() const { return type_ == kRegister; } + + bool IsVRegister() const { return type_ == kVRegister; } + + bool IsFPRegister() const { return IsS() || IsD(); } + + bool IsW() const { return IsValidRegister() && Is32Bits(); } + bool IsX() const { return IsValidRegister() && Is64Bits(); } + + // These assertions ensure that the size and type of the register are as + // described. They do not consider the number of lanes that make up a vector. + // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() + // does not imply Is1D() or Is8B(). + // Check the number of lanes, ie. the format of the vector, using methods such + // as Is8B(), Is1D(), etc. in the VRegister class. + bool IsV() const { return IsVRegister(); } + bool IsB() const { return IsV() && Is8Bits(); } + bool IsH() const { return IsV() && Is16Bits(); } + bool IsS() const { return IsV() && Is32Bits(); } + bool IsD() const { return IsV() && Is64Bits(); } + bool IsQ() const { return IsV() && Is128Bits(); } + + // Semantic type for sdot and udot instructions. 
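+  // ("S4B" reads as "an S register viewed as four byte lanes", as consumed by
+  // the dot-product instructions; the helpers below simply forward to the
+  // plain S-register accessors.)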
+  bool IsS4B() const { return IsS(); }
+  const VRegister& S4B() const { return S(); }
+
+  const Register& W() const;
+  const Register& X() const;
+  const VRegister& V() const;
+  const VRegister& B() const;
+  const VRegister& H() const;
+  const VRegister& S() const;
+  const VRegister& D() const;
+  const VRegister& Q() const;
+
+  bool IsSameType(const CPURegister& other) const {
+    return type_ == other.type_;
+  }
+
+  bool IsSameSizeAndType(const CPURegister& other) const {
+    return (size_ == other.size_) && IsSameType(other);
+  }
+
+ protected:
+  unsigned code_;
+  int size_;
+  RegisterType type_;
+
+ private:
+  bool IsValidOrNone() const { return IsValid() || IsNone(); }
+};
+
+
+class Register : public CPURegister {
+ public:
+  Register() : CPURegister() {}
+  explicit Register(const CPURegister& other)
+      : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) {
+    VIXL_ASSERT(IsValidRegister());
+  }
+  Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {}
+
+  bool IsValid() const {
+    VIXL_ASSERT(IsRegister() || IsNone());
+    return IsValidRegister();
+  }
+
+  static const Register& GetWRegFromCode(unsigned code);
+  VIXL_DEPRECATED("GetWRegFromCode",
+                  static const Register& WRegFromCode(unsigned code)) {
+    return GetWRegFromCode(code);
+  }
+
+  static const Register& GetXRegFromCode(unsigned code);
+  VIXL_DEPRECATED("GetXRegFromCode",
+                  static const Register& XRegFromCode(unsigned code)) {
+    return GetXRegFromCode(code);
+  }
+
+ private:
+  static const Register wregisters[];
+  static const Register xregisters[];
+};
+
+
+namespace internal {
+
+template <int size_in_bits>
+class FixedSizeRegister : public Register {
+ public:
+  FixedSizeRegister() : Register() {}
+  explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
+    VIXL_ASSERT(IsValidRegister());
+  }
+  explicit FixedSizeRegister(const Register& other)
+      : Register(other.GetCode(), size_in_bits) {
+    VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
+    VIXL_ASSERT(IsValidRegister());
+  }
+  explicit FixedSizeRegister(const CPURegister& other)
+      : Register(other.GetCode(), other.GetSizeInBits()) {
+    VIXL_ASSERT(other.GetType() == kRegister);
+    VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
+    VIXL_ASSERT(IsValidRegister());
+  }
+
+  bool IsValid() const {
+    return Register::IsValid() && (GetSizeInBits() == size_in_bits);
+  }
+};
+
+}  // namespace internal
+
+typedef internal::FixedSizeRegister<kXRegSize> XRegister;
+typedef internal::FixedSizeRegister<kWRegSize> WRegister;
+
+
+class VRegister : public CPURegister {
+ public:
+  VRegister() : CPURegister(), lanes_(1) {}
+  explicit VRegister(const CPURegister& other)
+      : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()),
+        lanes_(1) {
+    VIXL_ASSERT(IsValidVRegister());
+    VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+  }
+  VRegister(unsigned code, unsigned size, unsigned lanes = 1)
+      : CPURegister(code, size, kVRegister), lanes_(lanes) {
+    VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+  }
+  VRegister(unsigned code, VectorFormat format)
+      : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
+        lanes_(IsVectorFormat(format) ?
LaneCountFromFormat(format) : 1) { + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + + bool IsValid() const { + VIXL_ASSERT(IsVRegister() || IsNone()); + return IsValidVRegister(); + } + + static const VRegister& GetBRegFromCode(unsigned code); + VIXL_DEPRECATED("GetBRegFromCode", + static const VRegister& BRegFromCode(unsigned code)) { + return GetBRegFromCode(code); + } + + static const VRegister& GetHRegFromCode(unsigned code); + VIXL_DEPRECATED("GetHRegFromCode", + static const VRegister& HRegFromCode(unsigned code)) { + return GetHRegFromCode(code); + } + + static const VRegister& GetSRegFromCode(unsigned code); + VIXL_DEPRECATED("GetSRegFromCode", + static const VRegister& SRegFromCode(unsigned code)) { + return GetSRegFromCode(code); + } + + static const VRegister& GetDRegFromCode(unsigned code); + VIXL_DEPRECATED("GetDRegFromCode", + static const VRegister& DRegFromCode(unsigned code)) { + return GetDRegFromCode(code); + } + + static const VRegister& GetQRegFromCode(unsigned code); + VIXL_DEPRECATED("GetQRegFromCode", + static const VRegister& QRegFromCode(unsigned code)) { + return GetQRegFromCode(code); + } + + static const VRegister& GetVRegFromCode(unsigned code); + VIXL_DEPRECATED("GetVRegFromCode", + static const VRegister& VRegFromCode(unsigned code)) { + return GetVRegFromCode(code); + } + + VRegister V8B() const { return VRegister(code_, kDRegSize, 8); } + VRegister V16B() const { return VRegister(code_, kQRegSize, 16); } + VRegister V2H() const { return VRegister(code_, kSRegSize, 2); } + VRegister V4H() const { return VRegister(code_, kDRegSize, 4); } + VRegister V8H() const { return VRegister(code_, kQRegSize, 8); } + VRegister V2S() const { return VRegister(code_, kDRegSize, 2); } + VRegister V4S() const { return VRegister(code_, kQRegSize, 4); } + VRegister V2D() const { return VRegister(code_, kQRegSize, 2); } + VRegister V1D() const { return VRegister(code_, kDRegSize, 1); } + + bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); } + bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); } + bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); } + bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); } + bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); } + bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); } + bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); } + bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); } + bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); } + + // For consistency, we assert the number of lanes of these scalar registers, + // even though there are no vectors of equivalent total size with which they + // could alias. + bool Is1B() const { + VIXL_ASSERT(!(Is8Bits() && IsVector())); + return Is8Bits(); + } + bool Is1H() const { + VIXL_ASSERT(!(Is16Bits() && IsVector())); + return Is16Bits(); + } + bool Is1S() const { + VIXL_ASSERT(!(Is32Bits() && IsVector())); + return Is32Bits(); + } + + // Semantic type for sdot and udot instructions. 
+ bool Is1S4B() const { return Is1S(); } + + + bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; } + bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; } + bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; } + bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; } + + int GetLanes() const { return lanes_; } + VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); } + + bool IsScalar() const { return lanes_ == 1; } + + bool IsVector() const { return lanes_ > 1; } + + bool IsSameFormat(const VRegister& other) const { + return (size_ == other.size_) && (lanes_ == other.lanes_); + } + + unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; } + VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) { + return GetLaneSizeInBytes(); + } + + unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; } + VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) { + return GetLaneSizeInBits(); + } + + private: + static const VRegister bregisters[]; + static const VRegister hregisters[]; + static const VRegister sregisters[]; + static const VRegister dregisters[]; + static const VRegister qregisters[]; + static const VRegister vregisters[]; + int lanes_; +}; + + +// Backward compatibility for FPRegisters. +typedef VRegister FPRegister; + +// No*Reg is used to indicate an unused argument, or an error case. Note that +// these all compare equal (using the Is() method). The Register and VRegister +// variants are provided for convenience. +const Register NoReg; +const VRegister NoVReg; +const FPRegister NoFPReg; // For backward compatibility. +const CPURegister NoCPUReg; + + +#define DEFINE_REGISTERS(N) \ + const WRegister w##N(N); \ + const XRegister x##N(N); +AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS) +#undef DEFINE_REGISTERS +const WRegister wsp(kSPRegInternalCode); +const XRegister sp(kSPRegInternalCode); + + +#define DEFINE_VREGISTERS(N) \ + const VRegister b##N(N, kBRegSize); \ + const VRegister h##N(N, kHRegSize); \ + const VRegister s##N(N, kSRegSize); \ + const VRegister d##N(N, kDRegSize); \ + const VRegister q##N(N, kQRegSize); \ + const VRegister v##N(N, kQRegSize); +AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS) +#undef DEFINE_VREGISTERS + + +// Register aliases. +const XRegister ip0 = x16; +const XRegister ip1 = x17; +const XRegister lr = x30; +const XRegister xzr = x31; +const WRegister wzr = w31; + + +// AreAliased returns true if any of the named registers overlap. Arguments +// set to NoReg are ignored. The system stack pointer may be specified. +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreSameSizeAndType returns true if all of the specified registers have the +// same size, and are of the same type. The system stack pointer may be +// specified. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). 
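+// For example, AreSameSizeAndType(x0, x1, sp) is true (all are 64-bit core
+// registers), while AreSameSizeAndType(x0, w1) is false because the register
+// sizes differ.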
+bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, + const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, + const CPURegister& reg8 = NoCPUReg); + +// AreEven returns true if all of the specified registers have even register +// indices. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreEven(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreConsecutive returns true if all of the specified registers are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoCPUReg). +bool AreConsecutive(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + + +// AreSameFormat returns true if all of the specified VRegisters have the same +// vector format. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoVReg). +bool AreSameFormat(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// AreConsecutive returns true if all of the specified VRegisters are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoVReg). +bool AreConsecutive(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// Lists of registers. +class CPURegList { + public: + explicit CPURegList(CPURegister reg1, + CPURegister reg2 = NoCPUReg, + CPURegister reg3 = NoCPUReg, + CPURegister reg4 = NoCPUReg) + : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()), + size_(reg1.GetSizeInBits()), + type_(reg1.GetType()) { + VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) + : list_(list), size_(size), type_(type) { + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, + unsigned size, + unsigned first_reg, + unsigned last_reg) + : size_(size), type_(type) { + VIXL_ASSERT( + ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) || + ((type == CPURegister::kVRegister) && + (last_reg < kNumberOfVRegisters))); + VIXL_ASSERT(last_reg >= first_reg); + list_ = (UINT64_C(1) << (last_reg + 1)) - 1; + list_ &= ~((UINT64_C(1) << first_reg) - 1); + VIXL_ASSERT(IsValid()); + } + + CPURegister::RegisterType GetType() const { + VIXL_ASSERT(IsValid()); + return type_; + } + VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) { + return GetType(); + } + + // Combine another CPURegList into this one. Registers that already exist in + // this list are left unchanged. The type and size of the registers in the + // 'other' list must match those in this list. 
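+  // For example:
+  //   CPURegList list(x0, x1);
+  //   list.Combine(CPURegList(x1, x2));  // list is now {x0, x1, x2}.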
+ void Combine(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetRegisterSizeInBits() == size_); + list_ |= other.GetList(); + } + + // Remove every register in the other CPURegList from this one. Registers that + // do not exist in this list are ignored. The type and size of the registers + // in the 'other' list must match those in this list. + void Remove(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetRegisterSizeInBits() == size_); + list_ &= ~other.GetList(); + } + + // Variants of Combine and Remove which take a single register. + void Combine(const CPURegister& other) { + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetSizeInBits() == size_); + Combine(other.GetCode()); + } + + void Remove(const CPURegister& other) { + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetSizeInBits() == size_); + Remove(other.GetCode()); + } + + // Variants of Combine and Remove which take a single register by its code; + // the type and size of the register is inferred from this list. + void Combine(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ |= (UINT64_C(1) << code); + } + + void Remove(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ &= ~(UINT64_C(1) << code); + } + + static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_); + } + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_); + } + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + bool Overlaps(const CPURegList& other) const { + return (type_ == other.type_) && ((list_ & other.list_) != 0); + } + + RegList GetList() const { + VIXL_ASSERT(IsValid()); + return list_; + } + VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); } + + void SetList(RegList new_list) { + VIXL_ASSERT(IsValid()); + list_ = new_list; + } + VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) { + return SetList(new_list); + } + + // Remove all callee-saved registers from the list. This can be useful when + // preparing registers for an AAPCS64 function call, for example. + void RemoveCalleeSaved(); + + CPURegister PopLowestIndex(); + CPURegister PopHighestIndex(); + + // AAPCS64 callee-saved registers. + static CPURegList GetCalleeSaved(unsigned size = kXRegSize); + static CPURegList GetCalleeSavedV(unsigned size = kDRegSize); + + // AAPCS64 caller-saved registers. Note that this includes lr. + // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top + // 64-bits being caller-saved. 
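+  // For example, a call site can spill everything a callee may clobber with
+  //   masm.PushCPURegList(kCallerSaved);
+  //   masm.PushCPURegList(kCallerSavedV);
+  // and pop the two lists in reverse order after the call returns.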
+  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
+
+  bool IsEmpty() const {
+    VIXL_ASSERT(IsValid());
+    return list_ == 0;
+  }
+
+  bool IncludesAliasOf(const CPURegister& other) const {
+    VIXL_ASSERT(IsValid());
+    return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
+  }
+
+  bool IncludesAliasOf(int code) const {
+    VIXL_ASSERT(IsValid());
+    return ((code & list_) != 0);
+  }
+
+  int GetCount() const {
+    VIXL_ASSERT(IsValid());
+    return CountSetBits(list_);
+  }
+  VIXL_DEPRECATED("GetCount", int Count() const) { return GetCount(); }
+
+  int GetRegisterSizeInBits() const {
+    VIXL_ASSERT(IsValid());
+    return size_;
+  }
+  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
+    return GetRegisterSizeInBits();
+  }
+
+  int GetRegisterSizeInBytes() const {
+    int size_in_bits = GetRegisterSizeInBits();
+    VIXL_ASSERT((size_in_bits % 8) == 0);
+    return size_in_bits / 8;
+  }
+  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
+    return GetRegisterSizeInBytes();
+  }
+
+  unsigned GetTotalSizeInBytes() const {
+    VIXL_ASSERT(IsValid());
+    return GetRegisterSizeInBytes() * GetCount();
+  }
+  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
+    return GetTotalSizeInBytes();
+  }
+
+ private:
+  RegList list_;
+  int size_;
+  CPURegister::RegisterType type_;
+
+  bool IsValid() const;
+};
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedV;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedV;
+
+
+// Operand.
+class Operand {
+ public:
+  // #<immediate>
+  // where <immediate> is int64_t.
+  // This is allowed to be an implicit constructor because Operand is
+  // a wrapper class that doesn't normally perform any type conversion.
+  Operand(int64_t immediate = 0);  // NOLINT(runtime/explicit)
+
+  // rm, {<shift> #<amount>}
+  // where <shift> is one of {LSL, LSR, ASR, ROR}.
+  //       <amount> is uint6_t.
+  // This is allowed to be an implicit constructor because Operand is
+  // a wrapper class that doesn't normally perform any type conversion.
+  Operand(Register reg,
+          Shift shift = LSL,
+          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)
+
+  // rm, {<extend> {#<amount>}}
+  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+  //       <amount> is uint2_t.
+  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+  bool IsImmediate() const;
+  bool IsPlainRegister() const;
+  bool IsShiftedRegister() const;
+  bool IsExtendedRegister() const;
+  bool IsZero() const;
+
+  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+  // which helps in the encoding of instructions that use the stack pointer.
+  Operand ToExtendedRegister() const;
+
+  int64_t GetImmediate() const {
+    VIXL_ASSERT(IsImmediate());
+    return immediate_;
+  }
+  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
+    return GetImmediate();
+  }
+
+  int64_t GetEquivalentImmediate() const {
+    return IsZero() ?
0 : GetImmediate(); + } + + Register GetRegister() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return reg_; + } + VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); } + Register GetBaseRegister() const { return GetRegister(); } + + Shift GetShift() const { + VIXL_ASSERT(IsShiftedRegister()); + return shift_; + } + VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); } + + Extend GetExtend() const { + VIXL_ASSERT(IsExtendedRegister()); + return extend_; + } + VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); } + + unsigned GetShiftAmount() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return shift_amount_; + } + VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) { + return GetShiftAmount(); + } + + private: + int64_t immediate_; + Register reg_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +// MemOperand represents the addressing mode of a load or store instruction. +class MemOperand { + public: + // Creates an invalid `MemOperand`. + MemOperand(); + explicit MemOperand(Register base, + int64_t offset = 0, + AddrMode addrmode = Offset); + MemOperand(Register base, + Register regoffset, + Shift shift = LSL, + unsigned shift_amount = 0); + MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount = 0); + MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset); + + const Register& GetBaseRegister() const { return base_; } + VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) { + return GetBaseRegister(); + } + + const Register& GetRegisterOffset() const { return regoffset_; } + VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) { + return GetRegisterOffset(); + } + + int64_t GetOffset() const { return offset_; } + VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); } + + AddrMode GetAddrMode() const { return addrmode_; } + VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) { + return GetAddrMode(); + } + + Shift GetShift() const { return shift_; } + VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); } + + Extend GetExtend() const { return extend_; } + VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); } + + unsigned GetShiftAmount() const { return shift_amount_; } + VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) { + return GetShiftAmount(); + } + + bool IsImmediateOffset() const; + bool IsRegisterOffset() const; + bool IsPreIndex() const; + bool IsPostIndex() const; + + void AddOffset(int64_t offset); + + bool IsValid() const { + return base_.IsValid() && + ((addrmode_ == Offset) || (addrmode_ == PreIndex) || + (addrmode_ == PostIndex)) && + ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) && + ((offset_ == 0) || !regoffset_.IsValid()); + } + + bool Equals(const MemOperand& other) const { + return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) && + (offset_ == other.offset_) && (addrmode_ == other.addrmode_) && + (shift_ == other.shift_) && (extend_ == other.extend_) && + (shift_amount_ == other.shift_amount_); + } + + private: + Register base_; + Register regoffset_; + int64_t offset_; + AddrMode addrmode_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + +// This an abstraction that can represent a register or memory location. The +// `MacroAssembler` provides helpers to move data between generic operands. 
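+// For example, both of these describe a 64-bit value, one held in x0 and one
+// spilled to the stack:
+//   GenericOperand in_reg(x0);
+//   GenericOperand in_mem(MemOperand(sp, 16), kXRegSizeInBytes);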
+class GenericOperand { + public: + GenericOperand() { VIXL_ASSERT(!IsValid()); } + GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit) + GenericOperand(const MemOperand& mem_op, + size_t mem_op_size = 0); // NOLINT(runtime/explicit) + + bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); } + + bool Equals(const GenericOperand& other) const; + + bool IsCPURegister() const { + VIXL_ASSERT(IsValid()); + return cpu_register_.IsValid(); + } + + bool IsRegister() const { + return IsCPURegister() && cpu_register_.IsRegister(); + } + + bool IsVRegister() const { + return IsCPURegister() && cpu_register_.IsVRegister(); + } + + bool IsSameCPURegisterType(const GenericOperand& other) { + return IsCPURegister() && other.IsCPURegister() && + GetCPURegister().IsSameType(other.GetCPURegister()); + } + + bool IsMemOperand() const { + VIXL_ASSERT(IsValid()); + return mem_op_.IsValid(); + } + + CPURegister GetCPURegister() const { + VIXL_ASSERT(IsCPURegister()); + return cpu_register_; + } + + MemOperand GetMemOperand() const { + VIXL_ASSERT(IsMemOperand()); + return mem_op_; + } + + size_t GetMemOperandSizeInBytes() const { + VIXL_ASSERT(IsMemOperand()); + return mem_op_size_; + } + + size_t GetSizeInBytes() const { + return IsCPURegister() ? cpu_register_.GetSizeInBytes() + : GetMemOperandSizeInBytes(); + } + + size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; } + + private: + CPURegister cpu_register_; + MemOperand mem_op_; + // The size of the memory region pointed to, in bytes. + // We only support sizes up to X/D register sizes. + size_t mem_op_size_; +}; +} +} // namespace vixl::aarch64 + +#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/simulator-aarch64.h b/dep/vixl/include/vixl/aarch64/simulator-aarch64.h new file mode 100644 index 000000000..061a7dab3 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/simulator-aarch64.h @@ -0,0 +1,3258 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+#ifndef VIXL_AARCH64_SIMULATOR_AARCH64_H_
+#define VIXL_AARCH64_SIMULATOR_AARCH64_H_
+
+#include <cstring>
+
+#include "../globals-vixl.h"
+#include "../utils-vixl.h"
+#include "../cpu-features.h"
+
+#include "abi-aarch64.h"
+#include "cpu-features-auditor-aarch64.h"
+#include "disasm-aarch64.h"
+#include "instructions-aarch64.h"
+#include "instrument-aarch64.h"
+#include "simulator-constants-aarch64.h"
+
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+
+// These are only used for the ABI feature, and depend on checks performed for
+// it.
+#ifdef VIXL_HAS_ABI_SUPPORT
+#include <tuple>
+#if __cplusplus >= 201402L
+// Required for `std::index_sequence`
+#include <utility>
+#endif
+#endif
+
+namespace vixl {
+namespace aarch64 {
+
+// Representation of memory, with typed getters and setters for access.
+class Memory {
+ public:
+  template <typename T>
+  static T AddressUntag(T address) {
+    // Cast the address using a C-style cast. A reinterpret_cast would be
+    // appropriate, but it can't cast one integral type to another.
+    uint64_t bits = (uint64_t)address;
+    return (T)(bits & ~kAddressTagMask);
+  }
+
+  template <typename T, typename A>
+  static T Read(A address) {
+    T value;
+    address = AddressUntag(address);
+    VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                (sizeof(value) == 4) || (sizeof(value) == 8) ||
+                (sizeof(value) == 16));
+    memcpy(&value, reinterpret_cast<const char*>(address), sizeof(value));
+    return value;
+  }
+
+  template <typename T, typename A>
+  static void Write(A address, T value) {
+    address = AddressUntag(address);
+    VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                (sizeof(value) == 4) || (sizeof(value) == 8) ||
+                (sizeof(value) == 16));
+    memcpy(reinterpret_cast<char*>(address), &value, sizeof(value));
+  }
+};
+
+// Represent a register (r0-r31, v0-v31).
+template <unsigned kSizeInBytes>
+class SimRegisterBase {
+ public:
+  SimRegisterBase() : written_since_last_log_(false) {}
+
+  // Write the specified value. The value is zero-extended if necessary.
+  template <typename T>
+  void Write(T new_value) {
+    if (sizeof(new_value) < kSizeInBytes) {
+      // All AArch64 registers are zero-extending.
+      memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
+    }
+    WriteLane(new_value, 0);
+    NotifyRegisterWrite();
+  }
+  template <typename T>
+  VIXL_DEPRECATED("Write", void Set(T new_value)) {
+    Write(new_value);
+  }
+
+  // Insert a typed value into a register, leaving the rest of the register
+  // unchanged. The lane parameter indicates where in the register the value
+  // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
+  // 0 represents the least significant bits.
+  template <typename T>
+  void Insert(int lane, T new_value) {
+    WriteLane(new_value, lane);
+    NotifyRegisterWrite();
+  }
+
+  // Get the value as the specified type. The value is truncated if necessary.
+  template <typename T>
+  T Get() const {
+    return GetLane<T>(0);
+  }
+
+  // Get the lane value as the specified type. The value is truncated if
+  // necessary.
+  template <typename T>
+  T GetLane(int lane) const {
+    T result;
+    ReadLane(&result, lane);
+    return result;
+  }
+  template <typename T>
+  VIXL_DEPRECATED("GetLane", T Get(int lane) const) {
+    return GetLane(lane);
+  }
+
+  // TODO: Make this return a map of updated bytes, so that we can highlight
+  // updated lanes for load-and-insert. (That never happens for scalar code, but
+  // NEON has some instructions that can update individual lanes.)
+  bool WrittenSinceLastLog() const { return written_since_last_log_; }
+
+  void NotifyRegisterLogged() { written_since_last_log_ = false; }
+
+ protected:
+  uint8_t value_[kSizeInBytes];
+
+  // Helpers to aid with register tracing.
+  bool written_since_last_log_;
+
+  void NotifyRegisterWrite() { written_since_last_log_ = true; }
+
+ private:
+  template <typename T>
+  void ReadLane(T* dst, int lane) const {
+    VIXL_ASSERT(lane >= 0);
+    VIXL_ASSERT((sizeof(*dst) + (lane * sizeof(*dst))) <= kSizeInBytes);
+    memcpy(dst, &value_[lane * sizeof(*dst)], sizeof(*dst));
+  }
+
+  template <typename T>
+  void WriteLane(T src, int lane) {
+    VIXL_ASSERT(lane >= 0);
+    VIXL_ASSERT((sizeof(src) + (lane * sizeof(src))) <= kSizeInBytes);
+    memcpy(&value_[lane * sizeof(src)], &src, sizeof(src));
+  }
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister;   // r0-r31
+typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister;  // v0-v31
+
+// The default ReadLane and WriteLane methods assume what we are copying is
+// "trivially copyable" by using memcpy. We have to provide alternative
+// implementations for SimFloat16 which cannot be copied this way.
+
+template <>
+template <>
+inline void SimVRegister::ReadLane(vixl::internal::SimFloat16* dst,
+                                   int lane) const {
+  uint16_t rawbits;
+  ReadLane(&rawbits, lane);
+  *dst = RawbitsToFloat16(rawbits);
+}
+
+template <>
+template <>
+inline void SimVRegister::WriteLane(vixl::internal::SimFloat16 src, int lane) {
+  WriteLane(Float16ToRawbits(src), lane);
+}
+
+// Representation of a vector register, with typed getters and setters for lanes
+// and additional information to represent lane state.
+class LogicVRegister {
+ public:
+  inline LogicVRegister(
+      SimVRegister& other)  // NOLINT(runtime/references)(runtime/explicit)
+      : register_(other) {
+    for (size_t i = 0; i < ArrayLength(saturated_); i++) {
+      saturated_[i] = kNotSaturated;
+    }
+    for (size_t i = 0; i < ArrayLength(round_); i++) {
+      round_[i] = 0;
+    }
+  }
+
+  int64_t Int(VectorFormat vform, int index) const {
+    int64_t element;
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        element = register_.GetLane<int8_t>(index);
+        break;
+      case 16:
+        element = register_.GetLane<int16_t>(index);
+        break;
+      case 32:
+        element = register_.GetLane<int32_t>(index);
+        break;
+      case 64:
+        element = register_.GetLane<int64_t>(index);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return 0;
+    }
+    return element;
+  }
+
+  uint64_t Uint(VectorFormat vform, int index) const {
+    uint64_t element;
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        element = register_.GetLane<uint8_t>(index);
+        break;
+      case 16:
+        element = register_.GetLane<uint16_t>(index);
+        break;
+      case 32:
+        element = register_.GetLane<uint32_t>(index);
+        break;
+      case 64:
+        element = register_.GetLane<uint64_t>(index);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return 0;
+    }
+    return element;
+  }
+
+  uint64_t UintLeftJustified(VectorFormat vform, int index) const {
+    return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+  }
+
+  int64_t IntLeftJustified(VectorFormat vform, int index) const {
+    uint64_t value = UintLeftJustified(vform, index);
+    int64_t result;
+    memcpy(&result, &value, sizeof(result));
+    return result;
+  }
+
+  void SetInt(VectorFormat vform, int index, int64_t value) const {
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        register_.Insert(index, static_cast<int8_t>(value));
+        break;
+      case 16:
+        register_.Insert(index, static_cast<int16_t>(value));
+        break;
+      case 32:
+        register_.Insert(index, static_cast<int32_t>(value));
+        break;
+      case 64:
+        register_.Insert(index, static_cast<int64_t>(value));
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return;
+    }
+  }
+
+  void SetIntArray(VectorFormat vform, const int64_t* src) const {
+    ClearForWrite(vform);
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      SetInt(vform, i, src[i]);
+    }
+  }
+
+  void SetUint(VectorFormat vform, int index, uint64_t value) const {
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        register_.Insert(index, static_cast<uint8_t>(value));
+        break;
+      case 16:
+        register_.Insert(index, static_cast<uint16_t>(value));
+        break;
+      case 32:
+        register_.Insert(index, static_cast<uint32_t>(value));
+        break;
+      case 64:
+        register_.Insert(index, static_cast<uint64_t>(value));
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return;
+    }
+  }
+
+  void SetUintArray(VectorFormat vform, const uint64_t* src) const {
+    ClearForWrite(vform);
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      SetUint(vform, i, src[i]);
+    }
+  }
+
+  void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const {
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        register_.Insert(index, Memory::Read<uint8_t>(addr));
+        break;
+      case 16:
+        register_.Insert(index, Memory::Read<uint16_t>(addr));
+        break;
+      case 32:
+        register_.Insert(index, Memory::Read<uint32_t>(addr));
+        break;
+      case 64:
+        register_.Insert(index, Memory::Read<uint64_t>(addr));
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return;
+    }
+  }
+
+  void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const {
+    uint64_t value = Uint(vform, index);
+    switch (LaneSizeInBitsFromFormat(vform)) {
+      case 8:
+        Memory::Write(addr, static_cast<uint8_t>(value));
+        break;
+      case 16:
+        Memory::Write(addr, static_cast<uint16_t>(value));
+        break;
+      case 32:
+        Memory::Write(addr, static_cast<uint32_t>(value));
+        break;
+      case 64:
+        Memory::Write(addr, value);
+        break;
+    }
+  }
+
+  template <typename T>
+  T Float(int index) const {
+    return register_.GetLane<T>(index);
+  }
+
+  template <typename T>
+  void SetFloat(int index, T value) const {
+    register_.Insert(index, value);
+  }
+
+  // When setting a result in a register of size less than Q, the top bits of
+  // the Q register must be cleared.
+  void ClearForWrite(VectorFormat vform) const {
+    unsigned size = RegisterSizeInBytesFromFormat(vform);
+    for (unsigned i = size; i < kQRegSizeInBytes; i++) {
+      SetUint(kFormat16B, i, 0);
+    }
+  }
+
+  // Saturation state for each lane of a vector.
+  enum Saturation {
+    kNotSaturated = 0,
+    kSignedSatPositive = 1 << 0,
+    kSignedSatNegative = 1 << 1,
+    kSignedSatMask = kSignedSatPositive | kSignedSatNegative,
+    kSignedSatUndefined = kSignedSatMask,
+    kUnsignedSatPositive = 1 << 2,
+    kUnsignedSatNegative = 1 << 3,
+    kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative,
+    kUnsignedSatUndefined = kUnsignedSatMask
+  };
+
+  // Getters for saturation state.
+  Saturation GetSignedSaturation(int index) {
+    return static_cast<Saturation>(saturated_[index] & kSignedSatMask);
+  }
+
+  Saturation GetUnsignedSaturation(int index) {
+    return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask);
+  }
+
+  // Setters for saturation state.
+  void ClearSat(int index) { saturated_[index] = kNotSaturated; }
+
+  void SetSignedSat(int index, bool positive) {
+    SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative);
+  }
+
+  void SetUnsignedSat(int index, bool positive) {
+    SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative);
+  }
+
+  void SetSatFlag(int index, Saturation sat) {
+    saturated_[index] = static_cast<Saturation>(saturated_[index] | sat);
+    VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined);
+    VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined);
+  }
+
+  // Saturate lanes of a vector based on saturation state.
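+  // For example, if a saturating operation recorded kSignedSatPositive for
+  // lane 0, SignedSaturate(vform) rewrites that lane to MaxIntFromFormat(vform).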
+ LogicVRegister& SignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetSignedSaturation(i); + if (sat == kSignedSatPositive) { + SetInt(vform, i, MaxIntFromFormat(vform)); + } else if (sat == kSignedSatNegative) { + SetInt(vform, i, MinIntFromFormat(vform)); + } + } + return *this; + } + + LogicVRegister& UnsignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetUnsignedSaturation(i); + if (sat == kUnsignedSatPositive) { + SetUint(vform, i, MaxUintFromFormat(vform)); + } else if (sat == kUnsignedSatNegative) { + SetUint(vform, i, 0); + } + } + return *this; + } + + // Getter for rounding state. + bool GetRounding(int index) { return round_[index]; } + + // Setter for rounding state. + void SetRounding(int index, bool round) { round_[index] = round; } + + // Round lanes of a vector based on rounding state. + LogicVRegister& Round(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetUint(vform, i, Uint(vform, i) + (GetRounding(i) ? 1 : 0)); + } + return *this; + } + + // Unsigned halve lanes of a vector, and use the saturation state to set the + // top bit. + LogicVRegister& Uhalve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t val = Uint(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetUnsignedSaturation(i) != kNotSaturated) { + // If the operation causes unsigned saturation, the bit shifted into the + // most significant bit must be set. + val |= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + // Signed halve lanes of a vector, and use the carry state to set the top bit. + LogicVRegister& Halve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t val = Int(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetSignedSaturation(i) != kNotSaturated) { + // If the operation causes signed saturation, the sign bit must be + // inverted. + val ^= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + private: + SimVRegister& register_; + + // Allocate one saturation state entry per lane; largest register is type Q, + // and lanes can be a minimum of one byte wide. + Saturation saturated_[kQRegSizeInBytes]; + + // Allocate one rounding state entry per lane. + bool round_[kQRegSizeInBytes]; +}; + +// The proper way to initialize a simulated system register (such as NZCV) is as +// follows: +// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV); +class SimSystemRegister { + public: + // The default constructor represents a register which has no writable bits. + // It is not possible to set its value to anything other than 0. 
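+  // (A default-constructed instance therefore behaves as a read-as-zero,
+  // write-ignored register: its write-ignore mask covers all 32 bits.)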
+  SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) {}
+
+  uint32_t GetRawValue() const { return value_; }
+  VIXL_DEPRECATED("GetRawValue", uint32_t RawValue() const) {
+    return GetRawValue();
+  }
+
+  void SetRawValue(uint32_t new_value) {
+    value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+  }
+
+  uint32_t ExtractBits(int msb, int lsb) const {
+    return ExtractUnsignedBitfield32(msb, lsb, value_);
+  }
+  VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
+    return ExtractBits(msb, lsb);
+  }
+
+  int32_t ExtractSignedBits(int msb, int lsb) const {
+    return ExtractSignedBitfield32(msb, lsb, value_);
+  }
+  VIXL_DEPRECATED("ExtractSignedBits",
+                  int32_t SignedBits(int msb, int lsb) const) {
+    return ExtractSignedBits(msb, lsb);
+  }
+
+  void SetBits(int msb, int lsb, uint32_t bits);
+
+  // Default system register values.
+  static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                             \
+  uint32_t Get##Name() const { return this->Func(HighBit, LowBit); }          \
+  VIXL_DEPRECATED("Get" #Name, uint32_t Name() const) { return Get##Name(); } \
+  void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+  static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+  SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_ZERO_BITS
+#undef DEFINE_GETTER
+
+ protected:
+  // Most system registers only implement a few of the bits in the word. Other
+  // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+  // describes the bits which are not modifiable.
+  SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+      : value_(value), write_ignore_mask_(write_ignore_mask) {}
+
+  uint32_t value_;
+  uint32_t write_ignore_mask_;
+};
+
+
+class SimExclusiveLocalMonitor {
+ public:
+  SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) {
+    Clear();
+  }
+
+  // Clear the exclusive monitor (like clrex).
+  void Clear() {
+    address_ = 0;
+    size_ = 0;
+  }
+
+  // Clear the exclusive monitor most of the time.
+  void MaybeClear() {
+    if ((seed_ % kSkipClearProbability) != 0) {
+      Clear();
+    }
+
+    // Advance seed_ using a simple linear congruential generator.
+    seed_ = (seed_ * 48271) % 2147483647;
+  }
+
+  // Mark the address range for exclusive access (like load-exclusive).
+  void MarkExclusive(uint64_t address, size_t size) {
+    address_ = address;
+    size_ = size;
+  }
+
+  // Return true if the address range is marked (like store-exclusive).
+  // This helper doesn't implicitly clear the monitor.
+  bool IsExclusive(uint64_t address, size_t size) {
+    VIXL_ASSERT(size > 0);
+    // Be pedantic: Require both the address and the size to match.
+    return (size == size_) && (address == address_);
+  }
+
+ private:
+  uint64_t address_;
+  size_t size_;
+
+  const int kSkipClearProbability;
+  uint32_t seed_;
+};
+
+
+// We can't accurately simulate the global monitor since it depends on external
+// influences. Instead, this implementation occasionally causes accesses to
+// fail, according to kPassProbability.
+class SimExclusiveGlobalMonitor {
+ public:
+  SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {}
+
+  bool IsExclusive(uint64_t address, size_t size) {
+    USE(address, size);
+
+    bool pass = (seed_ % kPassProbability) != 0;
+    // Advance seed_ using a simple linear congruential generator.
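+    // (Specifically, the Park-Miller "minimal standard" recurrence
+    // seed = (48271 * seed) mod (2^31 - 1), the same generator used by
+    // SimExclusiveLocalMonitor::MaybeClear() above.)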
+
+
+class SimExclusiveLocalMonitor {
+ public:
+  SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) {
+    Clear();
+  }
+
+  // Clear the exclusive monitor (like clrex).
+  void Clear() {
+    address_ = 0;
+    size_ = 0;
+  }
+
+  // Clear the exclusive monitor most of the time.
+  void MaybeClear() {
+    if ((seed_ % kSkipClearProbability) != 0) {
+      Clear();
+    }
+
+    // Advance seed_ using a simple linear congruential generator.
+    seed_ = (seed_ * 48271) % 2147483647;
+  }
+
+  // Mark the address range for exclusive access (like load-exclusive).
+  void MarkExclusive(uint64_t address, size_t size) {
+    address_ = address;
+    size_ = size;
+  }
+
+  // Return true if the address range is marked (like store-exclusive).
+  // This helper doesn't implicitly clear the monitor.
+  bool IsExclusive(uint64_t address, size_t size) {
+    VIXL_ASSERT(size > 0);
+    // Be pedantic: Require both the address and the size to match.
+    return (size == size_) && (address == address_);
+  }
+
+ private:
+  uint64_t address_;
+  size_t size_;
+
+  const int kSkipClearProbability;
+  uint32_t seed_;
+};
+
+
+// We can't accurately simulate the global monitor since it depends on external
+// influences. Instead, this implementation occasionally causes accesses to
+// fail, according to kPassProbability.
+class SimExclusiveGlobalMonitor {
+ public:
+  SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {}
+
+  bool IsExclusive(uint64_t address, size_t size) {
+    USE(address, size);
+
+    bool pass = (seed_ % kPassProbability) != 0;
+    // Advance seed_ using a simple linear congruential generator.
+    seed_ = (seed_ * 48271) % 2147483647;
+    return pass;
+  }
+
+ private:
+  const int kPassProbability;
+  uint32_t seed_;
+};
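Taken together, the two monitors model AArch64's load-exclusive/store-exclusive protocol: LDXR marks a range on the local monitor and STXR succeeds only while the mark is intact. A minimal standalone sketch of that flow (the `LocalMonitor` type here is a stand-in for illustration, not VIXL's class):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct LocalMonitor {
      uint64_t address = 0;
      std::size_t size = 0;
      void MarkExclusive(uint64_t a, std::size_t s) { address = a; size = s; }
      bool IsExclusive(uint64_t a, std::size_t s) const {
        return (a == address) && (s == size);
      }
      void Clear() { address = 0; size = 0; }
    };

    int main() {
      LocalMonitor monitor;
      uint64_t cell = 41;  // simulated memory location
      uint64_t addr = reinterpret_cast<uint64_t>(&cell);

      monitor.MarkExclusive(addr, sizeof(cell));  // LDXR: load and tag
      uint64_t value = cell;

      // STXR: the store succeeds only if the tagged range is still exclusive.
      if (monitor.IsExclusive(addr, sizeof(cell))) {
        cell = value + 1;
        monitor.Clear();
      }
      std::printf("cell = %llu\n", (unsigned long long)cell);  // cell = 42
      return 0;
    }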
+
+
+class Simulator : public DecoderVisitor {
+ public:
+  explicit Simulator(Decoder* decoder, FILE* stream = stdout);
+  ~Simulator();
+
+  void ResetState();
+
+  // Run the simulator.
+  virtual void Run();
+  void RunFrom(const Instruction* first);
+
+
+#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \
+    (defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
+  // Templated `RunFrom` version taking care of passing arguments and returning
+  // the result value.
+  // This allows code like:
+  //     int32_t res = simulator.RunFrom<int32_t, int32_t>(GenerateCode(),
+  //                                                       0x123);
+  // It requires VIXL's ABI features, and C++11 or greater.
+  // Also, the initialisation of tuples is incorrect in GCC before 4.9.1:
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51253
+  template <typename R, typename... P>
+  R RunFrom(const Instruction* code, P... arguments) {
+    return RunFromStructHelper<R, P...>::Wrapper(this, code, arguments...);
+  }
+
+  template <typename R, typename... P>
+  struct RunFromStructHelper {
+    static R Wrapper(Simulator* simulator,
+                     const Instruction* code,
+                     P... arguments) {
+      ABI abi;
+      std::tuple<P...> unused_tuple{
+          // TODO: We currently do not support arguments passed on the stack. We
+          // could do so by using `WriteGenericOperand()` here, but may need to
+          // add features to handle situations where the stack is or is not set
+          // up.
+          (simulator->WriteCPURegister(abi.GetNextParameterGenericOperand<P>()
+                                           .GetCPURegister(),
+                                       arguments),
+           arguments)...};
+      simulator->RunFrom(code);
+      return simulator->ReadGenericOperand<R>(abi.GetReturnGenericOperand());
+    }
+  };
+
+  // Partial specialization when the return type is `void`.
+  template <typename... P>
+  struct RunFromStructHelper<void, P...> {
+    static void Wrapper(Simulator* simulator,
+                        const Instruction* code,
+                        P... arguments) {
+      ABI abi;
+      std::tuple<P...> unused_tuple{
+          // TODO: We currently do not support arguments passed on the stack. We
+          // could do so by using `WriteGenericOperand()` here, but may need to
+          // add features to handle situations where the stack is or is not set
+          // up.
+          (simulator->WriteCPURegister(abi.GetNextParameterGenericOperand<P>()
+                                           .GetCPURegister(),
+                                       arguments),
+           arguments)...};
+      simulator->RunFrom(code);
+    }
+  };
+#endif
+
+  // Execution ends when the PC hits this address.
+  static const Instruction* kEndOfSimAddress;
+
+  // Simulation helpers.
+  const Instruction* ReadPc() const { return pc_; }
+  VIXL_DEPRECATED("ReadPc", const Instruction* pc() const) { return ReadPc(); }
+
+  enum BranchLogMode { LogBranches, NoBranchLog };
+
+  void WritePc(const Instruction* new_pc,
+               BranchLogMode log_mode = LogBranches) {
+    if (log_mode == LogBranches) LogTakenBranch(new_pc);
+    pc_ = Memory::AddressUntag(new_pc);
+    pc_modified_ = true;
+  }
+  VIXL_DEPRECATED("WritePc", void set_pc(const Instruction* new_pc)) {
+    return WritePc(new_pc);
+  }
+
+  void IncrementPc() {
+    if (!pc_modified_) {
+      pc_ = pc_->GetNextInstruction();
+    }
+  }
+  VIXL_DEPRECATED("IncrementPc", void increment_pc()) { IncrementPc(); }
+
+  void ExecuteInstruction() {
+    // The program counter should always be aligned.
+    VIXL_ASSERT(IsWordAligned(pc_));
+    pc_modified_ = false;
+
+    // decoder_->Decode(...) triggers at least the following visitors:
+    //  1. The CPUFeaturesAuditor (`cpu_features_auditor_`).
+    //  2. The PrintDisassembler (`print_disasm_`), if enabled.
+    //  3. The Simulator (`this`).
+    // User can add additional visitors at any point, but the Simulator requires
+    // that the ordering above is preserved.
+    decoder_->Decode(pc_);
+    IncrementPc();
+    LogAllWrittenRegisters();
+
+    VIXL_CHECK(cpu_features_auditor_.InstructionIsAvailable());
+  }
+
+// Declare all Visitor functions.
+#define DECLARE(A) \
+  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
+  VISITOR_LIST_THAT_RETURN(DECLARE)
+#undef DECLARE
+
+#define DECLARE(A)                                                     \
+  VIXL_DEBUG_NO_RETURN virtual void Visit##A(const Instruction* instr) \
+      VIXL_OVERRIDE;
+  VISITOR_LIST_THAT_DONT_RETURN(DECLARE)
+#undef DECLARE
+
+
+  // Integer register accessors.
+
+  // Basic accessor: Read the register as the specified type.
+  template <typename T>
+  T ReadRegister(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+    VIXL_ASSERT(
+        code < kNumberOfRegisters ||
+        ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)));
+    if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+      T result;
+      memset(&result, 0, sizeof(result));
+      return result;
+    }
+
+    if ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)) {
+      code = 31;
+    }
+
+    return registers_[code].Get<T>();
+  }
+  template <typename T>
+  VIXL_DEPRECATED("ReadRegister",
+                  T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister)
+                      const) {
+    return ReadRegister<T>(code, r31mode);
+  }
+
+  // Common specialized accessors for the ReadRegister() template.
+  int32_t ReadWRegister(unsigned code,
+                        Reg31Mode r31mode = Reg31IsZeroRegister) const {
+    return ReadRegister<int32_t>(code, r31mode);
+  }
+  VIXL_DEPRECATED("ReadWRegister",
+                  int32_t wreg(unsigned code,
+                               Reg31Mode r31mode = Reg31IsZeroRegister) const) {
+    return ReadWRegister(code, r31mode);
+  }
+
+  int64_t ReadXRegister(unsigned code,
+                        Reg31Mode r31mode = Reg31IsZeroRegister) const {
+    return ReadRegister<int64_t>(code, r31mode);
+  }
+  VIXL_DEPRECATED("ReadXRegister",
+                  int64_t xreg(unsigned code,
+                               Reg31Mode r31mode = Reg31IsZeroRegister) const) {
+    return ReadXRegister(code, r31mode);
+  }
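Register 31 is the interesting case in these accessors: depending on `Reg31Mode` it denotes either the zero register or the stack pointer. A small standalone sketch of the convention (stand-in types, not VIXL's):

    #include <cassert>
    #include <cstdint>

    enum Reg31Mode { Reg31IsZeroRegister, Reg31IsStackPointer };

    struct RegisterBank {
      uint64_t regs[32] = {};
      uint64_t Read(unsigned code, Reg31Mode mode) const {
        if ((code == 31) && (mode == Reg31IsZeroRegister)) return 0;  // xzr
        return regs[code];  // sp when mode == Reg31IsStackPointer
      }
    };

    int main() {
      RegisterBank bank;
      bank.regs[31] = 0x7ffe0000;  // simulated stack pointer
      assert(bank.Read(31, Reg31IsZeroRegister) == 0);
      assert(bank.Read(31, Reg31IsStackPointer) == 0x7ffe0000);
      return 0;
    }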
+
+  // As above, with parameterized size and return type. The value is
+  // either zero-extended or truncated to fit, as required.
+  template <typename T>
+  T ReadRegister(unsigned size,
+                 unsigned code,
+                 Reg31Mode r31mode = Reg31IsZeroRegister) const {
+    uint64_t raw;
+    switch (size) {
+      case kWRegSize:
+        raw = ReadRegister<uint32_t>(code, r31mode);
+        break;
+      case kXRegSize:
+        raw = ReadRegister<uint64_t>(code, r31mode);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return 0;
+    }
+
+    T result;
+    VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+    // Copy the result and truncate to fit. This assumes a little-endian host.
+    memcpy(&result, &raw, sizeof(result));
+    return result;
+  }
+  template <typename T>
+  VIXL_DEPRECATED("ReadRegister",
+                  T reg(unsigned size,
+                        unsigned code,
+                        Reg31Mode r31mode = Reg31IsZeroRegister) const) {
+    return ReadRegister<T>(size, code, r31mode);
+  }
+
+  // Use int64_t by default if T is not specified.
+  int64_t ReadRegister(unsigned size,
+                       unsigned code,
+                       Reg31Mode r31mode = Reg31IsZeroRegister) const {
+    return ReadRegister<int64_t>(size, code, r31mode);
+  }
+  VIXL_DEPRECATED("ReadRegister",
+                  int64_t reg(unsigned size,
+                              unsigned code,
+                              Reg31Mode r31mode = Reg31IsZeroRegister) const) {
+    return ReadRegister(size, code, r31mode);
+  }
+
+  enum RegLogMode { LogRegWrites, NoRegLog };
+
+  // Write 'value' into an integer register. The value is zero-extended. This
+  // behaviour matches AArch64 register writes.
+  template <typename T>
+  void WriteRegister(unsigned code,
+                     T value,
+                     RegLogMode log_mode = LogRegWrites,
+                     Reg31Mode r31mode = Reg31IsZeroRegister) {
+    if (sizeof(T) < kWRegSizeInBytes) {
+      // We use a C-style cast on purpose here.
+      // Since we do not have access to 'constexpr if', the casts in this `if`
+      // must be valid even if we know the code will never be executed, in
+      // particular when `T` is a pointer type.
+      int64_t tmp_64bit = (int64_t)value;
+      int32_t tmp_32bit = static_cast<int32_t>(tmp_64bit);
+      WriteRegister<int32_t>(code, tmp_32bit, log_mode, r31mode);
+      return;
+    }
+
+    VIXL_ASSERT((sizeof(T) == kWRegSizeInBytes) ||
+                (sizeof(T) == kXRegSizeInBytes));
+    VIXL_ASSERT(
+        code < kNumberOfRegisters ||
+        ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)));
+
+    if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+      return;
+    }
+
+    if ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)) {
+      code = 31;
+    }
+
+    registers_[code].Write(value);
+
+    if (log_mode == LogRegWrites) LogRegister(code, r31mode);
+  }
+  template <typename T>
+  VIXL_DEPRECATED("WriteRegister",
+                  void set_reg(unsigned code,
+                               T value,
+                               RegLogMode log_mode = LogRegWrites,
+                               Reg31Mode r31mode = Reg31IsZeroRegister)) {
+    WriteRegister<T>(code, value, log_mode, r31mode);
+  }
+
+  // Common specialized accessors for the set_reg() template.
+  void WriteWRegister(unsigned code,
+                      int32_t value,
+                      RegLogMode log_mode = LogRegWrites,
+                      Reg31Mode r31mode = Reg31IsZeroRegister) {
+    WriteRegister<int32_t>(code, value, log_mode, r31mode);
+  }
+  VIXL_DEPRECATED("WriteWRegister",
+                  void set_wreg(unsigned code,
+                                int32_t value,
+                                RegLogMode log_mode = LogRegWrites,
+                                Reg31Mode r31mode = Reg31IsZeroRegister)) {
+    WriteWRegister(code, value, log_mode, r31mode);
+  }
+
+  void WriteXRegister(unsigned code,
+                      int64_t value,
+                      RegLogMode log_mode = LogRegWrites,
+                      Reg31Mode r31mode = Reg31IsZeroRegister) {
+    WriteRegister<int64_t>(code, value, log_mode, r31mode);
+  }
+  VIXL_DEPRECATED("WriteXRegister",
+                  void set_xreg(unsigned code,
+                                int64_t value,
+                                RegLogMode log_mode = LogRegWrites,
+                                Reg31Mode r31mode = Reg31IsZeroRegister)) {
+    WriteXRegister(code, value, log_mode, r31mode);
+  }
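The zero-extension rule these writers implement is worth seeing in isolation: storing a W-sized value clears the upper half of the X register. A sketch, assuming a little-endian host as the comments above do:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t x0 = 0xdeadbeefcafef00dULL;
      int32_t w_value = -1;  // 0xffffffff as raw bits

      // Emulate a W-register write: clear, then copy the low four bytes.
      x0 = 0;
      std::memcpy(&x0, &w_value, sizeof(w_value));
      assert(x0 == 0x00000000ffffffffULL);  // upper 32 bits now read as zero
      return 0;
    }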
+
+  // As above, with parameterized size and type. The value is either
+  // zero-extended or truncated to fit, as required.
+  template <typename T>
+  void WriteRegister(unsigned size,
+                     unsigned code,
+                     T value,
+                     RegLogMode log_mode = LogRegWrites,
+                     Reg31Mode r31mode = Reg31IsZeroRegister) {
+    // Zero-extend the input.
+    uint64_t raw = 0;
+    VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw));
+    memcpy(&raw, &value, sizeof(value));
+
+    // Write (and possibly truncate) the value.
+    switch (size) {
+      case kWRegSize:
+        WriteRegister(code, static_cast<uint32_t>(raw), log_mode, r31mode);
+        break;
+      case kXRegSize:
+        WriteRegister(code, raw, log_mode, r31mode);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        return;
+    }
+  }
+  template <typename T>
+  VIXL_DEPRECATED("WriteRegister",
+                  void set_reg(unsigned size,
+                               unsigned code,
+                               T value,
+                               RegLogMode log_mode = LogRegWrites,
+                               Reg31Mode r31mode = Reg31IsZeroRegister)) {
+    WriteRegister(size, code, value, log_mode, r31mode);
+  }
+
+  // Common specialized accessors for the set_reg() template.
+
+  // Commonly-used special cases.
+  template <typename T>
+  void WriteLr(T value) {
+    WriteRegister(kLinkRegCode, value);
+  }
+  template <typename T>
+  VIXL_DEPRECATED("WriteLr", void set_lr(T value)) {
+    WriteLr(value);
+  }
+
+  template <typename T>
+  void WriteSp(T value) {
+    WriteRegister(31, value, LogRegWrites, Reg31IsStackPointer);
+  }
+  template <typename T>
+  VIXL_DEPRECATED("WriteSp", void set_sp(T value)) {
+    WriteSp(value);
+  }
+
+  // Vector register accessors.
+  // These are equivalent to the integer register accessors, but for vector
+  // registers.
+
+  // A structure for representing a 128-bit Q register.
+  struct qreg_t {
+    uint8_t val[kQRegSizeInBytes];
+  };
+
+  // Basic accessor: read the register as the specified type.
+  template <typename T>
+  T ReadVRegister(unsigned code) const {
+    VIXL_STATIC_ASSERT(
+        (sizeof(T) == kBRegSizeInBytes) || (sizeof(T) == kHRegSizeInBytes) ||
+        (sizeof(T) == kSRegSizeInBytes) || (sizeof(T) == kDRegSizeInBytes) ||
+        (sizeof(T) == kQRegSizeInBytes));
+    VIXL_ASSERT(code < kNumberOfVRegisters);
+
+    return vregisters_[code].Get<T>();
+  }
+  template <typename T>
+  VIXL_DEPRECATED("ReadVRegister", T vreg(unsigned code) const) {
+    return ReadVRegister<T>(code);
+  }
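Because the template reads whatever type the caller names out of the same 128-bit storage, one register can be viewed as a float or as its raw encoding. A standalone sketch of that dual view (stand-in storage, not `SimVRegister`):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint8_t vreg[16] = {};  // 128-bit Q-register storage

      float one = 1.0f;
      std::memcpy(vreg, &one, sizeof(one));  // like WriteSRegister(code, 1.0f)

      uint32_t bits;
      std::memcpy(&bits, vreg, sizeof(bits));  // like ReadSRegisterBits(code)
      assert(bits == 0x3f800000);  // IEEE-754 single-precision encoding of 1.0
      return 0;
    }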
+
+  // Common specialized accessors for the vreg() template.
+  int8_t ReadBRegister(unsigned code) const {
+    return ReadVRegister<int8_t>(code);
+  }
+  VIXL_DEPRECATED("ReadBRegister", int8_t breg(unsigned code) const) {
+    return ReadBRegister(code);
+  }
+
+  vixl::internal::SimFloat16 ReadHRegister(unsigned code) const {
+    return RawbitsToFloat16(ReadHRegisterBits(code));
+  }
+  VIXL_DEPRECATED("ReadHRegister", int16_t hreg(unsigned code) const) {
+    return Float16ToRawbits(ReadHRegister(code));
+  }
+
+  uint16_t ReadHRegisterBits(unsigned code) const {
+    return ReadVRegister<uint16_t>(code);
+  }
+
+  float ReadSRegister(unsigned code) const {
+    return ReadVRegister<float>(code);
+  }
+  VIXL_DEPRECATED("ReadSRegister", float sreg(unsigned code) const) {
+    return ReadSRegister(code);
+  }
+
+  uint32_t ReadSRegisterBits(unsigned code) const {
+    return ReadVRegister<uint32_t>(code);
+  }
+  VIXL_DEPRECATED("ReadSRegisterBits",
+                  uint32_t sreg_bits(unsigned code) const) {
+    return ReadSRegisterBits(code);
+  }
+
+  double ReadDRegister(unsigned code) const {
+    return ReadVRegister<double>(code);
+  }
+  VIXL_DEPRECATED("ReadDRegister", double dreg(unsigned code) const) {
+    return ReadDRegister(code);
+  }
+
+  uint64_t ReadDRegisterBits(unsigned code) const {
+    return ReadVRegister<uint64_t>(code);
+  }
+  VIXL_DEPRECATED("ReadDRegisterBits",
+                  uint64_t dreg_bits(unsigned code) const) {
+    return ReadDRegisterBits(code);
+  }
+
+  qreg_t ReadQRegister(unsigned code) const {
+    return ReadVRegister<qreg_t>(code);
+  }
+  VIXL_DEPRECATED("ReadQRegister", qreg_t qreg(unsigned code) const) {
+    return ReadQRegister(code);
+  }
+
+  // As above, with parameterized size and return type. The value is
+  // either zero-extended or truncated to fit, as required.
+  template <typename T>
+  T ReadVRegister(unsigned size, unsigned code) const {
+    uint64_t raw = 0;
+    T result;
+
+    switch (size) {
+      case kSRegSize:
+        raw = ReadVRegister<uint32_t>(code);
+        break;
+      case kDRegSize:
+        raw = ReadVRegister<uint64_t>(code);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        break;
+    }
+
+    VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+    // Copy the result and truncate to fit. This assumes a little-endian host.
+    memcpy(&result, &raw, sizeof(result));
+    return result;
+  }
+  template <typename T>
+  VIXL_DEPRECATED("ReadVRegister", T vreg(unsigned size, unsigned code) const) {
+    return ReadVRegister<T>(size, code);
+  }
+
+  SimVRegister& ReadVRegister(unsigned code) { return vregisters_[code]; }
+  VIXL_DEPRECATED("ReadVRegister", SimVRegister& vreg(unsigned code)) {
+    return ReadVRegister(code);
+  }
+
+  // Basic accessor: Write the specified value.
+  template <typename T>
+  void WriteVRegister(unsigned code,
+                      T value,
+                      RegLogMode log_mode = LogRegWrites) {
+    VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) ||
+                       (sizeof(value) == kHRegSizeInBytes) ||
+                       (sizeof(value) == kSRegSizeInBytes) ||
+                       (sizeof(value) == kDRegSizeInBytes) ||
+                       (sizeof(value) == kQRegSizeInBytes));
+    VIXL_ASSERT(code < kNumberOfVRegisters);
+    vregisters_[code].Write(value);
+
+    if (log_mode == LogRegWrites) {
+      LogVRegister(code, GetPrintRegisterFormat(value));
+    }
+  }
+  template <typename T>
+  VIXL_DEPRECATED("WriteVRegister",
+                  void set_vreg(unsigned code,
+                                T value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    WriteVRegister(code, value, log_mode);
+  }
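The `VIXL_STATIC_ASSERT` in the writer rejects, at compile time, any type that does not correspond to a B, H, S, D or Q view of the register. The same gate in miniature:

    #include <cstdint>
    #include <cstring>

    template <typename T>
    void WriteLane(uint8_t (&storage)[16], T value) {
      static_assert(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
                        sizeof(T) == 8 || sizeof(T) == 16,
                    "not a B/H/S/D/Q-sized value");
      std::memcpy(storage, &value, sizeof(value));  // overwrite the low lane
    }

    int main() {
      uint8_t vreg[16] = {};
      WriteLane(vreg, 3.14);         // 8 bytes: a D-sized write
      WriteLane(vreg, uint16_t{7});  // 2 bytes: an H-sized write
      // A 12-byte struct would be rejected when this compiles.
      return 0;
    }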
+
+  // Common specialized accessors for the WriteVRegister() template.
+  void WriteBRegister(unsigned code,
+                      int8_t value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteBRegister",
+                  void set_breg(unsigned code,
+                                int8_t value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    return WriteBRegister(code, value, log_mode);
+  }
+
+  void WriteHRegister(unsigned code,
+                      vixl::internal::SimFloat16 value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, Float16ToRawbits(value), log_mode);
+  }
+
+  void WriteHRegister(unsigned code,
+                      int16_t value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteHRegister",
+                  void set_hreg(unsigned code,
+                                int16_t value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    return WriteHRegister(code, value, log_mode);
+  }
+
+  void WriteSRegister(unsigned code,
+                      float value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteSRegister",
+                  void set_sreg(unsigned code,
+                                float value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    WriteSRegister(code, value, log_mode);
+  }
+
+  void WriteSRegisterBits(unsigned code,
+                          uint32_t value,
+                          RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteSRegisterBits",
+                  void set_sreg_bits(unsigned code,
+                                     uint32_t value,
+                                     RegLogMode log_mode = LogRegWrites)) {
+    WriteSRegisterBits(code, value, log_mode);
+  }
+
+  void WriteDRegister(unsigned code,
+                      double value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteDRegister",
+                  void set_dreg(unsigned code,
+                                double value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    WriteDRegister(code, value, log_mode);
+  }
+
+  void WriteDRegisterBits(unsigned code,
+                          uint64_t value,
+                          RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteDRegisterBits",
+                  void set_dreg_bits(unsigned code,
+                                     uint64_t value,
+                                     RegLogMode log_mode = LogRegWrites)) {
+    WriteDRegisterBits(code, value, log_mode);
+  }
+
+  void WriteQRegister(unsigned code,
+                      qreg_t value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister(code, value, log_mode);
+  }
+  VIXL_DEPRECATED("WriteQRegister",
+                  void set_qreg(unsigned code,
+                                qreg_t value,
+                                RegLogMode log_mode = LogRegWrites)) {
+    WriteQRegister(code, value, log_mode);
+  }
+
+  template <typename T>
+  T ReadRegister(Register reg) const {
+    return ReadRegister<T>(reg.GetCode(), Reg31IsZeroRegister);
+  }
+
+  template <typename T>
+  void WriteRegister(Register reg,
+                     T value,
+                     RegLogMode log_mode = LogRegWrites) {
+    WriteRegister<T>(reg.GetCode(), value, log_mode, Reg31IsZeroRegister);
+  }
+
+  template <typename T>
+  T ReadVRegister(VRegister vreg) const {
+    return ReadVRegister<T>(vreg.GetCode());
+  }
+
+  template <typename T>
+  void WriteVRegister(VRegister vreg,
+                      T value,
+                      RegLogMode log_mode = LogRegWrites) {
+    WriteVRegister<T>(vreg.GetCode(), value, log_mode);
+  }
+
+  template <typename T>
+  T ReadCPURegister(CPURegister reg) const {
+    if (reg.IsVRegister()) {
+      return ReadVRegister<T>(VRegister(reg));
+    } else {
+      return ReadRegister<T>(Register(reg));
+    }
+  }
+
+  template <typename T>
+  void WriteCPURegister(CPURegister reg,
+                        T value,
+                        RegLogMode log_mode = LogRegWrites) {
+    if (reg.IsVRegister()) {
+      WriteVRegister<T>(VRegister(reg), value, log_mode);
+    } else {
+      WriteRegister<T>(Register(reg), value, log_mode);
+    }
+  }
+
+  uint64_t ComputeMemOperandAddress(const MemOperand& mem_op) const;
+
+  template <typename T>
+  T ReadGenericOperand(GenericOperand operand) const {
+    if (operand.IsCPURegister()) {
+      return ReadCPURegister<T>(operand.GetCPURegister());
+    } else {
+      VIXL_ASSERT(operand.IsMemOperand());
+      return Memory::Read<T>(ComputeMemOperandAddress(operand.GetMemOperand()));
+    }
+  }
+
+  template <typename T>
+  void WriteGenericOperand(GenericOperand operand,
+                           T value,
+                           RegLogMode log_mode = LogRegWrites) {
+    if (operand.IsCPURegister()) {
+      WriteCPURegister(operand.GetCPURegister(), value, log_mode);
+    } else {
+      VIXL_ASSERT(operand.IsMemOperand());
+      Memory::Write(ComputeMemOperandAddress(operand.GetMemOperand()), value);
+    }
+  }
+
+  bool ReadN() const { return nzcv_.GetN() != 0; }
+  VIXL_DEPRECATED("ReadN", bool N() const) { return ReadN(); }
+
+  bool ReadZ() const { return nzcv_.GetZ() != 0; }
+  VIXL_DEPRECATED("ReadZ", bool Z() const) { return ReadZ(); }
+
+  bool ReadC() const { return nzcv_.GetC() != 0; }
+  VIXL_DEPRECATED("ReadC", bool C() const) { return ReadC(); }
+
+  bool ReadV() const { return nzcv_.GetV() != 0; }
+  VIXL_DEPRECATED("ReadV", bool V() const) { return ReadV(); }
+
+  SimSystemRegister& ReadNzcv() { return nzcv_; }
+  VIXL_DEPRECATED("ReadNzcv", SimSystemRegister& nzcv()) { return ReadNzcv(); }
+
+  // TODO: Find a way to make the fpcr_ members return the proper types, so
+  // these accessors are not necessary.
+  FPRounding ReadRMode() const {
+    return static_cast<FPRounding>(fpcr_.GetRMode());
+  }
+  VIXL_DEPRECATED("ReadRMode", FPRounding RMode()) { return ReadRMode(); }
+
+  UseDefaultNaN ReadDN() const {
+    return fpcr_.GetDN() != 0 ? kUseDefaultNaN : kIgnoreDefaultNaN;
+  }
+
+  VIXL_DEPRECATED("ReadDN", bool DN()) {
+    return ReadDN() == kUseDefaultNaN ? true : false;
+  }
+
+  SimSystemRegister& ReadFpcr() { return fpcr_; }
+  VIXL_DEPRECATED("ReadFpcr", SimSystemRegister& fpcr()) { return ReadFpcr(); }
+
+  // Specify relevant register formats for Print(V)Register and related helpers.
+  enum PrintRegisterFormat {
+    // The lane size.
+    kPrintRegLaneSizeB = 0 << 0,
+    kPrintRegLaneSizeH = 1 << 0,
+    kPrintRegLaneSizeS = 2 << 0,
+    kPrintRegLaneSizeW = kPrintRegLaneSizeS,
+    kPrintRegLaneSizeD = 3 << 0,
+    kPrintRegLaneSizeX = kPrintRegLaneSizeD,
+    kPrintRegLaneSizeQ = 4 << 0,
+
+    kPrintRegLaneSizeOffset = 0,
+    kPrintRegLaneSizeMask = 7 << 0,
+
+    // The lane count.
+    kPrintRegAsScalar = 0,
+    kPrintRegAsDVector = 1 << 3,
+    kPrintRegAsQVector = 2 << 3,
+
+    kPrintRegAsVectorMask = 3 << 3,
+
+    // Indicate floating-point format lanes. (This flag is only supported for
+    // S-, H-, and D-sized lanes.)
+    kPrintRegAsFP = 1 << 5,
+
+    // Supported combinations.
+ + kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar, + kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar, + kPrintHReg = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, + kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + + kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar, + kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector, + kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector, + kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar, + kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector, + kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector, + kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar, + kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector, + kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector, + kPrintReg1HFP = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg4HFP = kPrintRegLaneSizeH | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg8HFP = kPrintRegLaneSizeH | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar, + kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector, + kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar + }; + + unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) { + return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset; + } + + unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegLaneSizeInBytesLog2(format); + } + + unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) { + if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2; + if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2; + + // Scalar types. 
+    return GetPrintRegLaneSizeInBytesLog2(format);
+  }
+
+  unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) {
+    return 1 << GetPrintRegSizeInBytesLog2(format);
+  }
+
+  unsigned GetPrintRegLaneCount(PrintRegisterFormat format) {
+    unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format);
+    unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format);
+    VIXL_ASSERT(reg_size_log2 >= lane_size_log2);
+    return 1 << (reg_size_log2 - lane_size_log2);
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size,
+                                                    unsigned lane_size);
+
+  PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) {
+    return GetPrintRegisterFormatForSize(size, size);
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) {
+    switch (size) {
+      default:
+        VIXL_UNREACHABLE();
+        return kPrintDReg;
+      case kDRegSizeInBytes:
+        return kPrintDReg;
+      case kSRegSizeInBytes:
+        return kPrintSReg;
+      case kHRegSizeInBytes:
+        return kPrintHReg;
+    }
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) {
+    if ((GetPrintRegLaneSizeInBytes(format) == kHRegSizeInBytes) ||
+        (GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) ||
+        (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) {
+      return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP);
+    }
+    return format;
+  }
+
+  template <typename T>
+  PrintRegisterFormat GetPrintRegisterFormat(T value) {
+    return GetPrintRegisterFormatForSize(sizeof(value));
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormat(double value) {
+    VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes);
+    return GetPrintRegisterFormatForSizeFP(sizeof(value));
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormat(float value) {
+    VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes);
+    return GetPrintRegisterFormatForSizeFP(sizeof(value));
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormat(Float16 value) {
+    VIXL_STATIC_ASSERT(sizeof(Float16ToRawbits(value)) == kHRegSizeInBytes);
+    return GetPrintRegisterFormatForSizeFP(sizeof(Float16ToRawbits(value)));
+  }
+
+  PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform);
+  PrintRegisterFormat GetPrintRegisterFormatFP(VectorFormat vform);
+
+  // Print all registers of the specified types.
+  void PrintRegisters();
+  void PrintVRegisters();
+  void PrintSystemRegisters();
+
+  // As above, but only print the registers that have been updated.
+  void PrintWrittenRegisters();
+  void PrintWrittenVRegisters();
+
+  // As above, but respect LOG_REG and LOG_VREG.
+  void LogWrittenRegisters() {
+    if (GetTraceParameters() & LOG_REGS) PrintWrittenRegisters();
+  }
+  void LogWrittenVRegisters() {
+    if (GetTraceParameters() & LOG_VREGS) PrintWrittenVRegisters();
+  }
+  void LogAllWrittenRegisters() {
+    LogWrittenRegisters();
+    LogWrittenVRegisters();
+  }
+
+  // Print individual register values (after update).
+  void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+  void PrintVRegister(unsigned code, PrintRegisterFormat format);
+  void PrintSystemRegister(SystemRegister id);
+  void PrintTakenBranch(const Instruction* target);
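The format word is a small bitfield: the lane-size log2 lives in the low bits and the scalar/D/Q kind in the two bits above, so the lane count falls out by subtraction. A worked decode of the "4S" arrangement, mirroring the helpers above:

    #include <cassert>

    int main() {
      // Mirrors the enum above: lane size log2 in bits [2:0], vector kind in
      // bits [4:3] (0 = scalar, 1 = D vector, 2 = Q vector).
      const unsigned kPrintRegLaneSizeS = 2;       // log2(4-byte lanes)
      const unsigned kPrintRegAsQVector = 2 << 3;  // 16-byte register

      unsigned format = kPrintRegLaneSizeS | kPrintRegAsQVector;  // "4S"
      unsigned lane_size_log2 = format & 7;
      unsigned reg_size_log2;
      switch (format & (3 << 3)) {
        case 1 << 3: reg_size_log2 = 3; break;           // D vector: 8 bytes
        case 2 << 3: reg_size_log2 = 4; break;           // Q vector: 16 bytes
        default: reg_size_log2 = lane_size_log2; break;  // scalar
      }
      assert((1u << (reg_size_log2 - lane_size_log2)) == 4);  // 4 x 32-bit lanes
      return 0;
    }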
+
+  // Like Print* (above), but respect GetTraceParameters().
+  void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+    if (GetTraceParameters() & LOG_REGS) PrintRegister(code, r31mode);
+  }
+  void LogVRegister(unsigned code, PrintRegisterFormat format) {
+    if (GetTraceParameters() & LOG_VREGS) PrintVRegister(code, format);
+  }
+  void LogSystemRegister(SystemRegister id) {
+    if (GetTraceParameters() & LOG_SYSREGS) PrintSystemRegister(id);
+  }
+  void LogTakenBranch(const Instruction* target) {
+    if (GetTraceParameters() & LOG_BRANCH) PrintTakenBranch(target);
+  }
+
+  // Print memory accesses.
+  void PrintRead(uintptr_t address,
+                 unsigned reg_code,
+                 PrintRegisterFormat format);
+  void PrintWrite(uintptr_t address,
+                  unsigned reg_code,
+                  PrintRegisterFormat format);
+  void PrintVRead(uintptr_t address,
+                  unsigned reg_code,
+                  PrintRegisterFormat format,
+                  unsigned lane);
+  void PrintVWrite(uintptr_t address,
+                   unsigned reg_code,
+                   PrintRegisterFormat format,
+                   unsigned lane);
+
+  // Like Print* (above), but respect GetTraceParameters().
+  void LogRead(uintptr_t address,
+               unsigned reg_code,
+               PrintRegisterFormat format) {
+    if (GetTraceParameters() & LOG_REGS) PrintRead(address, reg_code, format);
+  }
+  void LogWrite(uintptr_t address,
+                unsigned reg_code,
+                PrintRegisterFormat format) {
+    if (GetTraceParameters() & LOG_WRITE) PrintWrite(address, reg_code, format);
+  }
+  void LogVRead(uintptr_t address,
+                unsigned reg_code,
+                PrintRegisterFormat format,
+                unsigned lane = 0) {
+    if (GetTraceParameters() & LOG_VREGS) {
+      PrintVRead(address, reg_code, format, lane);
+    }
+  }
+  void LogVWrite(uintptr_t address,
+                 unsigned reg_code,
+                 PrintRegisterFormat format,
+                 unsigned lane = 0) {
+    if (GetTraceParameters() & LOG_WRITE) {
+      PrintVWrite(address, reg_code, format, lane);
+    }
+  }
+
+  // Helper functions for register tracing.
+  void PrintRegisterRawHelper(unsigned code,
+                              Reg31Mode r31mode,
+                              int size_in_bytes = kXRegSizeInBytes);
+  void PrintVRegisterRawHelper(unsigned code,
+                               int bytes = kQRegSizeInBytes,
+                               int lsb = 0);
+  void PrintVRegisterFPHelper(unsigned code,
+                              unsigned lane_size_in_bytes,
+                              int lane_count = 1,
+                              int rightmost_lane = 0);
+
+  VIXL_NO_RETURN void DoUnreachable(const Instruction* instr);
+  void DoTrace(const Instruction* instr);
+  void DoLog(const Instruction* instr);
+
+  static const char* WRegNameForCode(unsigned code,
+                                     Reg31Mode mode = Reg31IsZeroRegister);
+  static const char* XRegNameForCode(unsigned code,
+                                     Reg31Mode mode = Reg31IsZeroRegister);
+  static const char* HRegNameForCode(unsigned code);
+  static const char* SRegNameForCode(unsigned code);
+  static const char* DRegNameForCode(unsigned code);
+  static const char* VRegNameForCode(unsigned code);
+
+  bool IsColouredTrace() const { return coloured_trace_; }
+  VIXL_DEPRECATED("IsColouredTrace", bool coloured_trace() const) {
+    return IsColouredTrace();
+  }
+
+  void SetColouredTrace(bool value);
+  VIXL_DEPRECATED("SetColouredTrace", void set_coloured_trace(bool value)) {
+    SetColouredTrace(value);
+  }
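Each logging category is one bit in the trace-parameter word, so the `Log*` wrappers reduce to a mask test. A sketch of that gating (the `LOG_*` values here are illustrative, not the ones from simulator-constants-aarch64.h):

    #include <cstdio>

    enum TraceBits { LOG_DISASM = 1 << 0, LOG_REGS = 1 << 1, LOG_WRITE = 1 << 2 };

    int main() {
      int trace_parameters = LOG_REGS | LOG_WRITE;
      if (trace_parameters & LOG_REGS) std::printf("would print registers\n");
      if (trace_parameters & LOG_DISASM) std::printf("would print disassembly\n");
      return 0;
    }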
+
+  // Values of trace parameters defined in simulator-constants-aarch64.h in
+  // enum TraceParameters.
+  int GetTraceParameters() const { return trace_parameters_; }
+  VIXL_DEPRECATED("GetTraceParameters", int trace_parameters() const) {
+    return GetTraceParameters();
+  }
+
+  void SetTraceParameters(int parameters);
+  VIXL_DEPRECATED("SetTraceParameters",
+                  void set_trace_parameters(int parameters)) {
+    SetTraceParameters(parameters);
+  }
+
+  void SetInstructionStats(bool value);
+  VIXL_DEPRECATED("SetInstructionStats",
+                  void set_instruction_stats(bool value)) {
+    SetInstructionStats(value);
+  }
+
+  // Clear the simulated local monitor to force the next store-exclusive
+  // instruction to fail.
+  void ClearLocalMonitor() { local_monitor_.Clear(); }
+
+  void SilenceExclusiveAccessWarning() {
+    print_exclusive_access_warning_ = false;
+  }
+
+  enum PointerType { kDataPointer, kInstructionPointer };
+
+  struct PACKey {
+    uint64_t high;
+    uint64_t low;
+    int number;
+  };
+
+  // Current implementation is that all pointers are tagged.
+  bool HasTBI(uint64_t ptr, PointerType type) {
+    USE(ptr, type);
+    return true;
+  }
+
+  // Current implementation uses 48-bit virtual addresses.
+  int GetBottomPACBit(uint64_t ptr, int ttbr) {
+    USE(ptr, ttbr);
+    VIXL_ASSERT((ttbr == 0) || (ttbr == 1));
+    return 48;
+  }
+
+  // The top PAC bit is 55 for the purposes of relative bit fields with TBI,
+  // however bit 55 is the TTBR bit regardless of TBI so isn't part of the PAC
+  // codes in pointers.
+  int GetTopPACBit(uint64_t ptr, PointerType type) {
+    return HasTBI(ptr, type) ? 55 : 63;
+  }
+
+  // Armv8.3 Pointer authentication helpers.
+  uint64_t CalculatePACMask(uint64_t ptr, PointerType type, int ext_bit);
+  uint64_t ComputePAC(uint64_t data, uint64_t context, PACKey key);
+  uint64_t AuthPAC(uint64_t ptr,
+                   uint64_t context,
+                   PACKey key,
+                   PointerType type);
+  uint64_t AddPAC(uint64_t ptr, uint64_t context, PACKey key, PointerType type);
+  uint64_t StripPAC(uint64_t ptr, PointerType type);
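Given 48-bit virtual addresses and TBI, these two helpers imply that the PAC field of a data pointer sits in bits 54:48. A quick standalone computation of that mask:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int bottom = 48;  // GetBottomPACBit(): 48-bit VA space
      const int top = 55;     // GetTopPACBit(): bit 55 is the TTBR bit
      uint64_t pac_mask = 0;
      for (int bit = bottom; bit < top; bit++) pac_mask |= UINT64_C(1) << bit;
      std::printf("PAC field mask: 0x%016llx\n",
                  (unsigned long long)pac_mask);  // 0x007f000000000000
      return 0;
    }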
+
+  // The common CPUFeatures interface with the set of available features.
+
+  CPUFeatures* GetCPUFeatures() {
+    return cpu_features_auditor_.GetCPUFeatures();
+  }
+
+  void SetCPUFeatures(const CPUFeatures& cpu_features) {
+    cpu_features_auditor_.SetCPUFeatures(cpu_features);
+  }
+
+  // The set of features that the simulator has encountered.
+  const CPUFeatures& GetSeenFeatures() {
+    return cpu_features_auditor_.GetSeenFeatures();
+  }
+  void ResetSeenFeatures() { cpu_features_auditor_.ResetSeenFeatures(); }
+
+// Runtime call emulation support.
+// It requires VIXL's ABI features, and C++11 or greater.
+// Also, the initialisation of the tuples in RuntimeCall(Non)Void is incorrect
+// in GCC before 4.9.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51253
+#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \
+    (defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
+
+#define VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+
+// The implementation of the runtime call helpers require the functionality
+// provided by `std::index_sequence`. It is only available from C++14, but
+// we want runtime call simulation to work from C++11, so we emulate if
+// necessary.
+#if __cplusplus >= 201402L
+  template <std::size_t... I>
+  using local_index_sequence = std::index_sequence<I...>;
+  template <typename... P>
+  using __local_index_sequence_for = std::index_sequence_for<P...>;
+#else
+  // Emulate the behaviour of `std::index_sequence` and
+  // `std::index_sequence_for`.
+  // Naming follows the `std` names, prefixed with `emulated_`.
+  template <std::size_t... I>
+  struct emulated_index_sequence {};
+
+  // A recursive template to create a sequence of indexes.
+  // The base case (for `N == 0`) is declared outside of the class scope, as
+  // required by C++.
+  template <std::size_t N, std::size_t... I>
+  struct emulated_make_index_sequence_helper
+      : emulated_make_index_sequence_helper<N - 1, N - 1, I...> {};
+
+  template <std::size_t N>
+  struct emulated_make_index_sequence : emulated_make_index_sequence_helper<N> {
+  };
+
+  template <typename... P>
+  struct emulated_index_sequence_for
+      : emulated_make_index_sequence<sizeof...(P)> {};
+
+  template <std::size_t... I>
+  using local_index_sequence = emulated_index_sequence<I...>;
+  template <typename... P>
+  using __local_index_sequence_for = emulated_index_sequence_for<P...>;
+#endif
+
+  // Expand the argument tuple and perform the call.
+  template <typename R, typename... P, std::size_t... I>
+  R DoRuntimeCall(R (*function)(P...),
+                  std::tuple<P...> arguments,
+                  local_index_sequence<I...>) {
+    return function(std::get<I>(arguments)...);
+  }
+
+  template <typename R, typename... P>
+  void RuntimeCallNonVoid(R (*function)(P...)) {
+    ABI abi;
+    std::tuple<P...> argument_operands{
+        ReadGenericOperand<P>(abi.GetNextParameterGenericOperand<P>())...};
+    R return_value = DoRuntimeCall(function,
+                                   argument_operands,
+                                   __local_index_sequence_for<P...>{});
+    WriteGenericOperand(abi.GetReturnGenericOperand(), return_value);
+  }
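The tuple-plus-index-sequence expansion used by `DoRuntimeCall()` is a standard C++ trick and is easy to exercise on its own. A standalone sketch, assuming C++14 for `std::index_sequence` (the library emulates it for C++11, as above):

    #include <cstdio>
    #include <tuple>
    #include <utility>

    template <typename R, typename... P, std::size_t... I>
    R Call(R (*function)(P...),
           std::tuple<P...> args,
           std::index_sequence<I...>) {
      return function(std::get<I>(args)...);  // expands to function(a0, a1, ...)
    }

    int Add(int a, int b) { return a + b; }

    int main() {
      std::tuple<int, int> args{2, 3};
      int sum = Call(Add, args, std::index_sequence_for<int, int>{});
      std::printf("%d\n", sum);  // 5
      return 0;
    }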
+
+  template <typename R, typename... P>
+  void RuntimeCallVoid(R (*function)(P...)) {
+    ABI abi;
+    std::tuple<P...> argument_operands{
+        ReadGenericOperand<P>(abi.GetNextParameterGenericOperand<P>())...};
+    DoRuntimeCall(function,
+                  argument_operands,
+                  __local_index_sequence_for<P...>{});
+  }
+
+  // We use `struct` for `void` return type specialisation.
+  template <typename R, typename... P>
+  struct RuntimeCallStructHelper {
+    static void Wrapper(Simulator* simulator, uintptr_t function_pointer) {
+      R (*function)(P...) = reinterpret_cast<R (*)(P...)>(function_pointer);
+      simulator->RuntimeCallNonVoid(function);
+    }
+  };
+
+  // Partial specialization when the return type is `void`.
+  template <typename... P>
+  struct RuntimeCallStructHelper<void, P...> {
+    static void Wrapper(Simulator* simulator, uintptr_t function_pointer) {
+      void (*function)(P...) =
+          reinterpret_cast<void (*)(P...)>(function_pointer);
+      simulator->RuntimeCallVoid(function);
+    }
+  };
+#endif
+
+ protected:
+  const char* clr_normal;
+  const char* clr_flag_name;
+  const char* clr_flag_value;
+  const char* clr_reg_name;
+  const char* clr_reg_value;
+  const char* clr_vreg_name;
+  const char* clr_vreg_value;
+  const char* clr_memory_address;
+  const char* clr_warning;
+  const char* clr_warning_message;
+  const char* clr_printf;
+  const char* clr_branch_marker;
+
+  // Simulation helpers ------------------------------------
+  bool ConditionPassed(Condition cond) {
+    switch (cond) {
+      case eq:
+        return ReadZ();
+      case ne:
+        return !ReadZ();
+      case hs:
+        return ReadC();
+      case lo:
+        return !ReadC();
+      case mi:
+        return ReadN();
+      case pl:
+        return !ReadN();
+      case vs:
+        return ReadV();
+      case vc:
+        return !ReadV();
+      case hi:
+        return ReadC() && !ReadZ();
+      case ls:
+        return !(ReadC() && !ReadZ());
+      case ge:
+        return ReadN() == ReadV();
+      case lt:
+        return ReadN() != ReadV();
+      case gt:
+        return !ReadZ() && (ReadN() == ReadV());
+      case le:
+        return !(!ReadZ() && (ReadN() == ReadV()));
+      case nv:
+        VIXL_FALLTHROUGH();
+      case al:
+        return true;
+      default:
+        VIXL_UNREACHABLE();
+        return false;
+    }
+  }
+
+  bool ConditionPassed(Instr cond) {
+    return ConditionPassed(static_cast<Condition>(cond));
+  }
+
+  bool ConditionFailed(Condition cond) { return !ConditionPassed(cond); }
+
+  void AddSubHelper(const Instruction* instr, int64_t op2);
+  uint64_t AddWithCarry(unsigned reg_size,
+                        bool set_flags,
+                        uint64_t left,
+                        uint64_t right,
+                        int carry_in = 0);
+  void LogicalHelper(const Instruction* instr, int64_t op2);
+  void ConditionalCompareHelper(const Instruction* instr, int64_t op2);
+  void LoadStoreHelper(const Instruction* instr,
+                       int64_t offset,
+                       AddrMode addrmode);
+  void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode);
+  template <typename T>
+  void CompareAndSwapHelper(const Instruction* instr);
+  template <typename T>
+  void CompareAndSwapPairHelper(const Instruction* instr);
+  template <typename T>
+  void AtomicMemorySimpleHelper(const Instruction* instr);
+  template <typename T>
+  void AtomicMemorySwapHelper(const Instruction* instr);
+  template <typename T>
+  void LoadAcquireRCpcHelper(const Instruction* instr);
+  uintptr_t AddressModeHelper(unsigned addr_reg,
+                              int64_t offset,
+                              AddrMode addrmode);
+  void NEONLoadStoreMultiStructHelper(const Instruction* instr,
+                                      AddrMode addr_mode);
+  void NEONLoadStoreSingleStructHelper(const Instruction* instr,
+                                       AddrMode addr_mode);
+
+  uint64_t AddressUntag(uint64_t address) { return address & ~kAddressTagMask; }
+
+  template <typename T>
+  T* AddressUntag(T* address) {
+    uintptr_t address_raw = reinterpret_cast<uintptr_t>(address);
+    return reinterpret_cast<T*>(AddressUntag(address_raw));
+  }
+
+  int64_t ShiftOperand(unsigned reg_size,
+                       int64_t value,
+                       Shift shift_type,
+                       unsigned amount) const;
+  int64_t ExtendValue(unsigned reg_width,
+                      int64_t value,
+                      Extend extend_type,
+                      unsigned left_shift = 0) const;
+  uint16_t PolynomialMult(uint8_t op1,
uint8_t op2) const; + + void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr); + void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr); + void ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr); + void ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr); + void ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + int index, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + int index, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + int index, + uint64_t addr); + LogicVRegister cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond); + LogicVRegister cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mls(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmulx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& 
src1,
+                          const LogicVRegister& src2,
+                          int index);
+  LogicVRegister sqrdmulh(VectorFormat vform,
+                          LogicVRegister dst,
+                          const LogicVRegister& src1,
+                          const LogicVRegister& src2,
+                          int index);
+  LogicVRegister sdot(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2,
+                      int index);
+  LogicVRegister sqrdmlah(VectorFormat vform,
+                          LogicVRegister dst,
+                          const LogicVRegister& src1,
+                          const LogicVRegister& src2,
+                          int index);
+  LogicVRegister udot(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2,
+                      int index);
+  LogicVRegister sqrdmlsh(VectorFormat vform,
+                          LogicVRegister dst,
+                          const LogicVRegister& src1,
+                          const LogicVRegister& src2,
+                          int index);
+  LogicVRegister sub(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister and_(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2);
+  LogicVRegister orr(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister orn(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister eor(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister bic(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister bic(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src,
+                     uint64_t imm);
+  LogicVRegister bif(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister bit(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister bsl(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2);
+  LogicVRegister cls(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src);
+  LogicVRegister clz(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src);
+  LogicVRegister cnt(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src);
+  LogicVRegister not_(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister rbit(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister rev(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src,
+                     int revSize);
+  LogicVRegister rev16(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister rev32(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister rev64(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister addlp(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src,
+                       bool is_signed,
+                       bool do_accumulate);
+  LogicVRegister saddlp(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister uaddlp(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister sadalp(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister uadalp(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister ext(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src1,
+                     const LogicVRegister& src2,
+                     int index);
+  template <typename T>
+  LogicVRegister fcadd(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int rot);
+  LogicVRegister fcadd(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int rot);
+  template <typename T>
+  LogicVRegister fcmla(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int index,
+                       int rot);
+  LogicVRegister fcmla(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int index,
+                       int rot);
+  template <typename T>
+  LogicVRegister fcmla(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int rot);
+  LogicVRegister fcmla(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2,
+                       int rot);
+  LogicVRegister ins_element(VectorFormat vform,
+                             LogicVRegister dst,
+                             int dst_index,
+                             const LogicVRegister& src,
+                             int src_index);
+  LogicVRegister ins_immediate(VectorFormat vform,
+                               LogicVRegister dst,
+                               int dst_index,
+                               uint64_t imm);
+  LogicVRegister dup_element(VectorFormat vform,
+                             LogicVRegister dst,
+                             const LogicVRegister& src,
+                             int src_index);
+  LogicVRegister dup_immediate(VectorFormat vform,
+                               LogicVRegister dst,
+                               uint64_t imm);
+  LogicVRegister movi(VectorFormat vform, LogicVRegister dst, uint64_t imm);
+  LogicVRegister mvni(VectorFormat vform, LogicVRegister dst, uint64_t imm);
+  LogicVRegister orr(VectorFormat vform,
+                     LogicVRegister dst,
+                     const LogicVRegister& src,
+                     uint64_t imm);
+  LogicVRegister sshl(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2);
+  LogicVRegister ushl(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2);
+  LogicVRegister sminmax(VectorFormat vform,
+                         LogicVRegister dst,
+                         const LogicVRegister& src1,
+                         const LogicVRegister& src2,
+                         bool max);
+  LogicVRegister smax(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2);
+  LogicVRegister smin(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src1,
+                      const LogicVRegister& src2);
+  LogicVRegister sminmaxp(VectorFormat vform,
+                          LogicVRegister dst,
+                          const LogicVRegister& src1,
+                          const LogicVRegister& src2,
+                          bool max);
+  LogicVRegister smaxp(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2);
+  LogicVRegister sminp(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src1,
+                       const LogicVRegister& src2);
+  LogicVRegister addp(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister addv(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister uaddlv(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister saddlv(VectorFormat vform,
+                        LogicVRegister dst,
+                        const LogicVRegister& src);
+  LogicVRegister sminmaxv(VectorFormat vform,
+                          LogicVRegister dst,
+                          const LogicVRegister& src,
+                          bool max);
+  LogicVRegister smaxv(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister sminv(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister uxtl(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister uxtl2(VectorFormat vform,
+                       LogicVRegister dst,
+                       const LogicVRegister& src);
+  LogicVRegister sxtl(VectorFormat vform,
+                      LogicVRegister dst,
+                      const LogicVRegister& src);
+  LogicVRegister
sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister Table(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& ind, + bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2 = NULL, + const LogicVRegister* tab3 = NULL, + const LogicVRegister* tab4 = NULL); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2); + LogicVRegister ssubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max); + LogicVRegister umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ushll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister usra(VectorFormat 
vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister suqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister extractnarrow(VectorFormat vform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned); + LogicVRegister xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned); + LogicVRegister saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + 
LogicVRegister sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister dot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_signed); + LogicVRegister sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sqrdmlash(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true, + bool sub_op = false); + LogicVRegister sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); +#define NEON_3VREG_LOGIC_LIST(V) \ + V(addhn) \ + V(addhn2) \ + V(raddhn) \ + V(raddhn2) \ + V(subhn) \ + V(subhn2) \ + V(rsubhn) \ + V(rsubhn2) \ + V(pmull) \ + V(pmull2) \ + V(sabal) \ + V(sabal2) \ + V(uabal) \ + V(uabal2) \ + V(sabdl) \ + V(sabdl2) \ + V(uabdl) \ + V(uabdl2) \ + V(smull) \ + V(smull2) \ + V(umull) \ + V(umull2) \ + V(smlal) \ + V(smlal2) \ + V(umlal) \ + V(umlal2) \ + V(smlsl) \ + V(smlsl2) \ + V(umlsl) \ + V(umlsl2) \ + V(sqdmlal) \ + V(sqdmlal2) \ + V(sqdmlsl) \ + V(sqdmlsl2) \ + V(sqdmull) \ + V(sqdmull2) + +#define DEFINE_LOGIC_FUNC(FXN) \ + LogicVRegister FXN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC) +#undef DEFINE_LOGIC_FUNC + +#define NEON_FP3SAME_LIST(V) \ + V(fadd, FPAdd, false) \ + V(fsub, FPSub, true) \ + V(fmul, FPMul, true) \ + V(fmulx, FPMulx, true) \ + V(fdiv, FPDiv, true) \ + V(fmax, FPMax, false) \ + V(fmin, FPMin, false) \ + V(fmaxnm, FPMaxNM, false) \ + V(fminnm, FPMinNM, false) + +#define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template <typename T> \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP) +#undef DECLARE_NEON_FP_VECTOR_OP + +#define NEON_FPPAIRWISE_LIST(V) \ + V(faddp, fadd, FPAdd) \ + V(fmaxp, fmax, FPMax) \ + V(fmaxnmp, fmaxnm, FPMaxNM) \ + V(fminp, fmin, FPMin) \ + V(fminnmp, fminnm, FPMinNM) + +#define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src); + NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP) +#undef DECLARE_NEON_FP_PAIR_OP + + template <typename T> + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister 
frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + template <typename T> + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond); + + template <typename T> + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception = false); + LogicVRegister fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frsqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding); + LogicVRegister ursqrte(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + template <typename T> + struct TFPMinMaxOp { + typedef T (Simulator::*type)(T a, T b); + }; + + template <typename T> + LogicVRegister fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPMinMaxOp<T>::type Op); + + LogicVRegister fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + static const uint32_t CRC32_POLY = 0x04C11DB7; + static const uint32_t CRC32C_POLY = 0x1EDC6F41; + uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly); + template <typename T> + uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly); + uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly); + + void SysOp_W(int op, int64_t val); + + template <typename T> + T FPRecipSqrtEstimate(T op); + template <typename T> + T FPRecipEstimate(T op, FPRounding rounding); + template <typename T, typename R> + R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding); + + void FPCompare(double val0, double val1, FPTrapFlags trap); + double FPRoundInt(double value, FPRounding round_mode); + double recip_sqrt_estimate(double a); + double recip_estimate(double a); + double FPRecipSqrtEstimate(double a); + double FPRecipEstimate(double a); + double FixedToDouble(int64_t src, int fbits, FPRounding round_mode); + double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode); + float FixedToFloat(int64_t src, int fbits, FPRounding round_mode); + float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode); + ::vixl::internal::SimFloat16 FixedToFloat16(int64_t src, + int fbits, + FPRounding round_mode); + ::vixl::internal::SimFloat16 UFixedToFloat16(uint64_t src, + int fbits, + FPRounding round_mode); + int16_t FPToInt16(double value, FPRounding rmode); + int32_t FPToInt32(double value, FPRounding rmode); + int64_t FPToInt64(double value, FPRounding rmode); + uint16_t FPToUInt16(double value, FPRounding rmode); + uint32_t FPToUInt32(double value, FPRounding rmode); + uint64_t FPToUInt64(double value, FPRounding rmode); + int32_t FPToFixedJS(double value); + + template <typename T> + T FPAdd(T op1, T op2); + + template <typename T> + T FPNeg(T op); + + template <typename T> + T FPDiv(T op1, T op2); + + template <typename T> + T FPMax(T a, T b); + + template <typename T> + T FPMaxNM(T a, T b); + + template <typename T> + T FPMin(T a, T b); + + template <typename T> + T FPMinNM(T a, T b); + + template <typename T> + T FPMul(T op1, T op2); + + template <typename T> + T FPMulx(T op1, T op2); + + template <typename T> + T FPMulAdd(T a, T op1, T op2); + + template <typename T> + T FPSqrt(T op); + + template <typename T> + T FPSub(T op1, T op2); + + template <typename T> + T FPRecipStepFused(T op1, T op2); + + template <typename T> + T FPRSqrtStepFused(T op1, T op2); + + // This doesn't do anything at the moment. We'll need it if we want support + // for cumulative exception bits or floating-point exceptions. + void FPProcessException() {} + + bool FPProcessNaNs(const Instruction* instr); + + // Pseudo Printf instruction + void DoPrintf(const Instruction* instr); + + // Pseudo-instructions to configure CPU features dynamically. + void DoConfigureCPUFeatures(const Instruction* instr); + + void DoSaveCPUFeatures(const Instruction* instr); + void DoRestoreCPUFeatures(const Instruction* instr); + +// Simulate a runtime call. 
+#ifndef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT + VIXL_NO_RETURN_IN_DEBUG_MODE +#endif + void DoRuntimeCall(const Instruction* instr); + + // Processor state --------------------------------------- + + // Simulated monitors for exclusive access instructions. + SimExclusiveLocalMonitor local_monitor_; + SimExclusiveGlobalMonitor global_monitor_; + + // Output stream. + FILE* stream_; + PrintDisassembler* print_disasm_; + + // Instruction statistics instrumentation. + Instrument* instrumentation_; + + // General purpose registers. Register 31 is the stack pointer. + SimRegister registers_[kNumberOfRegisters]; + + // Vector registers + SimVRegister vregisters_[kNumberOfVRegisters]; + + // Program Status Register. + // bits[31, 27]: Condition flags N, Z, C, and V. + // (Negative, Zero, Carry, Overflow) + SimSystemRegister nzcv_; + + // Floating-Point Control Register + SimSystemRegister fpcr_; + + // Only a subset of FPCR features are supported by the simulator. This helper + // checks that the FPCR settings are supported. + // + // This is checked when floating-point instructions are executed, not when + // FPCR is set. This allows generated code to modify FPCR for external + // functions, or to save and restore it when entering and leaving generated + // code. + void AssertSupportedFPCR() { + // No flush-to-zero support. + VIXL_ASSERT(ReadFpcr().GetFZ() == 0); + // Ties-to-even rounding only. + VIXL_ASSERT(ReadFpcr().GetRMode() == FPTieEven); + + // The simulator does not support half-precision operations so + // GetFpcr().AHP() is irrelevant, and is not checked here. + } + + static int CalcNFlag(uint64_t result, unsigned reg_size) { + return (result >> (reg_size - 1)) & 1; + } + + static int CalcZFlag(uint64_t result) { return (result == 0) ? 1 : 0; } + + static const uint32_t kConditionFlagsMask = 0xf0000000; + + // Stack + byte* stack_; + static const int stack_protection_size_ = 256; + // 2 KB stack. + static const int stack_size_ = 2 * 1024 + 2 * stack_protection_size_; + byte* stack_limit_; + + Decoder* decoder_; + // Indicates if the pc has been modified by the instruction and should not be + // automatically incremented. + bool pc_modified_; + const Instruction* pc_; + + static const char* xreg_names[]; + static const char* wreg_names[]; + static const char* hreg_names[]; + static const char* sreg_names[]; + static const char* dreg_names[]; + static const char* vreg_names[]; + + private: + static const PACKey kPACKeyIA; + static const PACKey kPACKeyIB; + static const PACKey kPACKeyDA; + static const PACKey kPACKeyDB; + static const PACKey kPACKeyGA; + + template <typename T> + static T FPDefaultNaN(); + + // Standard NaN processing. + template <typename T> + T FPProcessNaN(T op) { + VIXL_ASSERT(IsNaN(op)); + if (IsSignallingNaN(op)) { + FPProcessException(); + } + return (ReadDN() == kUseDefaultNaN) ? 
FPDefaultNaN<T>() : ToQuietNaN(op); + } + + template <typename T> + T FPProcessNaNs(T op1, T op2) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsNaN(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (IsNaN(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else { + return 0.0; + } + } + + template <typename T> + T FPProcessNaNs3(T op1, T op2, T op3) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsSignallingNaN(op3)) { + return FPProcessNaN(op3); + } else if (IsNaN(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (IsNaN(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else if (IsNaN(op3)) { + VIXL_ASSERT(IsQuietNaN(op3)); + return FPProcessNaN(op3); + } else { + return 0.0; + } + } + + bool coloured_trace_; + + // A set of TraceParameters flags. + int trace_parameters_; + + // Indicates whether the instruction instrumentation is active. + bool instruction_stats_; + + // Indicates whether the exclusive-access warning has been printed. + bool print_exclusive_access_warning_; + void PrintExclusiveAccessWarning(); + + CPUFeaturesAuditor cpu_features_auditor_; + std::vector<CPUFeatures> saved_cpu_features_; +}; + +#if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) && __cplusplus < 201402L +// Base case of the recursive template used to emulate C++14 +// `std::index_sequence`. +template <std::size_t... I> +struct Simulator::emulated_make_index_sequence_helper<0, I...> + : Simulator::emulated_index_sequence<I...> {}; +#endif + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 + +#endif // VIXL_AARCH64_SIMULATOR_AARCH64_H_ diff --git a/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h b/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h new file mode 100644 index 000000000..6631043d5 --- /dev/null +++ b/dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h @@ -0,0 +1,192 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_ +#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_ + +#include "instructions-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Debug instructions. +// +// VIXL's macro-assembler and simulator support a few pseudo instructions to +// make debugging easier. These pseudo instructions do not exist on real +// hardware. +// +// TODO: Also consider allowing these pseudo-instructions to be disabled in the +// simulator, so that users can check that the input is a valid native code. +// (This isn't possible in all cases. Printf won't work, for example.) +// +// Each debug pseudo instruction is represented by a HLT instruction. The HLT +// immediate field is used to identify the type of debug pseudo instruction. + +enum DebugHltOpcode { + kUnreachableOpcode = 0xdeb0, + kPrintfOpcode, + kTraceOpcode, + kLogOpcode, + kRuntimeCallOpcode, + kSetCPUFeaturesOpcode, + kEnableCPUFeaturesOpcode, + kDisableCPUFeaturesOpcode, + kSaveCPUFeaturesOpcode, + kRestoreCPUFeaturesOpcode, + // Aliases. + kDebugHltFirstOpcode = kUnreachableOpcode, + kDebugHltLastOpcode = kLogOpcode +}; +VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes); + +// Each pseudo instruction uses a custom encoding for additional arguments, as +// described below. + +// Unreachable - kUnreachableOpcode +// +// Instruction which should never be executed. This is used as a guard in parts +// of the code that should not be reachable, such as in data encoded inline in +// the instructions. + +// Printf - kPrintfOpcode +// - arg_count: The number of arguments. +// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields. +// +// Simulate a call to printf. +// +// Floating-point and integer arguments are passed in separate sets of registers +// in AAPCS64 (even for varargs functions), so it is not possible to determine +// the type of each argument without some information about the values that were +// passed in. This information could be retrieved from the printf format string, +// but the format string is not trivial to parse so we encode the relevant +// information with the HLT instruction. +// +// Also, the following registers are populated (as if for a native Aarch64 +// call): +// x0: The format string +// x1-x7: Optional arguments, if type == CPURegister::kRegister +// d0-d7: Optional arguments, if type == CPURegister::kFPRegister +const unsigned kPrintfArgCountOffset = 1 * kInstructionSize; +const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize; +const unsigned kPrintfLength = 3 * kInstructionSize; + +const unsigned kPrintfMaxArgCount = 4; + +// The argument pattern is a set of two-bit-fields, each with one of the +// following values: +enum PrintfArgPattern { + kPrintfArgW = 1, + kPrintfArgX = 2, + // There is no kPrintfArgS because floats are always converted to doubles in C + // varargs calls. 
+ kPrintfArgD = 3 +}; +static const unsigned kPrintfArgPatternBits = 2; + +// Trace - kTraceOpcode +// - parameter: TraceParameter stored as a uint32_t +// - command: TraceCommand stored as a uint32_t +// +// Allow for trace management in the generated code. This enables or disables +// automatic tracing of the specified information for every simulated +// instruction. +const unsigned kTraceParamsOffset = 1 * kInstructionSize; +const unsigned kTraceCommandOffset = 2 * kInstructionSize; +const unsigned kTraceLength = 3 * kInstructionSize; + +// Trace parameters. +enum TraceParameters { + LOG_DISASM = 1 << 0, // Log disassembly. + LOG_REGS = 1 << 1, // Log general purpose registers. + LOG_VREGS = 1 << 2, // Log NEON and floating-point registers. + LOG_SYSREGS = 1 << 3, // Log the flags and system registers. + LOG_WRITE = 1 << 4, // Log writes to memory. + LOG_BRANCH = 1 << 5, // Log taken branches. + + LOG_NONE = 0, + LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS, + LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH +}; + +// Trace commands. +enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 }; + +// Log - kLogOpcode +// - parameter: TraceParameter stored as a uint32_t +// +// Print the specified information once. This mechanism is separate from Trace. +// In particular, _all_ of the specified registers are printed, rather than just +// the registers that the instruction writes. +// +// Any combination of the TraceParameters values can be used, except that +// LOG_DISASM is not supported for Log. +const unsigned kLogParamsOffset = 1 * kInstructionSize; +const unsigned kLogLength = 2 * kInstructionSize; + +// Runtime call simulation - kRuntimeCallOpcode +enum RuntimeCallType { kCallRuntime, kTailCallRuntime }; + +const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize; +// The size of a pointer on host. +const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t); +const unsigned kRuntimeCallFunctionOffset = + kRuntimeCallWrapperOffset + kRuntimeCallAddressSize; +const unsigned kRuntimeCallTypeOffset = + kRuntimeCallFunctionOffset + kRuntimeCallAddressSize; +const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t); + +// Enable or disable CPU features - kSetCPUFeaturesOpcode +// - kEnableCPUFeaturesOpcode +// - kDisableCPUFeaturesOpcode +// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as +// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone. +// - [Padding to align to kInstructionSize.] +// +// 'Set' completely overwrites the existing CPU features. +// 'Enable' and 'Disable' update the existing CPU features. +// +// These mechanisms allow users to strictly check the use of CPU features in +// different regions of code. +// +// These have no effect on the set of 'seen' features (as reported by +// CPUFeaturesAuditor::HasSeen(...)). +typedef uint8_t ConfigureCPUFeaturesElementType; +const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize; + +// Save or restore CPU features - kSaveCPUFeaturesOpcode +// - kRestoreCPUFeaturesOpcode +// +// These mechanisms provide a stack-like mechanism for preserving the CPU +// features, or restoring the last-preserved features. These pseudo-instructions +// take no arguments. +// +// These have no effect on the set of 'seen' features (as reported by +// CPUFeaturesAuditor::HasSeen(...)). 
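The two-bit arg_pattern encoding described above is compact enough to sketch end to end. The following is an illustrative, standalone example rather than VIXL code: the constants are mirrored from the declarations above, and the LSB-first packing order (lowest-index argument in the least-significant bits) is an assumption made for the example.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Constants mirrored from the declarations above.
const unsigned kPrintfMaxArgCount = 4;
const unsigned kPrintfArgPatternBits = 2;
enum PrintfArgPattern { kPrintfArgW = 1, kPrintfArgX = 2, kPrintfArgD = 3 };

// Pack up to four two-bit patterns into the 32-bit word that follows the
// HLT instruction (assumed order: argument 0 in the least-significant bits).
uint32_t PackPrintfArgPatterns(const PrintfArgPattern* patterns,
                               unsigned count) {
  assert(count <= kPrintfMaxArgCount);
  uint32_t list = 0;
  for (unsigned i = 0; i < count; i++) {
    list |= static_cast<uint32_t>(patterns[i]) << (i * kPrintfArgPatternBits);
  }
  return list;
}

// Recover the i-th pattern, as a simulator would when deciding whether the
// argument was passed in an X/W register or a D register.
PrintfArgPattern UnpackPrintfArgPattern(uint32_t list, unsigned i) {
  const uint32_t mask = (1 << kPrintfArgPatternBits) - 1;
  return static_cast<PrintfArgPattern>(
      (list >> (i * kPrintfArgPatternBits)) & mask);
}

int main() {
  const PrintfArgPattern patterns[] = {kPrintfArgX, kPrintfArgD, kPrintfArgW};
  uint32_t list = PackPrintfArgPatterns(patterns, 3);
  assert(UnpackPrintfArgPattern(list, 1) == kPrintfArgD);
  std::printf("packed pattern list: 0x%x\n", list);  // 0x1e == 0b01'11'10
  return 0;
}
```

This also shows why kPrintfMaxArgCount can stay small: four two-bit fields fit comfortably in the single 32-bit metadata word reserved at kPrintfArgPatternListOffset.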
+ +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 + +#endif // VIXL_AARCH64_SIMULATOR_AARCH64_H_ diff --git a/dep/vixl/include/vixl/assembler-base-vixl.h b/dep/vixl/include/vixl/assembler-base-vixl.h new file mode 100644 index 000000000..ee54dcbc2 --- /dev/null +++ b/dep/vixl/include/vixl/assembler-base-vixl.h @@ -0,0 +1,101 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_ASSEMBLER_BASE_H +#define VIXL_ASSEMBLER_BASE_H + +#include "code-buffer-vixl.h" + +namespace vixl { + +class CodeBufferCheckScope; + +namespace internal { + +class AssemblerBase { + public: + AssemblerBase() : allow_assembler_(false) {} + explicit AssemblerBase(size_t capacity) + : buffer_(capacity), allow_assembler_(false) {} + AssemblerBase(byte* buffer, size_t capacity) + : buffer_(buffer, capacity), allow_assembler_(false) {} + + virtual ~AssemblerBase() {} + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. + void FinalizeCode() { GetBuffer()->SetClean(); } + + ptrdiff_t GetCursorOffset() const { return GetBuffer().GetCursorOffset(); } + + // Return the address of the cursor. + template <typename T> + T GetCursorAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetBuffer().GetOffsetAddress<T>(GetCursorOffset()); + } + + size_t GetSizeOfCodeGenerated() const { return GetCursorOffset(); } + + // Accessors. + CodeBuffer* GetBuffer() { return &buffer_; } + const CodeBuffer& GetBuffer() const { return buffer_; } + bool AllowAssembler() const { return allow_assembler_; } + + protected: + void SetAllowAssembler(bool allow) { allow_assembler_ = allow; } + + // CodeBufferCheckScope must be able to temporarily allow the assembler. + friend class vixl::CodeBufferCheckScope; + + // Buffer where the code is emitted. + CodeBuffer buffer_; + + private: + bool allow_assembler_; + + public: + // Deprecated public interface. + + // Return the address of an offset in the buffer. 
+ template <typename T> + VIXL_DEPRECATED("GetBuffer().GetOffsetAddress<T>(offset)", + T GetOffsetAddress(ptrdiff_t offset) const) { + return GetBuffer().GetOffsetAddress<T>(offset); + } + + // Return the address of the start of the buffer. + template <typename T> + VIXL_DEPRECATED("GetBuffer().GetStartAddress<T>()", + T GetStartAddress() const) { + return GetBuffer().GetOffsetAddress<T>(0); + } +}; + +} // namespace internal +} // namespace vixl + +#endif // VIXL_ASSEMBLER_BASE_H diff --git a/dep/vixl/include/vixl/code-buffer-vixl.h b/dep/vixl/include/vixl/code-buffer-vixl.h new file mode 100644 index 000000000..d0d815e44 --- /dev/null +++ b/dep/vixl/include/vixl/code-buffer-vixl.h @@ -0,0 +1,191 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CODE_BUFFER_H +#define VIXL_CODE_BUFFER_H + +#include <string.h> + +#include "globals-vixl.h" +#include "utils-vixl.h" + +namespace vixl { + +class CodeBuffer { + public: + static const size_t kDefaultCapacity = 4 * KBytes; + + explicit CodeBuffer(size_t capacity = kDefaultCapacity); + CodeBuffer(byte* buffer, size_t capacity); + ~CodeBuffer(); + + void Reset(); + +#ifdef VIXL_CODE_BUFFER_MMAP + void SetExecutable(); + void SetWritable(); +#else + // These require page-aligned memory blocks, which we can only guarantee with + // mmap. 
+ VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); } + VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); } +#endif + + ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const { + ptrdiff_t cursor_offset = cursor_ - buffer_; + VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset)); + return cursor_offset - offset; + } + VIXL_DEPRECATED("GetOffsetFrom", + ptrdiff_t OffsetFrom(ptrdiff_t offset) const) { + return GetOffsetFrom(offset); + } + + ptrdiff_t GetCursorOffset() const { return GetOffsetFrom(0); } + VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) { + return GetCursorOffset(); + } + + void Rewind(ptrdiff_t offset) { + byte* rewound_cursor = buffer_ + offset; + VIXL_ASSERT((buffer_ <= rewound_cursor) && (rewound_cursor <= cursor_)); + cursor_ = rewound_cursor; + } + + template <typename T> + T GetOffsetAddress(ptrdiff_t offset) const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_))); + return reinterpret_cast<T>(buffer_ + offset); + } + + // Return the address of the start or end of the emitted code. + template <typename T> + T GetStartAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress<T>(0); + } + template <typename T> + T GetEndAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress<T>(GetSizeInBytes()); + } + + size_t GetRemainingBytes() const { + VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); + return (buffer_ + capacity_) - cursor_; + } + VIXL_DEPRECATED("GetRemainingBytes", size_t RemainingBytes() const) { + return GetRemainingBytes(); + } + + size_t GetSizeInBytes() const { + VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); + return cursor_ - buffer_; + } + + // A code buffer can emit: + // * 8, 16, 32 or 64-bit data: constant. + // * 16 or 32-bit data: instruction. + // * string: debug info. + void Emit8(uint8_t data) { Emit<uint8_t>(data); } + + void Emit16(uint16_t data) { Emit<uint16_t>(data); } + + void Emit32(uint32_t data) { Emit<uint32_t>(data); } + + void Emit64(uint64_t data) { Emit<uint64_t>(data); } + + void EmitString(const char* string); + + void EmitData(const void* data, size_t size); + + template <typename T> + void Emit(T value) { + VIXL_ASSERT(HasSpaceFor(sizeof(value))); + dirty_ = true; + memcpy(cursor_, &value, sizeof(value)); + cursor_ += sizeof(value); + } + + void UpdateData(size_t offset, const void* data, size_t size); + + // Align to 32bit. + void Align(); + + // Ensure there is enough space for and emit 'n' zero bytes. + void EmitZeroedBytes(int n); + + bool Is16bitAligned() const { return IsAligned<2>(cursor_); } + + bool Is32bitAligned() const { return IsAligned<4>(cursor_); } + + size_t GetCapacity() const { return capacity_; } + VIXL_DEPRECATED("GetCapacity", size_t capacity() const) { + return GetCapacity(); + } + + bool IsManaged() const { return managed_; } + + void Grow(size_t new_capacity); + + bool IsDirty() const { return dirty_; } + + void SetClean() { dirty_ = false; } + + bool HasSpaceFor(size_t amount) const { + return GetRemainingBytes() >= amount; + } + + void EnsureSpaceFor(size_t amount, bool* has_grown) { + bool is_full = !HasSpaceFor(amount); + if (is_full) Grow(capacity_ * 2 + amount); + VIXL_ASSERT(has_grown != NULL); + *has_grown = is_full; + } + void EnsureSpaceFor(size_t amount) { + bool dummy; + EnsureSpaceFor(amount, &dummy); + } + + private: + // Backing store of the buffer. 
+ byte* buffer_; + // If true the backing store is allocated and deallocated by the buffer. The + // backing store can then grow on demand. If false the backing store is + // provided by the user and cannot be resized internally. + bool managed_; + // Pointer to the next location to be written. + byte* cursor_; + // True if there has been any write since the buffer was created or cleaned. + bool dirty_; + // Capacity in bytes of the backing store. + size_t capacity_; +}; + +} // namespace vixl + +#endif // VIXL_CODE_BUFFER_H diff --git a/dep/vixl/include/vixl/code-generation-scopes-vixl.h b/dep/vixl/include/vixl/code-generation-scopes-vixl.h new file mode 100644 index 000000000..b7ea2d92b --- /dev/null +++ b/dep/vixl/include/vixl/code-generation-scopes-vixl.h @@ -0,0 +1,322 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#ifndef VIXL_CODE_GENERATION_SCOPES_H_ +#define VIXL_CODE_GENERATION_SCOPES_H_ + + +#include "assembler-base-vixl.h" +#include "macro-assembler-interface.h" + + +namespace vixl { + +// This scope will: +// - Allow code emission from the specified `Assembler`. +// - Optionally reserve space in the `CodeBuffer` (if it is managed by VIXL). +// - Optionally, on destruction, check the size of the generated code. +// (The size can be either exact or a maximum size.) +class CodeBufferCheckScope { + public: + // Tell whether or not the scope needs to ensure the associated CodeBuffer + // has enough space for the requested size. + enum BufferSpacePolicy { + kReserveBufferSpace, + kDontReserveBufferSpace, + + // Deprecated, but kept for backward compatibility. + kCheck = kReserveBufferSpace, + kNoCheck = kDontReserveBufferSpace + }; + + // Tell whether or not the scope should assert the amount of code emitted + // within the scope is consistent with the requested amount. + enum SizePolicy { + kNoAssert, // Do not check the size of the code emitted. + kExactSize, // The code emitted must be exactly size bytes. + kMaximumSize // The code emitted must be at most size bytes. 
+ }; + + // This constructor implicitly calls `Open` to initialise the scope + // (`assembler` must not be `NULL`), so it is ready to use immediately after + // it has been constructed. + CodeBufferCheckScope(internal::AssemblerBase* assembler, + size_t size, + BufferSpacePolicy check_policy = kReserveBufferSpace, + SizePolicy size_policy = kMaximumSize) + : assembler_(NULL), initialised_(false) { + Open(assembler, size, check_policy, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + CodeBufferCheckScope() : assembler_(NULL), initialised_(false) { + // Nothing to do. + } + + virtual ~CodeBufferCheckScope() { Close(); } + + // This function performs the actual initialisation work. + void Open(internal::AssemblerBase* assembler, + size_t size, + BufferSpacePolicy check_policy = kReserveBufferSpace, + SizePolicy size_policy = kMaximumSize) { + VIXL_ASSERT(!initialised_); + VIXL_ASSERT(assembler != NULL); + assembler_ = assembler; + if (check_policy == kReserveBufferSpace) { + assembler->GetBuffer()->EnsureSpaceFor(size); + } +#ifdef VIXL_DEBUG + limit_ = assembler_->GetSizeOfCodeGenerated() + size; + assert_policy_ = size_policy; + previous_allow_assembler_ = assembler_->AllowAssembler(); + assembler_->SetAllowAssembler(true); +#else + USE(size_policy); +#endif + initialised_ = true; + } + + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close() { +#ifdef VIXL_DEBUG + if (!initialised_) { + return; + } + assembler_->SetAllowAssembler(previous_allow_assembler_); + switch (assert_policy_) { + case kNoAssert: + break; + case kExactSize: + VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() == limit_); + break; + case kMaximumSize: + VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() <= limit_); + break; + default: + VIXL_UNREACHABLE(); + } +#endif + initialised_ = false; + } + + protected: + internal::AssemblerBase* assembler_; + SizePolicy assert_policy_; + size_t limit_; + bool previous_allow_assembler_; + bool initialised_; +}; + + +// This scope will: +// - Do the same as `CodeBufferCheckScope`, but: +// - If managed by VIXL, always reserve space in the `CodeBuffer`. +// - Always check the size (exact or maximum) of the generated code on +// destruction. +// - Emit pools if the specified size would push them out of range. +// - Block pools emission for the duration of the scope. +// This scope allows the `Assembler` and `MacroAssembler` to be freely and +// safely mixed for its duration. +class EmissionCheckScope : public CodeBufferCheckScope { + public: + // This constructor implicitly calls `Open` (when `masm` is not `NULL`) to + // initialise the scope, so it is ready to use immediately after it has been + // constructed. + EmissionCheckScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kMaximumSize) { + Open(masm, size, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + EmissionCheckScope() {} + + virtual ~EmissionCheckScope() { Close(); } + + enum PoolPolicy { + // Do not forbid pool emission inside the scope. Pools will not be emitted + // on `Open` either. + kIgnorePools, + // Force pools to be generated on `Open` if necessary and block their + // emission inside the scope. 
+ kBlockPools, + // Deprecated, but kept for backward compatibility. + kCheckPools = kBlockPools + }; + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kMaximumSize) { + Open(masm, size, size_policy, kBlockPools); + } + + void Close() { + if (!initialised_) { + return; + } + if (masm_ == NULL) { + // Nothing to do. + return; + } + // Perform the opposite of `Open`, which is: + // - Check the code generation limit was not exceeded. + // - Release the pools. + CodeBufferCheckScope::Close(); + if (pool_policy_ == kBlockPools) { + masm_->ReleasePools(); + } + VIXL_ASSERT(!initialised_); + } + + protected: + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + if (masm == NULL) { + // Nothing to do. + // We may reach this point in a context of conditional code generation. + // See `aarch64::MacroAssembler::MoveImmediateHelper()` for an example. + return; + } + masm_ = masm; + pool_policy_ = pool_policy; + if (pool_policy_ == kBlockPools) { + // To avoid duplicating the work to check that enough space is available + // in the buffer, do not use the more generic `EnsureEmitFor()`. It is + // done below when opening `CodeBufferCheckScope`. + masm->EnsureEmitPoolsFor(size); + masm->BlockPools(); + } + // The buffer should be checked *after* we emit the pools. + CodeBufferCheckScope::Open(masm->AsAssemblerBase(), + size, + kReserveBufferSpace, + size_policy); + VIXL_ASSERT(initialised_); + } + + // This constructor should only be used from code that is *currently + // generating* the pools, to avoid an infinite loop. + EmissionCheckScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + Open(masm, size, size_policy, pool_policy); + } + + MacroAssemblerInterface* masm_; + PoolPolicy pool_policy_; +}; + +// Use this scope when you need a one-to-one mapping between methods and +// instructions. This scope will: +// - Do the same as `EmissionCheckScope`. +// - Block access to the MacroAssemblerInterface (using run-time assertions). +class ExactAssemblyScope : public EmissionCheckScope { + public: + // This constructor implicitly calls `Open` (when `masm` is not `NULL`) to + // initialise the scope, so it is ready to use immediately after it has been + // constructed. + ExactAssemblyScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kExactSize) { + Open(masm, size, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + ExactAssemblyScope() {} + + virtual ~ExactAssemblyScope() { Close(); } + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kExactSize) { + Open(masm, size, size_policy, kBlockPools); + } + + void Close() { + if (!initialised_) { + return; + } + if (masm_ == NULL) { + // Nothing to do. + return; + } +#ifdef VIXL_DEBUG + masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_); +#else + USE(previous_allow_macro_assembler_); +#endif + EmissionCheckScope::Close(); + } + + protected: + // This protected constructor allows overriding the pool policy. It is + // available to allow this scope to be used in code that handles generation + // of pools. 
+ ExactAssemblyScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy assert_policy, + PoolPolicy pool_policy) { + Open(masm, size, assert_policy, pool_policy); + } + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + VIXL_ASSERT(size_policy != kNoAssert); + if (masm == NULL) { + // Nothing to do. + return; + } + // Rely on EmissionCheckScope::Open to initialise `masm_` and + // `pool_policy_`. + EmissionCheckScope::Open(masm, size, size_policy, pool_policy); +#ifdef VIXL_DEBUG + previous_allow_macro_assembler_ = masm->AllowMacroInstructions(); + masm->SetAllowMacroInstructions(false); +#endif + } + + private: + bool previous_allow_macro_assembler_; +}; + + +} // namespace vixl + +#endif // VIXL_CODE_GENERATION_SCOPES_H_ diff --git a/dep/vixl/include/vixl/compiler-intrinsics-vixl.h b/dep/vixl/include/vixl/compiler-intrinsics-vixl.h new file mode 100644 index 000000000..b27f94ebf --- /dev/null +++ b/dep/vixl/include/vixl/compiler-intrinsics-vixl.h @@ -0,0 +1,160 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#ifndef VIXL_COMPILER_INTRINSICS_H +#define VIXL_COMPILER_INTRINSICS_H + +#include "globals-vixl.h" + +namespace vixl { + +// Helper to check whether the version of GCC used is greater than the specified +// requirement. 
+#define MAJOR 1000000 +#define MINOR 1000 +#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR) + __GNUC_PATCHLEVEL__) >= \ + ((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel))) +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR)) >= \ + ((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel))) +#else +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0 +#endif + + +#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS) + +// clang-format off +#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb)) +#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz)) +#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz)) +#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs)) +#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount)) +// clang-format on + +#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS) +// The documentation for these builtins is available at: +// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html + +// clang-format off +# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0)) +# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0)) +// clang-format on + +#else +// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually +// implemented C++ methods. + +// clang-format off +#define COMPILER_HAS_BUILTIN_BSWAP false +#define COMPILER_HAS_BUILTIN_CLRSB false +#define COMPILER_HAS_BUILTIN_CLZ false +#define COMPILER_HAS_BUILTIN_CTZ false +#define COMPILER_HAS_BUILTIN_FFS false +#define COMPILER_HAS_BUILTIN_POPCOUNT false +// clang-format on + +#endif + + +template <typename V> +inline bool IsPowerOf2(V value) { + return (value != 0) && ((value & (value - 1)) == 0); +} + + +// Declaration of fallback functions. +int CountLeadingSignBitsFallBack(int64_t value, int width); +int CountLeadingZerosFallBack(uint64_t value, int width); +int CountSetBitsFallBack(uint64_t value, int width); +int CountTrailingZerosFallBack(uint64_t value, int width); + + +// Implementation of intrinsics functions. +// TODO: The implementations could be improved for sizes different from 32bit +// and 64bit: we could mask the values and call the appropriate builtin. + +template <typename V> +inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CLRSB + if (width == 32) { + return __builtin_clrsb(value); + } else if (width == 64) { + return __builtin_clrsbll(value); + } +#endif + return CountLeadingSignBitsFallBack(value, width); +} + + +template <typename V> +inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CLZ + if (width == 32) { + return (value == 0) ? 32 : __builtin_clz(static_cast<uint32_t>(value)); + } else if (width == 64) { + return (value == 0) ? 
64 : __builtin_clzll(value); + } +#endif + return CountLeadingZerosFallBack(value, width); +} + + +template <typename V> +inline int CountSetBits(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_POPCOUNT + if (width == 32) { + return __builtin_popcount(static_cast<uint32_t>(value)); + } else if (width == 64) { + return __builtin_popcountll(value); + } +#endif + return CountSetBitsFallBack(value, width); +} + + +template <typename V> +inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) { +#if COMPILER_HAS_BUILTIN_CTZ + if (width == 32) { + return (value == 0) ? 32 : __builtin_ctz(static_cast<uint32_t>(value)); + } else if (width == 64) { + return (value == 0) ? 64 : __builtin_ctzll(value); + } +#endif + return CountTrailingZerosFallBack(value, width); +} + +} // namespace vixl + +#endif // VIXL_COMPILER_INTRINSICS_H diff --git a/dep/vixl/include/vixl/cpu-features.h b/dep/vixl/include/vixl/cpu-features.h new file mode 100644 index 000000000..f94b955fa --- /dev/null +++ b/dep/vixl/include/vixl/cpu-features.h @@ -0,0 +1,364 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CPU_FEATURES_H +#define VIXL_CPU_FEATURES_H + +#include <ostream> + +#include "globals-vixl.h" + +namespace vixl { + + +// clang-format off +#define VIXL_CPU_FEATURE_LIST(V) \ + /* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \ + /* registers, so that the detailed feature registers can be read */ \ + /* directly. */ \ + V(kIDRegisterEmulation, "ID register emulation", "cpuid") \ + \ + V(kFP, "FP", "fp") \ + V(kNEON, "NEON", "asimd") \ + V(kCRC32, "CRC32", "crc32") \ + /* Cryptographic support instructions. */ \ + V(kAES, "AES", "aes") \ + V(kSHA1, "SHA1", "sha1") \ + V(kSHA2, "SHA2", "sha2") \ + /* A form of PMULL{2} with a 128-bit (1Q) result. */ \ + V(kPmull1Q, "Pmull1Q", "pmull") \ + /* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \ + V(kAtomics, "Atomics", "atomics") \ + /* Limited ordering regions: LDLAR, STLLR and their variants. 
*/ \ + V(kLORegions, "LORegions", NULL) \ + /* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \ + V(kRDM, "RDM", "asimdrdm") \ + /* SDOT and UDOT support (in NEON). */ \ + V(kDotProduct, "DotProduct", "asimddp") \ + /* Half-precision (FP16) support for FP and NEON, respectively. */ \ + V(kFPHalf, "FPHalf", "fphp") \ + V(kNEONHalf, "NEONHalf", "asimdhp") \ + /* The RAS extension, including the ESB instruction. */ \ + V(kRAS, "RAS", NULL) \ + /* Data cache clean to the point of persistence: DC CVAP. */ \ + V(kDCPoP, "DCPoP", "dcpop") \ + /* Cryptographic support instructions. */ \ + V(kSHA3, "SHA3", "sha3") \ + V(kSHA512, "SHA512", "sha512") \ + V(kSM3, "SM3", "sm3") \ + V(kSM4, "SM4", "sm4") \ + /* Pointer authentication for addresses. */ \ + V(kPAuth, "PAuth", NULL) \ + /* Pointer authentication for addresses uses QARMA. */ \ + V(kPAuthQARMA, "PAuthQARMA", NULL) \ + /* Generic authentication (using the PACGA instruction). */ \ + V(kPAuthGeneric, "PAuthGeneric", NULL) \ + /* Generic authentication uses QARMA. */ \ + V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \ + /* JavaScript-style FP <-> integer conversion instruction: FJCVTZS. */ \ + V(kJSCVT, "JSCVT", "jscvt") \ + /* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \ + V(kRCpc, "RCpc", "lrcpc") \ + /* Complex number support for NEON: FCMLA and FCADD. */ \ + V(kFcma, "Fcma", "fcma") +// clang-format on + + +class CPUFeaturesConstIterator; + +// A representation of the set of features known to be supported by the target +// device. Each feature is represented by a simple boolean flag. +// +// - When the Assembler is asked to assemble an instruction, it asserts (in +// debug mode) that the necessary features are available. +// +// - TODO: The MacroAssembler relies on the Assembler's assertions, but in +// some cases it may be useful for macros to generate a fall-back sequence +// in case features are not available. +// +// - The Simulator assumes by default that all features are available, but it +// is possible to configure it to fail if the simulated code uses features +// that are not enabled. +// +// The Simulator also offers pseudo-instructions to allow features to be +// enabled and disabled dynamically. This is useful when you want to ensure +// that some features are constrained to certain areas of code. +// +// - The base Disassembler knows nothing about CPU features, but the +// PrintDisassembler can be configured to annotate its output with warnings +// about unavailable features. The Simulator uses this feature when +// instruction trace is enabled. +// +// - The Decoder-based components -- the Simulator and PrintDisassembler -- +// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of +// features actually encountered so that a large block of code can be +// examined (either directly or through simulation), and the required +// features analysed later. +// +// Expected usage: +// +// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for +// // compatibility with older version of VIXL. +// MacroAssembler masm; +// +// // Generate code only for the current CPU. +// masm.SetCPUFeatures(CPUFeatures::InferFromOS()); +// +// // Turn off feature checking entirely. +// masm.SetCPUFeatures(CPUFeatures::All()); +// +// Feature set manipulation: +// +// CPUFeatures f; // The default constructor gives an empty set. +// // Individual features can be added (or removed). 
+//   f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
+//   f.Remove(CPUFeatures::kNEON);
+//
+//   // Some helpers exist for extensions that provide several features.
+//   f.Remove(CPUFeatures::All());
+//   f.Combine(CPUFeatures::AArch64LegacyBaseline());
+//
+//   // Chained construction is also possible.
+//   CPUFeatures g =
+//       f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
+//
+//   // Features can be queried. Where multiple features are given, they are
+//   // combined with logical AND.
+//   if (f.Has(CPUFeatures::kNEON)) { ... }
+//   if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
+//   if (f.Has(g)) { ... }
+//   // If the empty set is requested, the result is always 'true'.
+//   VIXL_ASSERT(f.Has(CPUFeatures()));
+//
+//   // For debug and reporting purposes, features can be enumerated (or
+//   // printed directly):
+//   std::cout << CPUFeatures::kNEON;  // Prints something like "NEON".
+//   std::cout << f;  // Prints something like "FP, NEON, CRC32".
+class CPUFeatures {
+ public:
+  // clang-format off
+  // Individual features.
+  // These should be treated as opaque tokens. User code should not rely on
+  // specific numeric values or ordering.
+  enum Feature {
+    // Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
+    // this class supports.
+
+    kNone = -1,
+#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
+    VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
+#undef VIXL_DECLARE_FEATURE
+    kNumberOfFeatures
+  };
+  // clang-format on
+
+  // By default, construct with no features enabled.
+  CPUFeatures() : features_(0) {}
+
+  // Construct with some features already enabled.
+  CPUFeatures(Feature feature0,
+              Feature feature1 = kNone,
+              Feature feature2 = kNone,
+              Feature feature3 = kNone);
+
+  // Construct with all features enabled. This can be used to disable feature
+  // checking: `Has(...)` returns true regardless of the argument.
+  static CPUFeatures All();
+
+  // Construct an empty CPUFeatures. This is equivalent to the default
+  // constructor, but is provided for symmetry and convenience.
+  static CPUFeatures None() { return CPUFeatures(); }
+
+  // The presence of these features was assumed by versions of VIXL before this
+  // API was added, so using this set by default ensures API compatibility.
+  static CPUFeatures AArch64LegacyBaseline() {
+    return CPUFeatures(kFP, kNEON, kCRC32);
+  }
+
+  // Construct a new CPUFeatures object based on what the OS reports.
+  static CPUFeatures InferFromOS();
+
+  // Combine another CPUFeatures object into this one. Features that already
+  // exist in this set are left unchanged.
+  void Combine(const CPUFeatures& other);
+
+  // Combine specific features into this set. Features that already exist in
+  // this set are left unchanged.
+  void Combine(Feature feature0,
+               Feature feature1 = kNone,
+               Feature feature2 = kNone,
+               Feature feature3 = kNone);
+
+  // Remove features in another CPUFeatures object from this one.
+  void Remove(const CPUFeatures& other);
+
+  // Remove specific features from this set.
+  void Remove(Feature feature0,
+              Feature feature1 = kNone,
+              Feature feature2 = kNone,
+              Feature feature3 = kNone);
+
+  // Chaining helpers for convenient construction.
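+  // For example (illustrative sketch only, using the constructors and the
+  // With()/Without()/Count() helpers declared below):
+  //
+  //   CPUFeatures f(CPUFeatures::kFP, CPUFeatures::kNEON);
+  //   CPUFeatures g = f.With(CPUFeatures::kCRC32);    // {FP, NEON, CRC32}
+  //   CPUFeatures h = g.Without(CPUFeatures::kNEON);  // {FP, CRC32}
+  //   VIXL_ASSERT(h.Count() == 2);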
+  CPUFeatures With(const CPUFeatures& other) const;
+  CPUFeatures With(Feature feature0,
+                   Feature feature1 = kNone,
+                   Feature feature2 = kNone,
+                   Feature feature3 = kNone) const;
+  CPUFeatures Without(const CPUFeatures& other) const;
+  CPUFeatures Without(Feature feature0,
+                      Feature feature1 = kNone,
+                      Feature feature2 = kNone,
+                      Feature feature3 = kNone) const;
+
+  // Query features.
+  // Note that an empty query (like `Has(kNone)`) always returns true.
+  bool Has(const CPUFeatures& other) const;
+  bool Has(Feature feature0,
+           Feature feature1 = kNone,
+           Feature feature2 = kNone,
+           Feature feature3 = kNone) const;
+
+  // Return the number of enabled features.
+  size_t Count() const;
+
+  // Check for equivalence.
+  bool operator==(const CPUFeatures& other) const {
+    return Has(other) && other.Has(*this);
+  }
+  bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
+
+  typedef CPUFeaturesConstIterator const_iterator;
+
+  const_iterator begin() const;
+  const_iterator end() const;
+
+ private:
+  // Each bit represents a feature. This field will be replaced as needed if
+  // features are added.
+  uint64_t features_;
+
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const vixl::CPUFeatures& features);
+};
+
+std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
+std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
+
+// This is not a proper C++ iterator type, but it simulates enough of
+// ForwardIterator that simple loops can be written.
+class CPUFeaturesConstIterator {
+ public:
+  CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
+                           CPUFeatures::Feature start = CPUFeatures::kNone)
+      : cpu_features_(cpu_features), feature_(start) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool operator==(const CPUFeaturesConstIterator& other) const;
+  bool operator!=(const CPUFeaturesConstIterator& other) const {
+    return !(*this == other);
+  }
+  CPUFeatures::Feature operator++();
+  CPUFeatures::Feature operator++(int);
+
+  CPUFeatures::Feature operator*() const {
+    VIXL_ASSERT(IsValid());
+    return feature_;
+  }
+
+  // For proper support of C++'s simplest "Iterator" concept, this class would
+  // have to define member types (such as CPUFeaturesIterator::pointer) to make
+  // it appear as if it iterates over Feature objects in memory. That is, we'd
+  // need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
+  // This is at least partially possible -- the std::vector<bool> specialisation
+  // does something similar -- but it doesn't seem worthwhile for a
+  // special-purpose debug helper, so they are omitted here.
+ private:
+  const CPUFeatures* cpu_features_;
+  CPUFeatures::Feature feature_;
+
+  bool IsValid() const {
+    return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
+           cpu_features_->Has(feature_);
+  }
+};
+
+// A convenience scope for temporarily modifying a CPU features object. This
+// allows features to be enabled for short sequences.
+//
+// Expected usage:
+//
+//  {
+//    CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
+//    // This scope can now use CRC32, as well as anything else that was enabled
+//    // before the scope.
+//
+//    ...
+//
+//    // At the end of the scope, the original CPU features are restored.
+//  }
+class CPUFeaturesScope {
+ public:
+  // Start a CPUFeaturesScope on any object that implements
+  // `CPUFeatures* GetCPUFeatures()`.
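+  // For example (illustrative sketch; `masm` stands for a MacroAssembler or
+  // any other type exposing a `CPUFeatures* GetCPUFeatures()` method):
+  //
+  //   {
+  //     CPUFeaturesScope scope(&masm, CPUFeatures::kSHA1, CPUFeatures::kSHA2);
+  //     // Code generated here may additionally rely on SHA1 and SHA2.
+  //   }
+  //   // The wrapped object's original feature set is restored here.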
+  template <typename T>
+  explicit CPUFeaturesScope(T* cpu_features_wrapper,
+                            CPUFeatures::Feature feature0 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature3 = CPUFeatures::kNone)
+      : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+        old_features_(*cpu_features_) {
+    cpu_features_->Combine(feature0, feature1, feature2, feature3);
+  }
+
+  template <typename T>
+  CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
+      : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+        old_features_(*cpu_features_) {
+    cpu_features_->Combine(other);
+  }
+
+  ~CPUFeaturesScope() { *cpu_features_ = old_features_; }
+
+  // For advanced usage, the CPUFeatures object can be accessed directly.
+  // The scope will restore the original state when it ends.
+
+  CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
+
+  void SetCPUFeatures(const CPUFeatures& cpu_features) {
+    *cpu_features_ = cpu_features;
+  }
+
+ private:
+  CPUFeatures* const cpu_features_;
+  const CPUFeatures old_features_;
+};
+
+
+} // namespace vixl
+
+#endif // VIXL_CPU_FEATURES_H
diff --git a/dep/vixl/include/vixl/globals-vixl.h b/dep/vixl/include/vixl/globals-vixl.h
new file mode 100644
index 000000000..727d4947f
--- /dev/null
+++ b/dep/vixl/include/vixl/globals-vixl.h
@@ -0,0 +1,284 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_GLOBALS_H
+#define VIXL_GLOBALS_H
+
+// Get standard C99 macros for integer types.
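+// (With pre-C++11 C libraries, the __STDC_*_MACROS guards below must be
+// defined before <stdint.h> and <inttypes.h> are first included, otherwise
+// macros such as UINT64_C, UINTPTR_MAX and PRIx64 may not be visible to C++
+// translation units.)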
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+extern "C" {
+#include <inttypes.h>
+#include <stdint.h>
+}
+
+#include <cassert>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
+#include "platform-vixl.h"
+
+#ifdef VIXL_NEGATIVE_TESTING
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#endif
+
+namespace vixl {
+
+typedef uint8_t byte;
+
+const int KBytes = 1024;
+const int MBytes = 1024 * KBytes;
+
+const int kBitsPerByte = 8;
+
+template <int size_in_bits>
+struct Unsigned;
+
+template <>
+struct Unsigned<32> {
+  typedef uint32_t type;
+};
+
+template <>
+struct Unsigned<64> {
+  typedef uint64_t type;
+};
+
+} // namespace vixl
+
+// Detect the host's pointer size.
+#if (UINTPTR_MAX == UINT32_MAX)
+#define VIXL_HOST_POINTER_32
+#elif (UINTPTR_MAX == UINT64_MAX)
+#define VIXL_HOST_POINTER_64
+#else
+#error "Unsupported host pointer size."
+#endif
+
+#ifdef VIXL_NEGATIVE_TESTING
+#define VIXL_ABORT() \
+  do { \
+    std::ostringstream oss; \
+    oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+    throw std::runtime_error(oss.str()); \
+  } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+  do { \
+    std::ostringstream oss; \
+    oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+    throw std::runtime_error(oss.str()); \
+  } while (false)
+#define VIXL_CHECK(condition) \
+  do { \
+    if (!(condition)) { \
+      std::ostringstream oss; \
+      oss << "Assertion failed (" #condition ")\nin "; \
+      oss << __FILE__ << ", line " << __LINE__ << std::endl; \
+      throw std::runtime_error(oss.str()); \
+    } \
+  } while (false)
+#else
+#define VIXL_ABORT() \
+  do { \
+    printf("Aborting in %s, line %i\n", __FILE__, __LINE__); \
+    abort(); \
+  } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+  do { \
+    printf("%sin %s, line %i\n", (msg), __FILE__, __LINE__); \
+    abort(); \
+  } while (false)
+#define VIXL_CHECK(condition) \
+  do { \
+    if (!(condition)) { \
+      printf("Assertion failed (%s)\nin %s, line %i\n", \
+             #condition, \
+             __FILE__, \
+             __LINE__); \
+      abort(); \
+    } \
+  } while (false)
+#endif
+#ifdef VIXL_DEBUG
+#define VIXL_ASSERT(condition) VIXL_CHECK(condition)
+#define VIXL_UNIMPLEMENTED() \
+  do { \
+    VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
+  } while (false)
+#define VIXL_UNREACHABLE() \
+  do { \
+    VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
+  } while (false)
+#else
+#define VIXL_ASSERT(condition) ((void)0)
+#define VIXL_UNIMPLEMENTED() ((void)0)
+#define VIXL_UNREACHABLE() ((void)0)
+#endif
+// This is not as powerful as template based assertions, but it is simple.
+// It assumes that the descriptions are unique. If this starts being a problem,
+// we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b
+#if __cplusplus >= 201103L
+#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
+  static_assert(condition, message)
+#else
+#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
+  typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+      __attribute__((unused))
+#endif
+#define VIXL_STATIC_ASSERT(condition) \
+  VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
+#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
+  VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
+
+#define VIXL_WARNING(message) \
+  do { \
+    printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
+  } while (false)
+
+template <typename T1>
+inline void USE(const T1&) {}
+
+template <typename T1, typename T2>
+inline void USE(const T1&, const T2&) {}
+
+template <typename T1, typename T2, typename T3>
+inline void USE(const T1&, const T2&, const T3&) {}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline void USE(const T1&, const T2&, const T3&, const T4&) {}
+
+#define VIXL_ALIGNMENT_EXCEPTION() \
+  do { \
+    fprintf(stderr, "ALIGNMENT EXCEPTION\t"); \
+    VIXL_ABORT(); \
+  } while (0)
+
+// The clang::fallthrough attribute is used along with the -Wimplicit-fallthrough
+// argument to annotate intentional fall-through between switch labels.
+// For more information please refer to:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+#ifndef __has_warning
+#define __has_warning(x) 0
+#endif
+
+// Fallthrough annotation for Clang and C++11(201103L).
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
+// Fallthrough annotation for GCC >= 7.
+#elif __GNUC__ >= 7
+#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
+#else
+#define VIXL_FALLTHROUGH() \
+  do { \
+  } while (0)
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_NO_RETURN [[noreturn]]
+#else
+#define VIXL_NO_RETURN __attribute__((noreturn))
+#endif
+#ifdef VIXL_DEBUG
+#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
+#else
+#define VIXL_NO_RETURN_IN_DEBUG_MODE
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_OVERRIDE override
+#else
+#define VIXL_OVERRIDE
+#endif
+
+// Some functions might only be marked as "noreturn" for the DEBUG build. This
+// macro should be used for such cases (for more details see what
+// VIXL_UNREACHABLE expands to).
+#ifdef VIXL_DEBUG
+#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
+#else
+#define VIXL_DEBUG_NO_RETURN
+#endif
+
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
+#endif
+#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#warning "Generating Simulator instructions without Simulator support."
+#endif
+#endif
+
+// We do not have a simulator for AArch32, although we can pretend we do so that
+// tests that require running natively can be skipped.
+#ifndef __arm__
+#define VIXL_INCLUDE_SIMULATOR_AARCH32
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
+#endif
+#endif
+
+#ifdef USE_SIMULATOR
+#error "Please see the release notes for USE_SIMULATOR."
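+// (USE_SIMULATOR is an obsolete configuration macro, superseded by the
+// per-target VIXL_INCLUDE_SIMULATOR_* macros above, hence the hard error.)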
+#endif
+
+// Target Architecture/ISA
+#ifdef VIXL_INCLUDE_TARGET_A64
+#define VIXL_INCLUDE_TARGET_AARCH64
+#endif
+
+#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
+#define VIXL_INCLUDE_TARGET_AARCH32
+#elif defined(VIXL_INCLUDE_TARGET_A32)
+#define VIXL_INCLUDE_TARGET_A32_ONLY
+#else
+#define VIXL_INCLUDE_TARGET_T32_ONLY
+#endif
+
+
+#endif // VIXL_GLOBALS_H
diff --git a/dep/vixl/include/vixl/invalset-vixl.h b/dep/vixl/include/vixl/invalset-vixl.h
new file mode 100644
index 000000000..9f9425b34
--- /dev/null
+++ b/dep/vixl/include/vixl/invalset-vixl.h
@@ -0,0 +1,915 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_INVALSET_H_
+#define VIXL_INVALSET_H_
+
+#include <cstring>
+
+#include <algorithm>
+#include <vector>
+
+#include "globals-vixl.h"
+
+namespace vixl {
+
+// We define a custom data structure template and its iterator as `std`
+// containers do not fit the performance requirements for some of our use cases.
+//
+// The structure behaves like an iterable unordered set with special properties
+// and restrictions. "InvalSet" stands for "Invalidatable Set".
+//
+// Restrictions and requirements:
+// - Adding an element already present in the set is illegal. In debug mode,
+//   this is checked at insertion time.
+// - The templated class `ElementType` must provide comparison operators so that
+//   `std::sort()` can be used.
+// - A key must be available to represent invalid elements.
+// - Elements with an invalid key must compare higher or equal to any other
+//   element.
+//
+// Use cases and performance considerations:
+// Our use cases present two specificities that allow us to design this
+// structure to provide fast insertion *and* fast search and deletion
+// operations:
+// - Elements are (generally) inserted in order (sorted according to their key).
+// - A key is available to mark elements as invalid (deleted).
+// The backing `std::vector` allows for fast insertions. When
+// searching for an element we ensure the elements are sorted (this is generally
+// the case) and perform a binary search. When deleting an element we do not
+// free the associated memory immediately. Instead, an element to be deleted is
+// marked with the 'invalid' key. Other methods of the container take care of
+// ignoring entries marked as invalid.
+// To avoid the overhead of the `std::vector` container when only few entries
+// are used, a number of elements are preallocated.
+
+// 'ElementType' and 'KeyType' are respectively the types of the elements and
+// their key. The structure only reclaims memory when safe to do so, if the
+// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
+// greater than `<total number of elements> / RECLAIM_FACTOR`.
+// clang-format off
+#define TEMPLATE_INVALSET_P_DECL \
+  class ElementType, \
+  unsigned N_PREALLOCATED_ELEMENTS, \
+  class KeyType, \
+  KeyType INVALID_KEY, \
+  size_t RECLAIM_FROM, \
+  unsigned RECLAIM_FACTOR
+// clang-format on
+
+#define TEMPLATE_INVALSET_P_DEF \
+  ElementType, N_PREALLOCATED_ELEMENTS, KeyType, INVALID_KEY, RECLAIM_FROM, \
+      RECLAIM_FACTOR
+
+template <class S>
+class InvalSetIterator; // Forward declaration.
+
+template <TEMPLATE_INVALSET_P_DECL>
+class InvalSet {
+ public:
+  InvalSet();
+  ~InvalSet();
+
+  static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
+  static const KeyType kInvalidKey = INVALID_KEY;
+
+  // C++ STL iterator interface.
+  typedef InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> > iterator;
+  iterator begin();
+  iterator end();
+
+  // It is illegal to insert an element already present in the set.
+  void insert(const ElementType& element);
+
+  // Looks for the specified element in the set and - if found - deletes it.
+  // The return value is the number of elements erased: either 0 or 1.
+  size_t erase(const ElementType& element);
+
+  // This indicates the number of (valid) elements stored in this set.
+  size_t size() const;
+
+  // Returns true if no elements are stored in the set.
+  // Note that this does not mean that the backing storage is empty: it can
+  // still contain invalid elements.
+  bool empty() const;
+
+  void clear();
+
+  const ElementType GetMinElement();
+
+  // This returns the key of the minimum element in the set.
+  KeyType GetMinElementKey();
+
+  static bool IsValid(const ElementType& element);
+  static KeyType GetKey(const ElementType& element);
+  static void SetKey(ElementType* element, KeyType key);
+
+  typedef ElementType _ElementType;
+  typedef KeyType _KeyType;
+
+ protected:
+  // Returns a pointer to the element in vector_ if it was found, or NULL
+  // otherwise.
+  ElementType* Search(const ElementType& element);
+
+  // The argument *must* point to an element stored in *this* set.
+  // This function is not allowed to move elements in the backing vector
+  // storage.
+  void EraseInternal(ElementType* element);
+
+  // The elements in the range searched must be sorted.
+  ElementType* BinarySearch(const ElementType& element,
+                            ElementType* start,
+                            ElementType* end) const;
+
+  // Sort the elements.
+  enum SortType {
+    // The 'hard' version guarantees that invalid elements are moved to the end
+    // of the container.
+    kHardSort,
+    // The 'soft' version only guarantees that the elements will be sorted.
+    // Invalid elements may still be present anywhere in the set.
+    kSoftSort
+  };
+  void Sort(SortType sort_type);
+
+  // Delete the elements that have an invalid key. The complexity is linear
+  // with the size of the vector.
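+  // (Clean() compacts the remaining valid elements towards the front of the
+  // backing vector and erases the invalid tail; as the assertions in its
+  // definition show, it must only run while no iterator has acquired the set.)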
+ void Clean(); + + const ElementType Front() const; + const ElementType Back() const; + + // Delete invalid trailing elements and return the last valid element in the + // set. + const ElementType CleanBack(); + + // Returns a pointer to the start or end of the backing storage. + const ElementType* StorageBegin() const; + const ElementType* StorageEnd() const; + ElementType* StorageBegin(); + ElementType* StorageEnd(); + + // Returns the index of the element within the backing storage. The element + // must belong to the backing storage. + size_t GetElementIndex(const ElementType* element) const; + + // Returns the element at the specified index in the backing storage. + const ElementType* GetElementAt(size_t index) const; + ElementType* GetElementAt(size_t index); + + static const ElementType* GetFirstValidElement(const ElementType* from, + const ElementType* end); + + void CacheMinElement(); + const ElementType GetCachedMinElement() const; + + bool ShouldReclaimMemory() const; + void ReclaimMemory(); + + bool IsUsingVector() const { return vector_ != NULL; } + void SetSorted(bool sorted) { sorted_ = sorted; } + + // We cache some data commonly required by users to improve performance. + // We cannot cache pointers to elements as we do not control the backing + // storage. + bool valid_cached_min_; + size_t cached_min_index_; // Valid iff `valid_cached_min_` is true. + KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true. + + // Indicates whether the elements are sorted. + bool sorted_; + + // This represents the number of (valid) elements in this set. + size_t size_; + + // The backing storage is either the array of preallocated elements or the + // vector. The structure starts by using the preallocated elements, and + // transitions (permanently) to using the vector once more than + // kNPreallocatedElements are used. + // Elements are only invalidated when using the vector. The preallocated + // storage always only contains valid elements. + ElementType preallocated_[kNPreallocatedElements]; + std::vector* vector_; + + // Iterators acquire and release this monitor. While a set is acquired, + // certain operations are illegal to ensure that the iterator will + // correctly iterate over the elements in the set. + int monitor_; +#ifdef VIXL_DEBUG + int monitor() const { return monitor_; } + void Acquire() { monitor_++; } + void Release() { + monitor_--; + VIXL_ASSERT(monitor_ >= 0); + } +#endif + + private: +// The copy constructor and assignment operator are not used and the defaults +// are unsafe, so disable them (without an implementation). +#if __cplusplus >= 201103L + InvalSet(const InvalSet& other) = delete; + InvalSet operator=(const InvalSet& other) = delete; +#else + InvalSet(const InvalSet& other); + InvalSet operator=(const InvalSet& other); +#endif + + friend class InvalSetIterator >; +}; + + +template +class InvalSetIterator : public std::iterator { + private: + // Redefine types to mirror the associated set types. + typedef typename S::_ElementType ElementType; + typedef typename S::_KeyType KeyType; + + public: + explicit InvalSetIterator(S* inval_set = NULL); + + // This class implements the standard copy-swap idiom. 
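+  // (That is: the copy constructor performs the actual copying, and
+  // operator= takes its argument by value -- producing such a copy -- then
+  // simply swap()s it with *this. Assignment therefore reuses the copy
+  // constructor and never leaves the iterator half-updated.)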
+ ~InvalSetIterator(); + InvalSetIterator(const InvalSetIterator& other); + InvalSetIterator& operator=(InvalSetIterator other); +#if __cplusplus >= 201103L + InvalSetIterator(InvalSetIterator&& other) noexcept; +#endif + + friend void swap(InvalSetIterator& a, InvalSetIterator& b) { + using std::swap; + swap(a.using_vector_, b.using_vector_); + swap(a.index_, b.index_); + swap(a.inval_set_, b.inval_set_); + } + + // Return true if the iterator is at the end of the set. + bool Done() const; + + // Move this iterator to the end of the set. + void Finish(); + + // Delete the current element and advance the iterator to point to the next + // element. + void DeleteCurrentAndAdvance(); + + static bool IsValid(const ElementType& element); + static KeyType GetKey(const ElementType& element); + + // Extra helpers to support the forward-iterator interface. + InvalSetIterator& operator++(); // Pre-increment. + InvalSetIterator operator++(int); // Post-increment. + bool operator==(const InvalSetIterator& rhs) const; + bool operator!=(const InvalSetIterator& rhs) const { + return !(*this == rhs); + } + ElementType& operator*() { return *Current(); } + const ElementType& operator*() const { return *Current(); } + ElementType* operator->() { return Current(); } + const ElementType* operator->() const { return Current(); } + + protected: + void MoveToValidElement(); + + // Indicates if the iterator is looking at the vector or at the preallocated + // elements. + bool using_vector_; + // Used when looking at the preallocated elements, or in debug mode when using + // the vector to track how many times the iterator has advanced. + size_t index_; + typename std::vector::iterator iterator_; + S* inval_set_; + + // TODO: These helpers are deprecated and will be removed in future versions + // of VIXL. + ElementType* Current() const; + void Advance(); +}; + + +template +InvalSet::InvalSet() + : valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) { +#ifdef VIXL_DEBUG + monitor_ = 0; +#endif +} + + +template +InvalSet::~InvalSet() { + VIXL_ASSERT(monitor_ == 0); + delete vector_; +} + + +template +typename InvalSet::iterator +InvalSet::begin() { + return iterator(this); +} + + +template +typename InvalSet::iterator +InvalSet::end() { + iterator end(this); + end.Finish(); + return end; +} + + +template +void InvalSet::insert(const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(IsValid(element)); + VIXL_ASSERT(Search(element) == NULL); + SetSorted(empty() || (sorted_ && (element > CleanBack()))); + if (IsUsingVector()) { + vector_->push_back(element); + } else { + if (size_ < kNPreallocatedElements) { + preallocated_[size_] = element; + } else { + // Transition to using the vector. + vector_ = + new std::vector(preallocated_, preallocated_ + size_); + vector_->push_back(element); + } + } + size_++; + + if (valid_cached_min_ && (element < GetMinElement())) { + cached_min_index_ = IsUsingVector() ? 
vector_->size() - 1 : size_ - 1; + cached_min_key_ = GetKey(element); + valid_cached_min_ = true; + } + + if (ShouldReclaimMemory()) { + ReclaimMemory(); + } +} + + +template +size_t InvalSet::erase(const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(IsValid(element)); + ElementType* local_element = Search(element); + if (local_element != NULL) { + EraseInternal(local_element); + return 1; + } + return 0; +} + + +template +ElementType* InvalSet::Search( + const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + if (empty()) { + return NULL; + } + if (ShouldReclaimMemory()) { + ReclaimMemory(); + } + if (!sorted_) { + Sort(kHardSort); + } + if (!valid_cached_min_) { + CacheMinElement(); + } + return BinarySearch(element, GetElementAt(cached_min_index_), StorageEnd()); +} + + +template +size_t InvalSet::size() const { + return size_; +} + + +template +bool InvalSet::empty() const { + return size_ == 0; +} + + +template +void InvalSet::clear() { + VIXL_ASSERT(monitor() == 0); + size_ = 0; + if (IsUsingVector()) { + vector_->clear(); + } + SetSorted(true); + valid_cached_min_ = false; +} + + +template +const ElementType InvalSet::GetMinElement() { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(!empty()); + CacheMinElement(); + return *GetElementAt(cached_min_index_); +} + + +template +KeyType InvalSet::GetMinElementKey() { + VIXL_ASSERT(monitor() == 0); + if (valid_cached_min_) { + return cached_min_key_; + } else { + return GetKey(GetMinElement()); + } +} + + +template +bool InvalSet::IsValid(const ElementType& element) { + return GetKey(element) != kInvalidKey; +} + + +template +void InvalSet::EraseInternal(ElementType* element) { + // Note that this function must be safe even while an iterator has acquired + // this set. + VIXL_ASSERT(element != NULL); + size_t deleted_index = GetElementIndex(element); + if (IsUsingVector()) { + VIXL_ASSERT((&(vector_->front()) <= element) && + (element <= &(vector_->back()))); + SetKey(element, kInvalidKey); + } else { + VIXL_ASSERT((preallocated_ <= element) && + (element < (preallocated_ + kNPreallocatedElements))); + ElementType* end = preallocated_ + kNPreallocatedElements; + size_t copy_size = sizeof(*element) * (end - element - 1); + memmove(element, element + 1, copy_size); + } + size_--; + + if (valid_cached_min_ && (deleted_index == cached_min_index_)) { + if (sorted_ && !empty()) { + const ElementType* min = GetFirstValidElement(element, StorageEnd()); + cached_min_index_ = GetElementIndex(min); + cached_min_key_ = GetKey(*min); + valid_cached_min_ = true; + } else { + valid_cached_min_ = false; + } + } +} + + +template +ElementType* InvalSet::BinarySearch( + const ElementType& element, ElementType* start, ElementType* end) const { + if (start == end) { + return NULL; + } + VIXL_ASSERT(sorted_); + VIXL_ASSERT(start < end); + VIXL_ASSERT(!empty()); + + // Perform a binary search through the elements while ignoring invalid + // elements. + ElementType* elements = start; + size_t low = 0; + size_t high = (end - start) - 1; + while (low < high) { + // Find valid bounds. + while (!IsValid(elements[low]) && (low < high)) ++low; + while (!IsValid(elements[high]) && (low < high)) --high; + VIXL_ASSERT(low <= high); + // Avoid overflow when computing the middle index. 
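+    // ((low + high) / 2 could wrap around for indices above SIZE_MAX / 2,
+    // whereas low + (high - low) / 2 always stays within [low, high].)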
+ size_t middle = low + (high - low) / 2; + if ((middle == low) || (middle == high)) { + break; + } + while ((middle < high - 1) && !IsValid(elements[middle])) ++middle; + while ((low + 1 < middle) && !IsValid(elements[middle])) --middle; + if (!IsValid(elements[middle])) { + break; + } + if (elements[middle] < element) { + low = middle; + } else { + high = middle; + } + } + + if (elements[low] == element) return &elements[low]; + if (elements[high] == element) return &elements[high]; + return NULL; +} + + +template +void InvalSet::Sort(SortType sort_type) { + if (sort_type == kSoftSort) { + if (sorted_) { + return; + } + } + VIXL_ASSERT(monitor() == 0); + if (empty()) { + return; + } + + Clean(); + std::sort(StorageBegin(), StorageEnd()); + + SetSorted(true); + cached_min_index_ = 0; + cached_min_key_ = GetKey(Front()); + valid_cached_min_ = true; +} + + +template +void InvalSet::Clean() { + VIXL_ASSERT(monitor() == 0); + if (empty() || !IsUsingVector()) { + return; + } + // Manually iterate through the vector storage to discard invalid elements. + ElementType* start = &(vector_->front()); + ElementType* end = start + vector_->size(); + ElementType* c = start; + ElementType* first_invalid; + ElementType* first_valid; + ElementType* next_invalid; + + while ((c < end) && IsValid(*c)) c++; + first_invalid = c; + + while (c < end) { + while ((c < end) && !IsValid(*c)) c++; + first_valid = c; + while ((c < end) && IsValid(*c)) c++; + next_invalid = c; + + ptrdiff_t n_moved_elements = (next_invalid - first_valid); + memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c)); + first_invalid = first_invalid + n_moved_elements; + c = next_invalid; + } + + // Delete the trailing invalid elements. + vector_->erase(vector_->begin() + (first_invalid - start), vector_->end()); + VIXL_ASSERT(vector_->size() == size_); + + if (sorted_) { + valid_cached_min_ = true; + cached_min_index_ = 0; + cached_min_key_ = GetKey(*GetElementAt(0)); + } else { + valid_cached_min_ = false; + } +} + + +template +const ElementType InvalSet::Front() const { + VIXL_ASSERT(!empty()); + return IsUsingVector() ? vector_->front() : preallocated_[0]; +} + + +template +const ElementType InvalSet::Back() const { + VIXL_ASSERT(!empty()); + return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1]; +} + + +template +const ElementType InvalSet::CleanBack() { + VIXL_ASSERT(monitor() == 0); + if (IsUsingVector()) { + // Delete the invalid trailing elements. + typename std::vector::reverse_iterator it = vector_->rbegin(); + while (!IsValid(*it)) { + it++; + } + vector_->erase(it.base(), vector_->end()); + } + return Back(); +} + + +template +const ElementType* InvalSet::StorageBegin() const { + return IsUsingVector() ? &(vector_->front()) : preallocated_; +} + + +template +const ElementType* InvalSet::StorageEnd() const { + return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_; +} + + +template +ElementType* InvalSet::StorageBegin() { + return IsUsingVector() ? &(vector_->front()) : preallocated_; +} + + +template +ElementType* InvalSet::StorageEnd() { + return IsUsingVector() ? 
&(vector_->back()) + 1 : preallocated_ + size_; +} + + +template +size_t InvalSet::GetElementIndex( + const ElementType* element) const { + VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd())); + return element - StorageBegin(); +} + + +template +const ElementType* InvalSet::GetElementAt( + size_t index) const { + VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) || + (index < size_)); + return StorageBegin() + index; +} + +template +ElementType* InvalSet::GetElementAt(size_t index) { + VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) || + (index < size_)); + return StorageBegin() + index; +} + +template +const ElementType* InvalSet::GetFirstValidElement( + const ElementType* from, const ElementType* end) { + while ((from < end) && !IsValid(*from)) { + from++; + } + return from; +} + + +template +void InvalSet::CacheMinElement() { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(!empty()); + + if (valid_cached_min_) { + return; + } + + if (sorted_) { + const ElementType* min = GetFirstValidElement(StorageBegin(), StorageEnd()); + cached_min_index_ = GetElementIndex(min); + cached_min_key_ = GetKey(*min); + valid_cached_min_ = true; + } else { + Sort(kHardSort); + } + VIXL_ASSERT(valid_cached_min_); +} + + +template +bool InvalSet::ShouldReclaimMemory() const { + if (!IsUsingVector()) { + return false; + } + size_t n_invalid_elements = vector_->size() - size_; + return (n_invalid_elements > RECLAIM_FROM) && + (n_invalid_elements > vector_->size() / RECLAIM_FACTOR); +} + + +template +void InvalSet::ReclaimMemory() { + VIXL_ASSERT(monitor() == 0); + Clean(); +} + + +template +InvalSetIterator::InvalSetIterator(S* inval_set) + : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()), + index_(0), + inval_set_(inval_set) { + if (inval_set != NULL) { + inval_set->Sort(S::kSoftSort); +#ifdef VIXL_DEBUG + inval_set->Acquire(); +#endif + if (using_vector_) { + iterator_ = typename std::vector::iterator( + inval_set_->vector_->begin()); + } + MoveToValidElement(); + } +} + + +template +InvalSetIterator::~InvalSetIterator() { +#ifdef VIXL_DEBUG + if (inval_set_ != NULL) inval_set_->Release(); +#endif +} + + +template +typename S::_ElementType* InvalSetIterator::Current() const { + VIXL_ASSERT(!Done()); + if (using_vector_) { + return &(*iterator_); + } else { + return &(inval_set_->preallocated_[index_]); + } +} + + +template +void InvalSetIterator::Advance() { + ++(*this); +} + + +template +bool InvalSetIterator::Done() const { + if (using_vector_) { + bool done = (iterator_ == inval_set_->vector_->end()); + VIXL_ASSERT(done == (index_ == inval_set_->size())); + return done; + } else { + return index_ == inval_set_->size(); + } +} + + +template +void InvalSetIterator::Finish() { + VIXL_ASSERT(inval_set_->sorted_); + if (using_vector_) { + iterator_ = inval_set_->vector_->end(); + } + index_ = inval_set_->size(); +} + + +template +void InvalSetIterator::DeleteCurrentAndAdvance() { + if (using_vector_) { + inval_set_->EraseInternal(&(*iterator_)); + MoveToValidElement(); + } else { + inval_set_->EraseInternal(inval_set_->preallocated_ + index_); + } +} + + +template +bool InvalSetIterator::IsValid(const ElementType& element) { + return S::IsValid(element); +} + + +template +typename S::_KeyType InvalSetIterator::GetKey(const ElementType& element) { + return S::GetKey(element); +} + + +template +void InvalSetIterator::MoveToValidElement() { + if (using_vector_) { + while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) { + iterator_++; + } + } 
else { + VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0])); + // Nothing to do. + } +} + + +template +InvalSetIterator::InvalSetIterator(const InvalSetIterator& other) + : using_vector_(other.using_vector_), + index_(other.index_), + inval_set_(other.inval_set_) { +#ifdef VIXL_DEBUG + if (inval_set_ != NULL) inval_set_->Acquire(); +#endif +} + + +#if __cplusplus >= 201103L +template +InvalSetIterator::InvalSetIterator(InvalSetIterator&& other) noexcept + : using_vector_(false), + index_(0), + inval_set_(NULL) { + swap(*this, other); +} +#endif + + +template +InvalSetIterator& InvalSetIterator::operator=(InvalSetIterator other) { + swap(*this, other); + return *this; +} + + +template +bool InvalSetIterator::operator==(const InvalSetIterator& rhs) const { + bool equal = (inval_set_ == rhs.inval_set_); + + // If the inval_set_ matches, using_vector_ must also match. + VIXL_ASSERT(!equal || (using_vector_ == rhs.using_vector_)); + + if (using_vector_) { + equal = equal && (iterator_ == rhs.iterator_); + // In debug mode, index_ is maintained even with using_vector_. + VIXL_ASSERT(!equal || (index_ == rhs.index_)); + } else { + equal = equal && (index_ == rhs.index_); +#ifdef DEBUG + // If not using_vector_, iterator_ should be default-initialised. + typename std::vector::iterator default_iterator; + VIXL_ASSERT(iterator_ == default_iterator); + VIXL_ASSERT(rhs.iterator_ == default_iterator); +#endif + } + return equal; +} + + +template +InvalSetIterator& InvalSetIterator::operator++() { + // Pre-increment. + VIXL_ASSERT(!Done()); + if (using_vector_) { + iterator_++; +#ifdef VIXL_DEBUG + index_++; +#endif + MoveToValidElement(); + } else { + index_++; + } + return *this; +} + + +template +InvalSetIterator InvalSetIterator::operator++(int /* unused */) { + // Post-increment. + VIXL_ASSERT(!Done()); + InvalSetIterator old(*this); + ++(*this); + return old; +} + + +#undef TEMPLATE_INVALSET_P_DECL +#undef TEMPLATE_INVALSET_P_DEF + +} // namespace vixl + +#endif // VIXL_INVALSET_H_ diff --git a/dep/vixl/include/vixl/macro-assembler-interface.h b/dep/vixl/include/vixl/macro-assembler-interface.h new file mode 100644 index 000000000..a3194e308 --- /dev/null +++ b/dep/vixl/include/vixl/macro-assembler-interface.h @@ -0,0 +1,75 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_MACRO_ASSEMBLER_INTERFACE_H +#define VIXL_MACRO_ASSEMBLER_INTERFACE_H + +#include "assembler-base-vixl.h" + +namespace vixl { + +class MacroAssemblerInterface { + public: + virtual internal::AssemblerBase* AsAssemblerBase() = 0; + + virtual ~MacroAssemblerInterface() {} + + virtual bool AllowMacroInstructions() const = 0; + virtual bool ArePoolsBlocked() const = 0; + + protected: + virtual void SetAllowMacroInstructions(bool allow) = 0; + + virtual void BlockPools() = 0; + virtual void ReleasePools() = 0; + virtual void EnsureEmitPoolsFor(size_t size) = 0; + + // Emit the branch over a literal/veneer pool, and any necessary padding + // before it. + virtual void EmitPoolHeader() = 0; + // When this is called, the label used for branching over the pool is bound. + // This can also generate additional padding, which must correspond to the + // alignment_ value passed to the PoolManager (which needs to keep track of + // the exact size of the generated pool). + virtual void EmitPoolFooter() = 0; + + // Emit n bytes of padding that does not have to be executable. + virtual void EmitPaddingBytes(int n) = 0; + // Emit n bytes of padding that has to be executable. Implementations must + // make sure this is a multiple of the instruction size. + virtual void EmitNopBytes(int n) = 0; + + // The following scopes need access to the above method in order to implement + // pool blocking and temporarily disable the macro-assembler. + friend class ExactAssemblyScope; + friend class EmissionCheckScope; + template + friend class PoolManager; +}; + +} // namespace vixl + +#endif // VIXL_MACRO_ASSEMBLER_INTERFACE_H diff --git a/dep/vixl/include/vixl/platform-vixl.h b/dep/vixl/include/vixl/platform-vixl.h new file mode 100644 index 000000000..99f54d0c4 --- /dev/null +++ b/dep/vixl/include/vixl/platform-vixl.h @@ -0,0 +1,39 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef PLATFORM_H +#define PLATFORM_H + +// Define platform specific functionalities. +extern "C" { +#include +} + +namespace vixl { +inline void HostBreakpoint() { raise(SIGINT); } +} // namespace vixl + +#endif diff --git a/dep/vixl/include/vixl/pool-manager-impl.h b/dep/vixl/include/vixl/pool-manager-impl.h new file mode 100644 index 000000000..c49b643fc --- /dev/null +++ b/dep/vixl/include/vixl/pool-manager-impl.h @@ -0,0 +1,522 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_POOL_MANAGER_IMPL_H_ +#define VIXL_POOL_MANAGER_IMPL_H_ + +#include "pool-manager.h" + +#include +#include "assembler-base-vixl.h" + +namespace vixl { + + +template +T PoolManager::Emit(MacroAssemblerInterface* masm, + T pc, + int num_bytes, + ForwardReference* new_reference, + LocationBase* new_object, + EmitOption option) { + // Make sure that the buffer still has the alignment we think it does. + VIXL_ASSERT(IsAligned(masm->AsAssemblerBase() + ->GetBuffer() + ->GetStartAddress(), + buffer_alignment_)); + + // We should not call this method when the pools are blocked. + VIXL_ASSERT(!IsBlocked()); + if (objects_.empty()) return pc; + + // Emit header. + if (option == kBranchRequired) { + masm->EmitPoolHeader(); + // TODO: The pc at this point might not actually be aligned according to + // alignment_. This is to support the current AARCH32 MacroAssembler which + // does not have a fixed size instruction set. 
In practice, the pc will be + // aligned to the alignment instructions need for the current instruction + // set, so we do not need to align it here. All other calculations do take + // the alignment into account, which only makes the checkpoint calculations + // more conservative when we use T32. Uncomment the following assertion if + // the AARCH32 MacroAssembler is modified to only support one ISA at the + // time. + // VIXL_ASSERT(pc == AlignUp(pc, alignment_)); + pc += header_size_; + } else { + // If the header is optional, we might need to add some extra padding to + // meet the minimum location of the first object. + if (pc < objects_[0].min_location_) { + int32_t padding = objects_[0].min_location_ - pc; + masm->EmitNopBytes(padding); + pc += padding; + } + } + + PoolObject* existing_object = GetObjectIfTracked(new_object); + + // Go through all objects and emit one by one. + for (objects_iter iter = objects_.begin(); iter != objects_.end();) { + PoolObject& current = *iter; + if (ShouldSkipObject(¤t, + pc, + num_bytes, + new_reference, + new_object, + existing_object)) { + ++iter; + continue; + } + LocationBase* label_base = current.label_base_; + T aligned_pc = AlignUp(pc, current.alignment_); + masm->EmitPaddingBytes(aligned_pc - pc); + pc = aligned_pc; + VIXL_ASSERT(pc >= current.min_location_); + VIXL_ASSERT(pc <= current.max_location_); + // First call SetLocation, which will also resolve the references, and then + // call EmitPoolObject, which might add a new reference. + label_base->SetLocation(masm->AsAssemblerBase(), pc); + label_base->EmitPoolObject(masm); + int object_size = label_base->GetPoolObjectSizeInBytes(); + if (label_base->ShouldDeletePoolObjectOnPlacement()) { + label_base->MarkBound(); + iter = RemoveAndDelete(iter); + } else { + VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement()); + current.label_base_->UpdatePoolObject(¤t); + VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment()); + ++iter; + } + pc += object_size; + } + + // Recalculate the checkpoint before emitting the footer. The footer might + // call Bind() which will check if we need to emit. + RecalculateCheckpoint(); + + // Always emit footer - this might add some padding. + masm->EmitPoolFooter(); + pc = AlignUp(pc, alignment_); + + return pc; +} + +template +bool PoolManager::ShouldSkipObject(PoolObject* pool_object, + T pc, + int num_bytes, + ForwardReference* new_reference, + LocationBase* new_object, + PoolObject* existing_object) const { + // We assume that all objects before this have been skipped and all objects + // after this will be emitted, therefore we will emit the whole pool. Add + // the header size and alignment, as well as the number of bytes we are + // planning to emit. + T max_actual_location = pc + num_bytes + max_pool_size_; + + if (new_reference != NULL) { + // If we're adding a new object, also assume that it will have to be emitted + // before the object we are considering to skip. + VIXL_ASSERT(new_object != NULL); + T new_object_alignment = std::max(new_reference->object_alignment_, + new_object->GetPoolObjectAlignment()); + if ((existing_object != NULL) && + (existing_object->alignment_ > new_object_alignment)) { + new_object_alignment = existing_object->alignment_; + } + max_actual_location += + (new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1); + } + + // Hard limit. + if (max_actual_location >= pool_object->max_location_) return false; + + // Use heuristic. 
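+  // (That is: while the current pc is still below the object's skip-until
+  // hint, defer the object to a later pool rather than emitting it now.)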
+ return (pc < pool_object->skip_until_location_hint_); +} + +template +T PoolManager::UpdateCheckpointForObject(T checkpoint, + const PoolObject* object) { + checkpoint -= object->label_base_->GetPoolObjectSizeInBytes(); + if (checkpoint > object->max_location_) checkpoint = object->max_location_; + checkpoint = AlignDown(checkpoint, object->alignment_); + return checkpoint; +} + +template +static T MaxCheckpoint() { + return std::numeric_limits::max(); +} + +template +static inline bool CheckCurrentPC(T pc, T checkpoint) { + VIXL_ASSERT(pc <= checkpoint); + // We must emit the pools if we are at the checkpoint now. + return pc == checkpoint; +} + +template +static inline bool CheckFuturePC(T pc, T checkpoint) { + // We do not need to emit the pools now if the projected future PC will be + // equal to the checkpoint (we will need to emit the pools then). + return pc > checkpoint; +} + +template +bool PoolManager::MustEmit(T pc, + int num_bytes, + ForwardReference* reference, + LocationBase* label_base) const { + // Check if we are at or past the checkpoint. + if (CheckCurrentPC(pc, checkpoint_)) return true; + + // Check if the future PC will be past the checkpoint. + pc += num_bytes; + if (CheckFuturePC(pc, checkpoint_)) return true; + + // No new reference - nothing to do. + if (reference == NULL) { + VIXL_ASSERT(label_base == NULL); + return false; + } + + if (objects_.empty()) { + // Basic assertions that restrictions on the new (and only) reference are + // possible to satisfy. + VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >= + reference->min_object_location_); + VIXL_ASSERT(pc <= reference->max_object_location_); + return false; + } + + // Check if the object is already being tracked. + const PoolObject* existing_object = GetObjectIfTracked(label_base); + if (existing_object != NULL) { + // If the existing_object is already in existing_objects_ and its new + // alignment and new location restrictions are not stricter, skip the more + // expensive check. + if ((reference->min_object_location_ <= existing_object->min_location_) && + (reference->max_object_location_ >= existing_object->max_location_) && + (reference->object_alignment_ <= existing_object->alignment_)) { + return false; + } + } + + // Create a temporary object. + PoolObject temp(label_base); + temp.RestrictRange(reference->min_object_location_, + reference->max_object_location_); + temp.RestrictAlignment(reference->object_alignment_); + if (existing_object != NULL) { + temp.RestrictRange(existing_object->min_location_, + existing_object->max_location_); + temp.RestrictAlignment(existing_object->alignment_); + } + + // Check if the new reference can be added after the end of the current pool. + // If yes, we don't need to emit. + T last_reachable = AlignDown(temp.max_location_, temp.alignment_); + const PoolObject& last = objects_.back(); + T after_pool = AlignDown(last.max_location_, last.alignment_) + + last.label_base_->GetPoolObjectSizeInBytes(); + // The current object can be placed at the end of the pool, even if the last + // object is placed at the last possible location. + if (last_reachable >= after_pool) return false; + // The current object can be placed after the code we are about to emit and + // after the existing pool (with a pessimistic size estimate). + if (last_reachable >= pc + num_bytes + max_pool_size_) return false; + + // We're not in a trivial case, so we need to recalculate the checkpoint. 
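+  // (The checkpoint is the highest pc up to which code can still be emitted
+  // without any tracked object missing its max_location_ deadline; the
+  // recalculation below walks the objects from the latest deadline backwards,
+  // shrinking the checkpoint by each object's size and alignment.)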
+ + // Check (conservatively) if we can fit it into the objects_ array, without + // breaking our assumptions. Here we want to recalculate the checkpoint as + // if the new reference was added to the PoolManager but without actually + // adding it (as removing it is non-trivial). + + T checkpoint = MaxCheckpoint(); + // Will temp be the last object in objects_? + if (PoolObjectLessThan(last, temp)) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + } + + bool tempNotPlacedYet = true; + for (int i = static_cast(objects_.size()) - 1; i >= 0; --i) { + const PoolObject& current = objects_[i]; + if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + if (CheckFuturePC(pc, checkpoint)) return true; + tempNotPlacedYet = false; + } + if (current.label_base_ == label_base) continue; + checkpoint = UpdateCheckpointForObject(checkpoint, ¤t); + if (checkpoint < current.min_location_) return true; + if (CheckFuturePC(pc, checkpoint)) return true; + } + // temp is the object with the smallest max_location_. + if (tempNotPlacedYet) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + } + + // Take the header into account. + checkpoint -= header_size_; + checkpoint = AlignDown(checkpoint, alignment_); + + return CheckFuturePC(pc, checkpoint); +} + +template +void PoolManager::RecalculateCheckpoint(SortOption sort_option) { + // TODO: Improve the max_pool_size_ estimate by starting from the + // min_location_ of the first object, calculating the end of the pool as if + // all objects were placed starting from there, and in the end adding the + // maximum object alignment found minus one (which is the maximum extra + // padding we would need if we were to relocate the pool to a different + // address). + max_pool_size_ = 0; + + if (objects_.empty()) { + checkpoint_ = MaxCheckpoint(); + return; + } + + // Sort objects by their max_location_. + if (sort_option == kSortRequired) { + std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan); + } + + // Add the header size and header and footer max alignment to the maximum + // pool size. + max_pool_size_ += header_size_ + 2 * (alignment_ - 1); + + T checkpoint = MaxCheckpoint(); + int last_object_index = static_cast(objects_.size()) - 1; + for (int i = last_object_index; i >= 0; --i) { + // Bring back the checkpoint by the size of the current object, unless + // we need to bring it back more, then align. + PoolObject& current = objects_[i]; + checkpoint = UpdateCheckpointForObject(checkpoint, ¤t); + VIXL_ASSERT(checkpoint >= current.min_location_); + max_pool_size_ += (current.alignment_ - 1 + + current.label_base_->GetPoolObjectSizeInBytes()); + } + // Take the header into account. + checkpoint -= header_size_; + checkpoint = AlignDown(checkpoint, alignment_); + + // Update the checkpoint of the pool manager. + checkpoint_ = checkpoint; + + // NOTE: To handle min_location_ in the generic case, we could make a second + // pass of the objects_ vector, increasing the checkpoint as needed, while + // maintaining the alignment requirements. 
+  // It should not be possible to have any issues with min_location_ with
+  // actual code, since there should always be some kind of branch over the
+  // pool, whether introduced by the pool emission or by the user, which will
+  // make sure the min_location_ requirement is satisfied. It's possible that
+  // the user could emit code in the literal pool and intentionally load the
+  // first value and then fall through into the pool, but that is not a
+  // supported use of VIXL and we will assert in that case.
+}
+
+template <typename T>
+bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
+                                        const PoolObject<T>& b) {
+  if (a.max_location_ != b.max_location_)
+    return (a.max_location_ < b.max_location_);
+  int a_size = a.label_base_->GetPoolObjectSizeInBytes();
+  int b_size = b.label_base_->GetPoolObjectSizeInBytes();
+  if (a_size != b_size) return (a_size < b_size);
+  if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_);
+  if (a.min_location_ != b.min_location_)
+    return (a.min_location_ < b.min_location_);
+  return false;
+}
+
+template <typename T>
+void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
+                                        LocationBase<T>* label_base) {
+  VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
+  VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);
+
+  PoolObject<T>* object = GetObjectIfTracked(label_base);
+
+  if (object == NULL) {
+    PoolObject<T> new_object(label_base);
+    new_object.RestrictRange(reference->min_object_location_,
+                             reference->max_object_location_);
+    new_object.RestrictAlignment(reference->object_alignment_);
+    Insert(new_object);
+  } else {
+    object->RestrictRange(reference->min_object_location_,
+                          reference->max_object_location_);
+    object->RestrictAlignment(reference->object_alignment_);
+
+    // Move the object, if needed.
+    if (objects_.size() != 1) {
+      PoolObject<T> new_object(*object);
+      ptrdiff_t distance = std::distance(objects_.data(), object);
+      objects_.erase(objects_.begin() + distance);
+      Insert(new_object);
+    }
+  }
+  // No need to sort, we inserted the object in an already sorted array.
+  RecalculateCheckpoint(kNoSortRequired);
+}
+
+template <typename T>
+void PoolManager<T>::Insert(const PoolObject<T>& new_object) {
+  bool inserted = false;
+  // Place the object in the right position.
+  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
+    PoolObject<T>& current = *iter;
+    if (!PoolObjectLessThan(current, new_object)) {
+      objects_.insert(iter, new_object);
+      inserted = true;
+      break;
+    }
+  }
+  if (!inserted) {
+    objects_.push_back(new_object);
+  }
+}
+
+template <typename T>
+void PoolManager<T>::RemoveAndDelete(PoolObject<T>* object) {
+  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
+    PoolObject<T>& current = *iter;
+    if (current.label_base_ == object->label_base_) {
+      (void)RemoveAndDelete(iter);
+      return;
+    }
+  }
+  VIXL_UNREACHABLE();
+}
+
+template <typename T>
+typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
+    objects_iter iter) {
+  PoolObject<T>& object = *iter;
+  LocationBase<T>* label_base = object.label_base_;
+
+  // Check if we also need to delete the LocationBase object.
+  if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
+    delete_on_destruction_.push_back(label_base);
+  }
+  if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) {
+    VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction());
+    delete label_base;
+  }
+
+  return objects_.erase(iter);
+}
+
+template <typename T>
+T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
+                       LocationBase<T>* object,
+                       T location) {
+  PoolObject<T>* existing_object = GetObjectIfTracked(object);
+  int alignment;
+  T min_location;
+  if (existing_object == NULL) {
+    alignment = object->GetMaxAlignment();
+    min_location = object->GetMinLocation();
+  } else {
+    alignment = existing_object->alignment_;
+    min_location = existing_object->min_location_;
+  }
+
+  // Align if needed, and add necessary padding to reach the min_location_.
+  T aligned_location = AlignUp(location, alignment);
+  masm->EmitNopBytes(aligned_location - location);
+  location = aligned_location;
+  while (location < min_location) {
+    masm->EmitNopBytes(alignment);
+    location += alignment;
+  }
+
+  object->SetLocation(masm->AsAssemblerBase(), location);
+  object->MarkBound();
+
+  if (existing_object != NULL) {
+    RemoveAndDelete(existing_object);
+    // No need to sort, we removed the object from a sorted array.
+    RecalculateCheckpoint(kNoSortRequired);
+  }
+
+  // We assume that the maximum padding we can possibly add here is less
+  // than the header alignment - hence that we're not going to go past our
+  // checkpoint.
+  VIXL_ASSERT(!CheckFuturePC(location, checkpoint_));
+  return location;
+}
+
+template <typename T>
+void PoolManager<T>::Release(T pc) {
+  USE(pc);
+  if (--monitor_ == 0) {
+    // Ensure the pool has not been blocked for too long.
+    VIXL_ASSERT(pc <= checkpoint_);
+  }
+}
+
+template <typename T>
+PoolManager<T>::~PoolManager() {
+#ifdef VIXL_DEBUG
+  // Check for unbound objects.
+  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
+    // There should not be any bound objects left in the pool. For unbound
+    // objects, we will check in the destructor of the object itself.
+    VIXL_ASSERT(!(*iter).label_base_->IsBound());
+  }
+#endif
+  // Delete objects the pool manager owns.
+  for (typename std::vector<LocationBase<T>*>::iterator
+           iter = delete_on_destruction_.begin(),
+           end = delete_on_destruction_.end();
+       iter != end;
+       ++iter) {
+    delete *iter;
+  }
+}
+
+template <typename T>
+int PoolManager<T>::GetPoolSizeForTest() const {
+  // Iterate over objects and return their cumulative size. This does not take
+  // any padding into account, just the size of the objects themselves.
+  int size = 0;
+  for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
+       ++iter) {
+    size += (*iter).label_base_->GetPoolObjectSizeInBytes();
+  }
+  return size;
+}
+}  // namespace vixl
+
+#endif  // VIXL_POOL_MANAGER_IMPL_H_
diff --git a/dep/vixl/include/vixl/pool-manager.h b/dep/vixl/include/vixl/pool-manager.h
new file mode 100644
index 000000000..b5cb867be
--- /dev/null
+++ b/dep/vixl/include/vixl/pool-manager.h
@@ -0,0 +1,555 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_POOL_MANAGER_H_
+#define VIXL_POOL_MANAGER_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <limits>
+#include <map>
+#include <vector>
+
+#include "globals-vixl.h"
+#include "macro-assembler-interface.h"
+#include "utils-vixl.h"
+
+namespace vixl {
+
+class TestPoolManager;
+
+// There are four classes declared in this header file:
+// PoolManager, PoolObject, ForwardReference and LocationBase.
+
+// The PoolManager manages both literal and veneer pools, and is designed to be
+// shared between AArch32 and AArch64. A pool is represented as an abstract
+// collection of references to objects. The manager does not need to know
+// architecture-specific details about literals and veneers; the actual
+// emission of the pool objects is delegated.
+//
+// Literal and Label will derive from LocationBase. The MacroAssembler will
+// create these objects as instructions that reference pool objects are
+// encountered, and ask the PoolManager to track them. The PoolManager will
+// create an internal PoolObject object for each object derived from
+// LocationBase. Some of these PoolObject objects will be deleted when placed
+// (e.g. the ones corresponding to Literals), whereas others will be updated
+// with a new range when placed (e.g. Veneers) and deleted when Bind() is
+// called on the PoolManager with their corresponding object as a parameter.
+//
+// A ForwardReference represents a reference to a PoolObject that will be
+// placed later in the instruction stream. Each ForwardReference may only refer
+// to one PoolObject, but many ForwardReferences may refer to the same
+// object.
+//
+// A PoolObject represents an object that has not yet been placed. The final
+// location of a PoolObject (and hence the LocationBase object to which it
+// corresponds) is constrained mostly by the instructions that refer to it, but
+// PoolObjects can also have inherent constraints, such as alignment.
+//
+// LocationBase objects, unlike PoolObject objects, can be used outside of the
+// pool manager (e.g. as manually placed literals, which may still have
+// forward references that need to be resolved).
+//
+// At the moment, each LocationBase will have at most one PoolObject that keeps
+// the relevant information for placing this object in the pool. When that
+// object is placed, all forward references of the object are resolved. For
+// that reason, we do not need to keep track of the ForwardReference objects in
+// the PoolObject.
+
+// T is an integral type used for representing locations. For a 32-bit
+// architecture it will typically be int32_t, whereas for a 64-bit
+// architecture it will be int64_t.
+template <typename T>
+class ForwardReference;
+template <typename T>
+class PoolObject;
+template <typename T>
+class PoolManager;
+
+// Represents an object that has a size and alignment, and either has a known
+// location or has not been placed yet. An object of a subclass of LocationBase
+// will typically keep track of a number of ForwardReferences when it has not
+// yet been placed, but LocationBase does not assume or implement that
+// functionality. LocationBase provides virtual methods for emitting the
+// object, updating all the forward references, and giving the PoolManager
+// information on the lifetime of this object and the corresponding PoolObject.
+template <typename T>
+class LocationBase {
+ public:
+  // The size of a LocationBase object is restricted to 4KB, in order to avoid
+  // situations where the size of the pool becomes larger than the range of
+  // an unconditional branch. This cannot happen without having large objects,
+  // as typically the range of an unconditional branch is the largest range
+  // an instruction supports.
+  // TODO: This would ideally be an architecture-specific value, perhaps
+  // another template parameter.
+  static const int kMaxObjectSize = 4 * KBytes;
+
+  // By default, LocationBase objects are aligned naturally to their size.
+  LocationBase(uint32_t type, int size)
+      : pool_object_size_(size),
+        pool_object_alignment_(size),
+        pool_object_type_(type),
+        is_bound_(false),
+        location_(0) {
+    VIXL_ASSERT(size > 0);
+    VIXL_ASSERT(size <= kMaxObjectSize);
+    VIXL_ASSERT(IsPowerOf2(size));
+  }
+
+  // Allow alignment to be specified, as long as it is smaller than the size.
+  LocationBase(uint32_t type, int size, int alignment)
+      : pool_object_size_(size),
+        pool_object_alignment_(alignment),
+        pool_object_type_(type),
+        is_bound_(false),
+        location_(0) {
+    VIXL_ASSERT(size > 0);
+    VIXL_ASSERT(size <= kMaxObjectSize);
+    VIXL_ASSERT(IsPowerOf2(alignment));
+    VIXL_ASSERT(alignment <= size);
+  }
+
+  // Constructor for locations that are already bound.
+  explicit LocationBase(T location)
+      : pool_object_size_(-1),
+        pool_object_alignment_(-1),
+        pool_object_type_(0),
+        is_bound_(true),
+        location_(location) {}
+
+  virtual ~LocationBase() {}
+
+  // The PoolManager should assume ownership of some objects, and delete them
+  // after they have been placed. This can happen for example for literals that
+  // are created internally to the MacroAssembler and the user doesn't get a
+  // handle to. By default, the PoolManager will not do this.
+  virtual bool ShouldBeDeletedOnPlacementByPoolManager() const { return false; }
+  // The PoolManager should assume ownership of some objects, and delete them
+  // when it is destroyed. By default, the PoolManager will not do this.
+  virtual bool ShouldBeDeletedOnPoolManagerDestruction() const { return false; }
+
+  // Emit the PoolObject. Derived classes will implement this method to emit
+  // the necessary data and/or code (for example, to emit a literal or a
+  // veneer). This should not add padding, as it is added explicitly by the
+  // pool manager.
+  virtual void EmitPoolObject(MacroAssemblerInterface* masm) = 0;
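+
+  // For illustration only (editor's sketch, not part of upstream VIXL): a
+  // minimal 4-byte data literal could override this hook roughly as follows,
+  // where EmitInt32 stands in for whatever emission helper the concrete
+  // macro assembler provides:
+  //
+  //   class Int32Literal : public LocationBase<int32_t> {
+  //    public:
+  //     explicit Int32Literal(int32_t value)
+  //         : LocationBase<int32_t>(0 /* type */, 4 /* size */),
+  //           value_(value) {}
+  //     virtual void EmitPoolObject(MacroAssemblerInterface* masm) {
+  //       masm->EmitInt32(value_);  // hypothetical emission helper
+  //     }
+  //     // ... ResolveReferences() etc. must also be implemented.
+  //    private:
+  //     int32_t value_;
+  //   };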
+
+  // Resolve the references to this object. Will encode the necessary offset
+  // in the instruction corresponding to each reference and then delete it.
+  // TODO: An alternative here would be to provide a ResolveReference()
+  // method that only asks the LocationBase to resolve a specific reference
+  // (thus allowing the pool manager to resolve some of the references only).
+  // This would mean we need to have some kind of API to get all the references
+  // to a LabelObject.
+  virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0;
+
+  // Returns true when the PoolObject corresponding to this LocationBase object
+  // needs to be removed from the pool once placed, and false if it needs to
+  // be updated instead (in which case UpdatePoolObject will be called).
+  virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; }
+
+  // Update the PoolObject after placing it, if necessary. This will happen for
+  // example in the case of a placed veneer, where we need to use a new updated
+  // range and a new reference (from the newly added branch instruction).
+  // By default, this does nothing, to avoid forcing objects that will not need
+  // this to have an empty implementation.
+  virtual void UpdatePoolObject(PoolObject<T>*) {}
+
+  // Implement heuristics for emitting this object. If a margin is to be used
+  // as a hint during pool emission, we will try not to emit the object if we
+  // are further away from the maximum reachable location by more than the
+  // margin.
+  virtual bool UsePoolObjectEmissionMargin() const { return false; }
+  virtual T GetPoolObjectEmissionMargin() const {
+    VIXL_ASSERT(UsePoolObjectEmissionMargin() == false);
+    return 0;
+  }
+
+  int GetPoolObjectSizeInBytes() const { return pool_object_size_; }
+  int GetPoolObjectAlignment() const { return pool_object_alignment_; }
+  uint32_t GetPoolObjectType() const { return pool_object_type_; }
+
+  bool IsBound() const { return is_bound_; }
+  T GetLocation() const { return location_; }
+
+  // This function can be called multiple times before the object is marked as
+  // bound with MarkBound() below. This is because some objects (e.g. the ones
+  // used to represent labels) can have veneers; every time we place a veneer
+  // we need to keep track of the location in order to resolve the references
+  // to the object. Reusing the location_ field for this is convenient.
+  void SetLocation(internal::AssemblerBase* assembler, T location) {
+    VIXL_ASSERT(!is_bound_);
+    location_ = location;
+    ResolveReferences(assembler);
+  }
+
+  void MarkBound() {
+    VIXL_ASSERT(!is_bound_);
+    is_bound_ = true;
+  }
+
+  // The following two functions are used when an object is bound by a call to
+  // PoolManager::Bind().
+  virtual int GetMaxAlignment() const {
+    VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
+    return 1;
+  }
+  virtual T GetMinLocation() const {
+    VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
+    return 0;
+  }
+
+ private:
+  // The size of the corresponding PoolObject, in bytes.
+  int pool_object_size_;
+  // The alignment of the corresponding PoolObject; this must be a power of
+  // two.
+  int pool_object_alignment_;
+
+  // Different derived classes should have different type values. This can be
+  // used internally by the PoolManager for grouping of objects.
+  uint32_t pool_object_type_;
+  // Has the object been bound to a location yet?
+  bool is_bound_;
+
+ protected:
+  // See comment on SetLocation() for the use of this field.
+  T location_;
+};
+
+template <typename T>
+class PoolObject {
+ public:
+  // By default, PoolObjects have no inherent position constraints.
+  explicit PoolObject(LocationBase<T>* parent)
+      : label_base_(parent),
+        min_location_(0),
+        max_location_(std::numeric_limits<T>::max()),
+        alignment_(parent->GetPoolObjectAlignment()),
+        skip_until_location_hint_(0),
+        type_(parent->GetPoolObjectType()) {
+    VIXL_ASSERT(IsPowerOf2(alignment_));
+    UpdateLocationHint();
+  }
+
+  // Reset the minimum and maximum location and the alignment of the object.
+  // This function is public in order to allow the LocationBase corresponding
+  // to this PoolObject to update the PoolObject when placed, e.g. in the case
+  // of veneers. The size and type of the object cannot be modified.
+  void Update(T min, T max, int alignment) {
+    // We don't use RestrictRange here as the new range is independent of the
+    // old range (and the maximum location is typically larger).
+    min_location_ = min;
+    max_location_ = max;
+    RestrictAlignment(alignment);
+    UpdateLocationHint();
+  }
+
+ private:
+  void RestrictRange(T min, T max) {
+    VIXL_ASSERT(min <= max_location_);
+    VIXL_ASSERT(max >= min_location_);
+    min_location_ = std::max(min_location_, min);
+    max_location_ = std::min(max_location_, max);
+    UpdateLocationHint();
+  }
+
+  void RestrictAlignment(int alignment) {
+    VIXL_ASSERT(IsPowerOf2(alignment));
+    VIXL_ASSERT(IsPowerOf2(alignment_));
+    alignment_ = std::max(alignment_, alignment);
+  }
+
+  void UpdateLocationHint() {
+    if (label_base_->UsePoolObjectEmissionMargin()) {
+      skip_until_location_hint_ =
+          max_location_ - label_base_->GetPoolObjectEmissionMargin();
+    }
+  }
+
+  // The LocationBase that this pool object represents.
+  LocationBase<T>* label_base_;
+
+  // Hard, precise location constraints for the start location of the object.
+  // They are both inclusive, that is the start location of the object can be
+  // at any location between min_location_ and max_location_, themselves
+  // included.
+  T min_location_;
+  T max_location_;
+
+  // The alignment must be a power of two.
+  int alignment_;
+
+  // Avoid generating this object until skip_until_location_hint_. This
+  // supports cases where placing the object in the pool has an inherent cost
+  // that could be avoided in some other way. Veneers are a typical example; we
+  // would prefer to branch directly (over a pool) rather than use veneers, so
+  // this value can be set using some heuristic to leave them in the pool.
+  // This value is only a hint, which will be ignored if it has to in order to
+  // meet the hard constraints we have.
+  T skip_until_location_hint_;
+
+  // Used only to group objects of similar type together. The PoolManager does
+  // not know what the types represent.
+  uint32_t type_;
+
+  friend class PoolManager<T>;
+};
+
+// Class that represents a forward reference. It is the responsibility of
+// LocationBase objects to keep track of forward references and patch them when
+// an object is placed - this class is only used by the PoolManager in order to
+// restrict the requirements on PoolObjects it is tracking.
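+//
+// As an illustration (editor's sketch, not upstream documentation): a 4-byte
+// load at location 0x100 that can address an object anywhere in the following
+// 4KB, with natural alignment, could be described as:
+//
+//   ForwardReference<int32_t> ref(0x100,   // location of the instruction
+//                                 4,       // size of the instruction
+//                                 0x104,   // min object location
+//                                 0x1100,  // max object location
+//                                 4);      // object alignment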
+template <typename T>
+class ForwardReference {
+ public:
+  ForwardReference(T location,
+                   int size,
+                   T min_object_location,
+                   T max_object_location,
+                   int object_alignment = 1)
+      : location_(location),
+        size_(size),
+        object_alignment_(object_alignment),
+        min_object_location_(min_object_location),
+        max_object_location_(max_object_location) {
+    VIXL_ASSERT(AlignDown(max_object_location, object_alignment) >=
+                min_object_location);
+  }
+
+  bool LocationIsEncodable(T location) const {
+    return location >= min_object_location_ &&
+           location <= max_object_location_ &&
+           IsAligned(location, object_alignment_);
+  }
+
+  T GetLocation() const { return location_; }
+  T GetMinLocation() const { return min_object_location_; }
+  T GetMaxLocation() const { return max_object_location_; }
+  int GetAlignment() const { return object_alignment_; }
+
+  // Needed for InvalSet.
+  void SetLocationToInvalidateOnly(T location) { location_ = location; }
+
+ private:
+  // The location of the thing that contains the reference. For example, this
+  // can be the location of the branch or load instruction.
+  T location_;
+
+  // The size of the instruction that makes the reference, in bytes.
+  int size_;
+
+  // The alignment that the object must satisfy for this reference - must be a
+  // power of two.
+  int object_alignment_;
+
+  // Specify the possible locations where the object could be stored. AArch32's
+  // PC offset, and T32's PC alignment calculations should be applied by the
+  // Assembler, not here. The PoolManager deals only with simple locations.
+  // Including min_object_location_ is necessary to handle some AArch32
+  // instructions which have a minimum offset of 0, but also have the implicit
+  // PC offset.
+  // Note that this structure cannot handle sparse ranges, such as A32's ADR,
+  // but doing so is costly and probably not useful in practice. The min and
+  // max object locations both refer to the beginning of the object, are
+  // inclusive and are not affected by the object size. E.g. if
+  // max_object_location_ is equal to X, we can place the object at location X
+  // regardless of its size.
+  T min_object_location_;
+  T max_object_location_;
+
+  friend class PoolManager<T>;
+};
+
+
+template <typename T>
+class PoolManager {
+ public:
+  PoolManager(int header_size, int alignment, int buffer_alignment)
+      : header_size_(header_size),
+        alignment_(alignment),
+        buffer_alignment_(buffer_alignment),
+        checkpoint_(std::numeric_limits<T>::max()),
+        max_pool_size_(0),
+        monitor_(0) {}
+
+  ~PoolManager();
+
+  // Check if we will need to emit the pool at location 'pc', when planning to
+  // generate a certain number of bytes. This optionally takes a
+  // ForwardReference we are about to generate, in which case the size of the
+  // reference must be included in 'num_bytes'.
+  bool MustEmit(T pc,
+                int num_bytes = 0,
+                ForwardReference<T>* reference = NULL,
+                LocationBase<T>* object = NULL) const;
+
+  enum EmitOption { kBranchRequired, kNoBranchRequired };
+
+  // Emit the pool at location 'pc', using 'masm' as the macroassembler.
+  // The branch over the header can be optionally omitted using 'option'.
+  // Returns the new PC after pool emission.
+  // This expects a number of bytes that are about to be emitted, to be taken
+  // into account in heuristics for pool object emission.
+  // This also optionally takes a forward reference and an object as
+  // parameters, to be used in the case where emission of the pool is triggered
+  // by adding a new reference to the pool that does not fit. The pool manager
+  // will need this information in order to apply its heuristics correctly.
+  T Emit(MacroAssemblerInterface* masm,
+         T pc,
+         int num_bytes = 0,
+         ForwardReference<T>* new_reference = NULL,
+         LocationBase<T>* new_object = NULL,
+         EmitOption option = kBranchRequired);
+
+  // Add 'reference' to 'object'. Should not be preceded by a call to
+  // MustEmit() that returned true, unless Emit() has been successfully called
+  // afterwards.
+  void AddObjectReference(const ForwardReference<T>* reference,
+                          LocationBase<T>* object);
+
+  // This is to notify the pool that a LocationBase has been bound to a
+  // location and does not need to be tracked anymore.
+  // This will happen, for example, for Labels, which are manually bound by
+  // the user.
+  // This can potentially add some padding bytes in order to meet the object
+  // requirements, and will return the new location.
+  T Bind(MacroAssemblerInterface* masm, LocationBase<T>* object, T location);
+
+  // Functions for blocking and releasing the pools.
+  void Block() { monitor_++; }
+  void Release(T pc);
+  bool IsBlocked() const { return monitor_ != 0; }
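+
+  // Typical call sequence (editor's sketch of the intended usage, not
+  // upstream documentation): before emitting an instruction that references
+  // the pool, a macro assembler would do something like:
+  //
+  //   ForwardReference<int32_t> ref(pc, 4, pc + 4, pc + 4096);
+  //   if (pool_manager.MustEmit(pc, 4, &ref, &literal)) {
+  //     pc = pool_manager.Emit(masm, pc, 4, &ref, &literal);
+  //   }
+  //   pool_manager.AddObjectReference(&ref, &literal);
+  //
+  // where 'literal' is an instance of a LocationBase<int32_t> subclass.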
+
+ private:
+  typedef typename std::vector<PoolObject<T> >::iterator objects_iter;
+  typedef
+      typename std::vector<PoolObject<T> >::const_iterator const_objects_iter;
+
+  PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) {
+    return const_cast<PoolObject<T>*>(
+        static_cast<const PoolManager<T>*>(this)->GetObjectIfTracked(label));
+  }
+
+  const PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) const {
+    for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
+         ++iter) {
+      const PoolObject<T>& current = *iter;
+      if (current.label_base_ == label) return &current;
+    }
+    return NULL;
+  }
+
+  // Helper function for calculating the checkpoint.
+  enum SortOption { kSortRequired, kNoSortRequired };
+  void RecalculateCheckpoint(SortOption sort_option = kSortRequired);
+
+  // Comparison function for using std::sort() on objects_. PoolObject A is
+  // ordered before PoolObject B when A should be emitted before B. The
+  // comparison depends on the max_location_, size_, alignment_ and
+  // min_location_.
+  static bool PoolObjectLessThan(const PoolObject<T>& a,
+                                 const PoolObject<T>& b);
+
+  // Helper function used in the checkpoint calculation. 'checkpoint' is the
+  // current checkpoint, which is modified to take 'object' into account. The
+  // new checkpoint is returned.
+  static T UpdateCheckpointForObject(T checkpoint, const PoolObject<T>* object);
+
+  // Helper function to add a new object into a sorted objects_ array.
+  void Insert(const PoolObject<T>& new_object);
+
+  // Helper functions to remove an object from objects_ and delete the
+  // corresponding LocationBase object, if necessary. This will be called
+  // either after placing the object, or when Bind() is called.
+  void RemoveAndDelete(PoolObject<T>* object);
+  objects_iter RemoveAndDelete(objects_iter iter);
+
+  // Helper function to check if we should skip emitting an object.
+  bool ShouldSkipObject(PoolObject<T>* pool_object,
+                        T pc,
+                        int num_bytes,
+                        ForwardReference<T>* new_reference,
+                        LocationBase<T>* new_object,
+                        PoolObject<T>* existing_object) const;
+
+  // Used only for debugging.
+  void DumpCurrentState(T pc) const;
+
+  // Methods used for testing only, via the test friend classes.
+  bool PoolIsEmptyForTest() const { return objects_.empty(); }
+  T GetCheckpointForTest() const { return checkpoint_; }
+  int GetPoolSizeForTest() const;
+
+  // The objects we are tracking references to. The objects_ vector is sorted
+  // at all times between calls to the public members of the PoolManager. It
+  // is sorted every time we add, delete or update a PoolObject.
+  // TODO: Consider a more efficient data structure here, to allow us to delete
+  // elements as we emit them.
+  std::vector<PoolObject<T> > objects_;
+
+  // Objects to be deleted on pool destruction.
+  std::vector<LocationBase<T>*> delete_on_destruction_;
+
+  // The header_size_ and alignment_ values are hardcoded for each instance of
+  // PoolManager. The PoolManager does not know how to emit the header, and
+  // relies on the EmitPoolHeader and EndPool methods of the
+  // MacroAssemblerInterface for that. It will also emit padding if necessary,
+  // both for the header and at the end of the pool, according to alignment_,
+  // and using the EmitNopBytes and EmitPaddingBytes methods of the
+  // MacroAssemblerInterface.
+
+  // The size of the header, in bytes.
+  int header_size_;
+  // The alignment of the header - must be a power of two.
+  int alignment_;
+  // The alignment of the buffer - we cannot guarantee any object alignment
+  // larger than this alignment. When a buffer is grown, this alignment has
+  // to be guaranteed.
+  // TODO: Consider extending this to describe the guaranteed alignment as the
+  // modulo of a known number.
+  int buffer_alignment_;
+
+  // The current checkpoint. This is the latest location at which the pool
+  // *must* be emitted. This should not be visible outside the pool manager
+  // and should only be updated in RecalculateCheckpoint.
+  T checkpoint_;
+
+  // Maximum size of the pool, assuming we need the maximum possible padding
+  // for each object and for the header. It is only updated in
+  // RecalculateCheckpoint.
+  T max_pool_size_;
+
+  // Indicates whether the emission of this pool is blocked.
+  int monitor_;
+
+  friend class vixl::TestPoolManager;
+};
+
+
+}  // namespace vixl
+
+#endif  // VIXL_POOL_MANAGER_H_
diff --git a/dep/vixl/include/vixl/utils-vixl.h b/dep/vixl/include/vixl/utils-vixl.h
new file mode 100644
index 000000000..1c76fcb28
--- /dev/null
+++ b/dep/vixl/include/vixl/utils-vixl.h
@@ -0,0 +1,1281 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <vector>
+
+#include "compiler-intrinsics-vixl.h"
+#include "globals-vixl.h"
+
+namespace vixl {
+
+// Macros for compile-time format checking.
+#if GCC_VERSION_OR_NEWER(4, 4, 0)
+#define PRINTF_CHECK(format_index, varargs_index) \
+  __attribute__((format(gnu_printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
+#ifdef __GNUC__
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#elif defined(__clang__)
+#if __has_extension(attribute_deprecated_with_message)
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#endif
+#endif
+
+#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
+#define VIXL_DEPRECATED(replaced_by, declarator) \
+  __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
+#else
+#define VIXL_DEPRECATED(replaced_by, declarator) declarator
+#endif
+
+#ifdef VIXL_DEBUG
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
+#else
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
+#endif
+
+template <typename T, size_t n>
+size_t ArrayLength(const T (&)[n]) {
+  return n;
+}
+
+// Check number width.
+// TODO: Refactor these using templates.
+inline bool IsIntN(unsigned n, uint32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  uint32_t limit = UINT32_C(1) << (n - 1);
+  return x < limit;
+}
+inline bool IsIntN(unsigned n, int32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  int32_t limit = INT32_C(1) << (n - 1);
+  return (-limit <= x) && (x < limit);
+}
+inline bool IsIntN(unsigned n, uint64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  uint64_t limit = UINT64_C(1) << (n - 1);
+  return x < limit;
+}
+inline bool IsIntN(unsigned n, int64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  int64_t limit = INT64_C(1) << (n - 1);
+  return (-limit <= x) && (x < limit);
+}
+VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
+  return IsIntN(n, x);
+}
+
+inline bool IsUintN(unsigned n, uint32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  // Convert to an unsigned integer to avoid implementation-defined behavior.
+  return !(static_cast<uint32_t>(x) >> n);
+}
+inline bool IsUintN(unsigned n, uint64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  // Convert to an unsigned integer to avoid implementation-defined behavior.
+  return !(static_cast<uint64_t>(x) >> n);
+}
+VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
+  return IsUintN(n, x);
+}
+
+inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
+}
+VIXL_DEPRECATED("TruncateToUintN",
+                inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
+  return TruncateToUintN(n, x);
+}
+
+// clang-format off
+#define INT_1_TO_32_LIST(V)                      \
+V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)   \
+V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)  \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)  \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)
+
+#define INT_33_TO_63_LIST(V)                     \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)  \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)  \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)  \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)
+// clang-format on
+
+#define DECLARE_IS_INT_N(N)                                       \
+  inline bool IsInt##N(int64_t x) { return IsIntN(N, x); }        \
+  VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
+    return IsIntN(N, x);                                          \
+  }
+
+#define DECLARE_IS_UINT_N(N)                                        \
+  inline bool IsUint##N(int64_t x) { return IsUintN(N, x); }        \
+  VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
+    return IsUintN(N, x);                                           \
+  }
+
+#define DECLARE_TRUNCATE_TO_UINT_32(N)                             \
+  inline uint32_t TruncateToUint##N(uint64_t x) {                  \
+    return static_cast<uint32_t>(TruncateToUintN(N, x));           \
+  }                                                                \
+  VIXL_DEPRECATED("TruncateToUint" #N,                             \
+                  inline uint32_t truncate_to_int##N(int64_t x)) { \
+    return TruncateToUint##N(x);                                   \
+  }
+
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
+
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_UINT_32
+
+// Bit field extraction.
+inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  if ((msb == 63) && (lsb == 0)) return x;
+  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+
+inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
+}
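+
+
+// Worked example (editor's note, not upstream documentation):
+// ExtractUnsignedBitfield64(7, 4, 0xab) isolates bits [7:4] of 0xab
+// (0b10101011), giving 0xa. ExtractSignedBitfield64 below performs the same
+// extraction and then sign-extends from the top extracted bit.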
+
+
+inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
+  // If the highest extracted bit is set, sign extend.
+  if ((temp >> (msb - lsb)) == 1) {
+    temp |= ~UINT64_C(0) << (msb - lsb);
+  }
+  int64_t result;
+  memcpy(&result, &temp, sizeof(result));
+  return result;
+}
+
+
+inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
+  int32_t result;
+  memcpy(&result, &temp, sizeof(result));
+  return result;
+}
+
+
+inline uint64_t RotateRight(uint64_t value,
+                            unsigned int rotate,
+                            unsigned int width) {
+  VIXL_ASSERT((width > 0) && (width <= 64));
+  uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
+  rotate &= 63;
+  if (rotate > 0) {
+    value &= width_mask;
+    value = (value << (width - rotate)) | (value >> rotate);
+  }
+  return value & width_mask;
+}
+
+
+// Wrapper class for passing FP16 values through the assembler.
+// This is purely to aid with type checking/casting.
+class Float16 {
+ public:
+  explicit Float16(double dvalue);
+  Float16() : rawbits_(0x0) {}
+  friend uint16_t Float16ToRawbits(Float16 value);
+  friend Float16 RawbitsToFloat16(uint16_t bits);
+
+ protected:
+  uint16_t rawbits_;
+};
+
+// Floating point representation.
+uint16_t Float16ToRawbits(Float16 value);
+
+
+uint32_t FloatToRawbits(float value);
+VIXL_DEPRECATED("FloatToRawbits",
+                inline uint32_t float_to_rawbits(float value)) {
+  return FloatToRawbits(value);
+}
+
+uint64_t DoubleToRawbits(double value);
+VIXL_DEPRECATED("DoubleToRawbits",
+                inline uint64_t double_to_rawbits(double value)) {
+  return DoubleToRawbits(value);
+}
+
+Float16 RawbitsToFloat16(uint16_t bits);
+
+float RawbitsToFloat(uint32_t bits);
+VIXL_DEPRECATED("RawbitsToFloat",
+                inline float rawbits_to_float(uint32_t bits)) {
+  return RawbitsToFloat(bits);
+}
+
+double RawbitsToDouble(uint64_t bits);
+VIXL_DEPRECATED("RawbitsToDouble",
+                inline double rawbits_to_double(uint64_t bits)) {
+  return RawbitsToDouble(bits);
+}
+
+namespace internal {
+
+// Internal simulation class used solely by the simulator to
+// provide an abstraction layer for any half-precision arithmetic.
+class SimFloat16 : public Float16 {
+ public:
+  // TODO: We should investigate making this constructor explicit.
+  // This is currently difficult to do due to a number of templated
+  // functions in the simulator which rely on returning double values.
+  SimFloat16(double dvalue) : Float16(dvalue) {}  // NOLINT(runtime/explicit)
+  SimFloat16(Float16 f) {  // NOLINT(runtime/explicit)
+    this->rawbits_ = Float16ToRawbits(f);
+  }
+  SimFloat16() : Float16() {}
+  SimFloat16 operator-() const;
+  SimFloat16 operator+(SimFloat16 rhs) const;
+  SimFloat16 operator-(SimFloat16 rhs) const;
+  SimFloat16 operator*(SimFloat16 rhs) const;
+  SimFloat16 operator/(SimFloat16 rhs) const;
+  bool operator<(SimFloat16 rhs) const;
+  bool operator>(SimFloat16 rhs) const;
+  bool operator==(SimFloat16 rhs) const;
+  bool operator!=(SimFloat16 rhs) const;
+  // This is necessary for conversions performed in (macro asm) Fmov.
+  bool operator==(double rhs) const;
+  operator double() const;
+};
+}  // namespace internal
+
+uint32_t Float16Sign(internal::SimFloat16 value);
+
+uint32_t Float16Exp(internal::SimFloat16 value);
+
+uint32_t Float16Mantissa(internal::SimFloat16 value);
+
+uint32_t FloatSign(float value);
+VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
+  return FloatSign(value);
+}
+
+uint32_t FloatExp(float value);
+VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
+  return FloatExp(value);
+}
+
+uint32_t FloatMantissa(float value);
+VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
+  return FloatMantissa(value);
+}
+
+uint32_t DoubleSign(double value);
+VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
+  return DoubleSign(value);
+}
+
+uint32_t DoubleExp(double value);
+VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
+  return DoubleExp(value);
+}
+
+uint64_t DoubleMantissa(double value);
+VIXL_DEPRECATED("DoubleMantissa",
+                inline uint64_t double_mantissa(double value)) {
+  return DoubleMantissa(value);
+}
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+                                 uint16_t exp,
+                                 uint16_t mantissa);
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+VIXL_DEPRECATED("FloatPack",
+                inline float float_pack(uint32_t sign,
+                                        uint32_t exp,
+                                        uint32_t mantissa)) {
+  return FloatPack(sign, exp, mantissa);
+}
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+VIXL_DEPRECATED("DoublePack",
+                inline double double_pack(uint32_t sign,
+                                          uint32_t exp,
+                                          uint64_t mantissa)) {
+  return DoublePack(sign, exp, mantissa);
+}
+
+// An fpclassify() function for 16-bit half-precision floats.
+int Float16Classify(Float16 value);
+VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
+  return Float16Classify(RawbitsToFloat16(value));
+}
+
+bool IsZero(Float16 value);
+
+inline bool IsNaN(float value) { return std::isnan(value); }
+
+inline bool IsNaN(double value) { return std::isnan(value); }
+
+inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }
+
+inline bool IsInf(float value) { return std::isinf(value); }
+
+inline bool IsInf(double value) { return std::isinf(value); }
+
+inline bool IsInf(Float16 value) {
+  return Float16Classify(value) == FP_INFINITE;
+}
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+  uint64_t raw = DoubleToRawbits(num);
+  if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+    return true;
+  }
+  return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+  const uint32_t kFP32QuietNaNMask = 0x00400000;
+  uint32_t raw = FloatToRawbits(num);
+  if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+    return true;
+  }
+  return false;
+}
+
+
+inline bool IsSignallingNaN(Float16 num) {
+  const uint16_t kFP16QuietNaNMask = 0x0200;
+  return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+  return IsNaN(num) && !IsSignallingNaN(num);
+}
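+
+
+// Editor's illustration (not upstream documentation): for double, the quiet
+// bit is bit 51. The signalling NaN 0x7ff0000000000001 has it clear, so
+// IsSignallingNaN() returns true for it; ToQuietNaN() below ORs in
+// kFP64QuietNaNMask to produce the quiet NaN 0x7ff8000000000001.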
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+  VIXL_ASSERT(IsNaN(num));
+  return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+  const uint32_t kFP32QuietNaNMask = 0x00400000;
+  VIXL_ASSERT(IsNaN(num));
+  return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
+}
+
+
+inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
+  const uint16_t kFP16QuietNaNMask = 0x0200;
+  VIXL_ASSERT(IsNaN(num));
+  return internal::SimFloat16(
+      RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+  return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+  return fmaf(op1, op2, a);
+}
+
+
+inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
+
+
+template <typename T>
+inline int HighestSetBitPosition(T value) {
+  VIXL_ASSERT(value != 0);
+  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
+}
+
+
+template <typename V>
+inline int WhichPowerOf2(V value) {
+  VIXL_ASSERT(IsPowerOf2(value));
+  return CountTrailingZeros(value);
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+
+int BitCount(uint64_t value);
+
+
+template <typename T>
+T ReverseBits(T value) {
+  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+              (sizeof(value) == 4) || (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
+
+
+template <typename T>
+inline T SignExtend(T val, int bitSize) {
+  VIXL_ASSERT(bitSize > 0);
+  T mask = (T(2) << (bitSize - 1)) - T(1);
+  val &= mask;
+  T sign_bits = -((val >> (bitSize - 1)) << bitSize);
+  val |= sign_bits;
+  return val;
+}
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
+  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
+  // Split the 64-bit value into an 8-bit array, where b[0] is the least
+  // significant byte, and b[7] is the most significant.
+  uint8_t bytes[8];
+  uint64_t mask = UINT64_C(0xff00000000000000);
+  for (int i = 7; i >= 0; i--) {
+    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+    mask >>= 8;
+  }
+
+  // Permutation tables for REV instructions.
+  //  permute_table[0] is used by REV16_x, REV16_w
+  //  permute_table[1] is used by REV32_x, REV_w
+  //  permute_table[2] is used by REV_x
+  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+                                              {4, 5, 6, 7, 0, 1, 2, 3},
+                                              {0, 1, 2, 3, 4, 5, 6, 7}};
+  uint64_t temp = 0;
+  for (int i = 0; i < 8; i++) {
+    temp <<= 8;
+    temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
+  }
+
+  T result;
+  VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
+  memcpy(&result, &temp, sizeof(result));
+  return result;
+}
+
+template <unsigned MULTIPLE, typename T>
+inline bool IsMultiple(T value) {
+  VIXL_ASSERT(IsPowerOf2(MULTIPLE));
+  return (value & (MULTIPLE - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsMultiple(T value, unsigned multiple) {
+  VIXL_ASSERT(IsPowerOf2(multiple));
+  return (value & (multiple - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsAligned(T pointer, int alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
+  return (pointer & (alignment - 1)) == 0;
+}
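+
+// Worked example (editor's note, not upstream documentation): with T =
+// int32_t, SignExtend(0xff, 8) above masks to the low 8 bits and propagates
+// bit 7, yielding -1, while SignExtend(0x7f, 8) leaves the value at 127.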
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template <unsigned ALIGN, typename T>
+inline bool IsAligned(T pointer) {
+  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+  return IsAligned((intptr_t)(pointer), ALIGN);
+}
+
+template <typename T>
+bool IsWordAligned(T pointer) {
+  return IsAligned<4>(pointer);
+}
+
+// Increment a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignUp(T pointer,
+          typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+
+  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
+      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+  size_t mask = alignment - 1;
+  T result = (T)((pointer_raw + mask) & ~mask);
+  VIXL_ASSERT(result >= pointer);
+
+  return result;
+}
+
+// Decrement a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignDown(T pointer,
+            typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+
+  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
+      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+  size_t mask = alignment - 1;
+  return (T)(pointer_raw & ~mask);
+}
+
+
+template <typename T>
+inline T ExtractBit(T value, unsigned bit) {
+  return (value >> bit) & T(1);
+}
+
+template <typename Ts, typename Td>
+inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
+  return Td((value >> least_significant_bit) & Ts(mask));
+}
+
+template <typename Td, typename Ts>
+inline void AssignBit(Td& dst,  // NOLINT(runtime/references)
+                      int bit,
+                      Ts value) {
+  VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
+  VIXL_ASSERT(bit >= 0);
+  VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
+  Td mask(1);
+  dst &= ~(mask << bit);
+  dst |= Td(value) << bit;
+}
+
+template <typename Td, typename Ts>
+inline void AssignBits(Td& dst,  // NOLINT(runtime/references)
+                       int least_significant_bit,
+                       Ts mask,
+                       Ts value) {
+  VIXL_ASSERT(least_significant_bit >= 0);
+  VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
+  VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
+              Td(mask));
+  VIXL_ASSERT((value & mask) == value);
+  dst &= ~(Td(mask) << least_significant_bit);
+  dst |= Td(value) << least_significant_bit;
+}
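+
+// Worked example (editor's note, not upstream documentation): given
+// uint32_t insn = 0; then AssignBits(insn, 5, 0x1fU, 0x0aU) writes the value
+// 0b01010 into bits [9:5] of insn (producing 0x140), and
+// ExtractBits(insn, 5, 0x1fU) reads it back as 0x0a.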
+
+class VFP {
+ public:
+  static uint32_t FP32ToImm8(float imm) {
+    // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+    uint32_t bits = FloatToRawbits(imm);
+    // bit7: a000.0000
+    uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+    // bit6: 0b00.0000
+    uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+    // bit5_to_0: 00cd.efgh
+    uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+  }
+  static uint32_t FP64ToImm8(double imm) {
+    // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //       0000.0000.0000.0000.0000.0000.0000.0000
+    uint64_t bits = DoubleToRawbits(imm);
+    // bit7: a000.0000
+    uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+    // bit6: 0b00.0000
+    uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+    // bit5_to_0: 00cd.efgh
+    uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+  }
+  static float Imm8ToFP32(uint32_t imm8) {
+    // Imm8: abcdefgh (8 bits)
+    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint32_t bit7 = (bits >> 7) & 0x1;
+    uint32_t bit6 = (bits >> 6) & 0x1;
+    uint32_t bit5_to_0 = bits & 0x3f;
+    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+    return RawbitsToFloat(result);
+  }
+  static double Imm8ToFP64(uint32_t imm8) {
+    // Imm8: abcdefgh (8 bits)
+    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint64_t bit7 = (bits >> 7) & 0x1;
+    uint64_t bit6 = (bits >> 6) & 0x1;
+    uint64_t bit5_to_0 = bits & 0x3f;
+    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+    return RawbitsToDouble(result);
+  }
+  static bool IsImmFP32(float imm) {
+    // Valid values will have the form:
+    //   aBbb.bbbc.defg.h000.0000.0000.0000.0000
+    uint32_t bits = FloatToRawbits(imm);
+    // bits[19..0] are cleared.
+    if ((bits & 0x7ffff) != 0) {
+      return false;
+    }
+
+    // bits[29..25] are all set or all cleared.
+    uint32_t b_pattern = (bits >> 16) & 0x3e00;
+    if (b_pattern != 0 && b_pattern != 0x3e00) {
+      return false;
+    }
+    // bit[30] and bit[29] are opposite.
+    if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+      return false;
+    }
+    return true;
+  }
+  static bool IsImmFP64(double imm) {
+    // Valid values will have the form:
+    //   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //   0000.0000.0000.0000.0000.0000.0000.0000
+    uint64_t bits = DoubleToRawbits(imm);
+    // bits[47..0] are cleared.
+    if ((bits & 0x0000ffffffffffff) != 0) {
+      return false;
+    }
+    // bits[61..54] are all set or all cleared.
+    uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+    if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+      return false;
+    }
+    // bit[62] and bit[61] are opposite.
+    if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+      return false;
+    }
+    return true;
+  }
+};
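+
+// Round-trip example (editor's note, not upstream documentation): 0.25f is
+// representable as an 8-bit FP immediate, so IsImmFP32(0.25f) is true,
+// FP32ToImm8(0.25f) yields its 8-bit encoding, and
+// Imm8ToFP32(FP32ToImm8(0.25f)) returns 0.25f again.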
+
+class BitField {
+  // ForEachBitHelper is a functor that will call
+  //   bool ForEachBitHelper::execute(ElementType id) const
+  // and expects a boolean in return whether to continue (if true)
+  // or stop (if false).
+  // check_set will check if the bits are on (true) or off (false).
+  template <typename ForEachBitHelper, bool check_set>
+  bool ForEachBit(const ForEachBitHelper& helper) {
+    for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
+      if (bitfield_[i] == check_set)
+        if (!helper.execute(i)) return false;
+    }
+    return true;
+  }
+
+ public:
+  explicit BitField(unsigned size) : bitfield_(size, 0) {}
+
+  void Set(int i) {
+    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = true;
+  }
+
+  void Unset(int i) {
+    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = false;
+  }
+
+  bool IsSet(int i) const { return bitfield_[i]; }
+
+  // For each bit not set in the bitfield call the execute functor
+  // execute.
+  // ForEachBitSetHelper::execute returns true if the iteration through
+  // the bits can continue, otherwise it will stop.
+  // struct ForEachBitSetHelper {
+  //   bool execute(int /*id*/) { return false; }
+  // };
+  template <typename ForEachBitNotSetHelper>
+  bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
+    return ForEachBit<ForEachBitNotSetHelper, false>(helper);
+  }
+
+  // For each bit set in the bitfield call the execute functor
+  // execute.
+  template <typename ForEachBitSetHelper>
+  bool ForEachBitSet(const ForEachBitSetHelper& helper) {
+    return ForEachBit<ForEachBitSetHelper, true>(helper);
+  }
+
+ private:
+  std::vector<bool> bitfield_;
+};
+
+namespace internal {
+
+typedef int64_t Int64;
+class Uint64;
+class Uint128;
+
+class Uint32 {
+  uint32_t data_;
+
+ public:
+  // Unlike uint32_t, Uint32 has a default constructor.
+  Uint32() { data_ = 0; }
+  explicit Uint32(uint32_t data) : data_(data) {}
+  inline explicit Uint32(Uint64 data);
+  uint32_t Get() const { return data_; }
+  template <int N>
+  int32_t GetSigned() const {
+    return ExtractSignedBitfield32(N - 1, 0, data_);
+  }
+  int32_t GetSigned() const { return data_; }
+  Uint32 operator~() const { return Uint32(~data_); }
+  Uint32 operator-() const { return Uint32(-data_); }
+  bool operator==(Uint32 value) const { return data_ == value.data_; }
+  bool operator!=(Uint32 value) const { return data_ != value.data_; }
+  bool operator>(Uint32 value) const { return data_ > value.data_; }
+  Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
+  Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
+  Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
+  Uint32 operator&=(Uint32 value) {
+    data_ &= value.data_;
+    return *this;
+  }
+  Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
+  Uint32 operator^=(Uint32 value) {
+    data_ ^= value.data_;
+    return *this;
+  }
+  Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
+  Uint32 operator|=(Uint32 value) {
+    data_ |= value.data_;
+    return *this;
+  }
+  // Unlike uint32_t, the shift functions can accept negative shift and
+  // return 0 when the shift is too big.
+  Uint32 operator>>(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 32) return Uint32(0);
+      return Uint32(data_ << tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 32) return Uint32(0);
+    return Uint32(data_ >> tmp);
+  }
+  Uint32 operator<<(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 32) return Uint32(0);
+      return Uint32(data_ >> tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 32) return Uint32(0);
+    return Uint32(data_ << tmp);
+  }
+};
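+
+// Editor's illustration (not upstream documentation): unlike raw uint32_t,
+// these wrappers give shifts by negative or oversized amounts a defined
+// meaning. Uint32(1) >> -4 evaluates to Uint32(16), because a negative right
+// shift becomes a left shift, and Uint32(1) << 40 evaluates to Uint32(0).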
+
+class Uint64 {
+  uint64_t data_;
+
+ public:
+  // Unlike uint64_t, Uint64 has a default constructor.
+  Uint64() { data_ = 0; }
+  explicit Uint64(uint64_t data) : data_(data) {}
+  explicit Uint64(Uint32 data) : data_(data.Get()) {}
+  inline explicit Uint64(Uint128 data);
+  uint64_t Get() const { return data_; }
+  int64_t GetSigned(int N) const {
+    return ExtractSignedBitfield64(N - 1, 0, data_);
+  }
+  int64_t GetSigned() const { return data_; }
+  Uint32 ToUint32() const {
+    VIXL_ASSERT((data_ >> 32) == 0);
+    return Uint32(static_cast<uint32_t>(data_));
+  }
+  Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
+  Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
+  Uint64 operator~() const { return Uint64(~data_); }
+  Uint64 operator-() const { return Uint64(-data_); }
+  bool operator==(Uint64 value) const { return data_ == value.data_; }
+  bool operator!=(Uint64 value) const { return data_ != value.data_; }
+  Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
+  Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
+  Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
+  Uint64 operator&=(Uint64 value) {
+    data_ &= value.data_;
+    return *this;
+  }
+  Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
+  Uint64 operator^=(Uint64 value) {
+    data_ ^= value.data_;
+    return *this;
+  }
+  Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
+  Uint64 operator|=(Uint64 value) {
+    data_ |= value.data_;
+    return *this;
+  }
+  // Unlike uint64_t, the shift functions can accept negative shift and
+  // return 0 when the shift is too big.
+  Uint64 operator>>(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 64) return Uint64(0);
+      return Uint64(data_ << tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 64) return Uint64(0);
+    return Uint64(data_ >> tmp);
+  }
+  Uint64 operator<<(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 64) return Uint64(0);
+      return Uint64(data_ >> tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 64) return Uint64(0);
+    return Uint64(data_ << tmp);
+  }
+};
+
+class Uint128 {
+  uint64_t data_high_;
+  uint64_t data_low_;
+
+ public:
+  Uint128() : data_high_(0), data_low_(0) {}
+  explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
+  explicit Uint128(Uint64 data_low)
+      : data_high_(0), data_low_(data_low.Get()) {}
+  Uint128(uint64_t data_high, uint64_t data_low)
+      : data_high_(data_high), data_low_(data_low) {}
+  Uint64 ToUint64() const {
+    VIXL_ASSERT(data_high_ == 0);
+    return Uint64(data_low_);
+  }
+  Uint64 GetHigh64() const { return Uint64(data_high_); }
+  Uint64 GetLow64() const { return Uint64(data_low_); }
+  Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
+  bool operator==(Uint128 value) const {
+    return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
+  }
+  Uint128 operator&(Uint128 value) const {
+    return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
+  }
+  Uint128 operator&=(Uint128 value) {
+    data_high_ &= value.data_high_;
+    data_low_ &= value.data_low_;
+    return *this;
+  }
+  Uint128 operator|=(Uint128 value) {
+    data_high_ |= value.data_high_;
+    data_low_ |= value.data_low_;
+    return *this;
+  }
+  Uint128 operator>>(int shift) const {
+    VIXL_ASSERT((shift >= 0) && (shift < 128));
+    if (shift == 0) return *this;
+    if (shift >= 64) {
+      return Uint128(0, data_high_ >> (shift - 64));
+    }
+    uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
+    return Uint128(data_high_ >> shift, tmp);
+  }
+
+class Uint128 {
+  uint64_t data_high_;
+  uint64_t data_low_;
+
+ public:
+  Uint128() : data_high_(0), data_low_(0) {}
+  explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
+  explicit Uint128(Uint64 data_low)
+      : data_high_(0), data_low_(data_low.Get()) {}
+  Uint128(uint64_t data_high, uint64_t data_low)
+      : data_high_(data_high), data_low_(data_low) {}
+  Uint64 ToUint64() const {
+    VIXL_ASSERT(data_high_ == 0);
+    return Uint64(data_low_);
+  }
+  Uint64 GetHigh64() const { return Uint64(data_high_); }
+  Uint64 GetLow64() const { return Uint64(data_low_); }
+  Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
+  bool operator==(Uint128 value) const {
+    return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
+  }
+  Uint128 operator&(Uint128 value) const {
+    return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
+  }
+  Uint128 operator&=(Uint128 value) {
+    data_high_ &= value.data_high_;
+    data_low_ &= value.data_low_;
+    return *this;
+  }
+  Uint128 operator|=(Uint128 value) {
+    data_high_ |= value.data_high_;
+    data_low_ |= value.data_low_;
+    return *this;
+  }
+  Uint128 operator>>(int shift) const {
+    VIXL_ASSERT((shift >= 0) && (shift < 128));
+    if (shift == 0) return *this;
+    if (shift >= 64) {
+      return Uint128(0, data_high_ >> (shift - 64));
+    }
+    uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
+    return Uint128(data_high_ >> shift, tmp);
+  }
+  Uint128 operator<<(int shift) const {
+    VIXL_ASSERT((shift >= 0) && (shift < 128));
+    if (shift == 0) return *this;
+    if (shift >= 64) {
+      return Uint128(data_low_ << (shift - 64), 0);
+    }
+    uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
+    return Uint128(tmp, data_low_ << shift);
+  }
+};
+
+Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
+Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}
+
+Int64 BitCount(Uint32 value);
+
+}  // namespace internal
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const Float16 kFP16DefaultNaN;
+
+// Floating-point infinity values.
+extern const Float16 kFP16PositiveInfinity;
+extern const Float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// Floating-point zero values.
+extern const Float16 kFP16PositiveZero;
+extern const Float16 kFP16NegativeZero;
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+enum FPRounding {
+  // The first four values are encodable directly by FPCR<RMode>.
+  FPTieEven = 0x0,
+  FPPositiveInfinity = 0x1,
+  FPNegativeInfinity = 0x2,
+  FPZero = 0x3,
+
+  // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+  FPTieAway,
+  FPRoundOdd
+};
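+
+// Illustrative note (not upstream VIXL commentary): these are the standard
+// IEEE-754 rounding attributes. For an exact halfway case, e.g. rounding the
+// real value 2.5 to an integer-valued result:
+//   FPTieEven          -> 2.0  (ties go to the even neighbour; 3.5 -> 4.0)
+//   FPPositiveInfinity -> 3.0
+//   FPNegativeInfinity -> 2.0
+//   FPZero             -> 2.0
+//   FPTieAway          -> 3.0  (ties go away from zero)
+// FPRoundOdd ("round to odd") forces the lowest result bit to 1 whenever any
+// precision is discarded; it is useful for avoiding double-rounding errors.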
+
+enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+//  sign:     0 = positive, 1 = negative
+//  exponent: Unbiased IEEE-754 exponent.
+//  mantissa: The mantissa of the input. The top bit (which is not encoded for
+//            normal IEEE-754 values) must not be omitted. This bit has the
+//            value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <typename T, int ebits, int mbits>
+T FPRound(int64_t sign,
+          int64_t exponent,
+          uint64_t mantissa,
+          FPRounding round_mode) {
+  VIXL_ASSERT((sign == 0) || (sign == 1));
+
+  // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+  // Rounding can promote subnormals to normals, and normals to infinities. For
+  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+  // encodable as a float, but rounding based on the low-order mantissa bits
+  // could make it overflow. With ties-to-even rounding, this value would
+  // become an infinity.
+
+  // ---- Rounding Method ----
+  //
+  // The exponent is irrelevant in the rounding operation, so we treat the
+  // lowest-order bit that will fit into the result ('onebit') as having
+  // the value '1'. Similarly, the highest-order bit that won't fit into
+  // the result ('halfbit') has the value '0.5'. The 'point' sits between
+  // 'onebit' and 'halfbit':
+  //
+  //            These bits fit into the result.
+  //               |---------------------|
+  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+  //                                     ||
+  //                                    / |
+  //                                   /  halfbit
+  //                               onebit
+  //
+  // For subnormal outputs, the range of representable bits is smaller and
+  // the position of onebit and halfbit depends on the exponent of the
+  // input, but the method is otherwise similar.
+  //
+  //   onebit(frac)
+  //     |
+  //     | halfbit(frac)     halfbit(adjusted)
+  //     | /                 /
+  //     | |                 |
+  //  0b00.0 (exact)  -> 0b00.0 (exact)      -> 0b00
+  //  0b00.0...       -> 0b00.0...           -> 0b00
+  //  0b00.1 (exact)  -> 0b00.0111..111      -> 0b00
+  //  0b00.1...       -> 0b00.1...           -> 0b01
+  //  0b01.0 (exact)  -> 0b01.0 (exact)      -> 0b01
+  //  0b01.0...       -> 0b01.0...           -> 0b01
+  //  0b01.1 (exact)  -> 0b01.1 (exact)      -> 0b10
+  //  0b01.1...       -> 0b01.1...           -> 0b10
+  //  0b10.0 (exact)  -> 0b10.0 (exact)      -> 0b10
+  //  0b10.0...       -> 0b10.0...           -> 0b10
+  //  0b10.1 (exact)  -> 0b10.0111..111      -> 0b10
+  //  0b10.1...       -> 0b10.1...           -> 0b11
+  //  0b11.0 (exact)  -> 0b11.0 (exact)      -> 0b11
+  //  ...
+  //
+  //  adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
+  //  mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+  static const int mantissa_offset = 0;
+  static const int exponent_offset = mantissa_offset + mbits;
+  static const int sign_offset = exponent_offset + ebits;
+  VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
+
+  // Bail out early for zero inputs.
+  if (mantissa == 0) {
+    return static_cast<T>(sign << sign_offset);
+  }
+
+  // If all bits in the exponent are set, the value is infinite or NaN.
+  // This is true for all binary IEEE-754 formats.
+  static const int infinite_exponent = (1 << ebits) - 1;
+  static const int max_normal_exponent = infinite_exponent - 1;
+
+  // Apply the exponent bias to encode it for the result. Doing this early
+  // makes it easy to detect values that will be infinite or subnormal.
+  exponent += max_normal_exponent >> 1;
+
+  if (exponent > max_normal_exponent) {
+    // Overflow: the input is too large for the result type to represent.
+    if (round_mode == FPTieEven) {
+      // FPTieEven rounding mode handles overflows using infinities.
+      exponent = infinite_exponent;
+      mantissa = 0;
+    } else {
+      VIXL_ASSERT(round_mode == FPRoundOdd);
+      // FPRoundOdd rounding mode handles overflows using the largest magnitude
+      // normal number.
+      exponent = max_normal_exponent;
+      mantissa = (UINT64_C(1) << exponent_offset) - 1;
+    }
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          (mantissa << mantissa_offset));
+  }
+
+  // Calculate the shift required to move the top mantissa bit to the proper
+  // place in the destination type.
+  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
+  int shift = highest_significant_bit - mbits;
+
+  if (exponent <= 0) {
+    // The output will be subnormal (before rounding).
+    // For subnormal outputs, the shift must be adjusted by the exponent. The
+    // +1 is necessary because the exponent of a subnormal value (encoded as 0)
+    // is the same as the exponent of the smallest normal value (encoded as 1).
+    shift += -exponent + 1;
+
+    // Handle inputs that would produce a zero output.
+    //
+    // Shifts higher than highest_significant_bit+1 will always produce a zero
+    // result. A shift of exactly highest_significant_bit+1 might produce a
+    // non-zero result after rounding.
+    if (shift > (highest_significant_bit + 1)) {
+      if (round_mode == FPTieEven) {
+        // The result will always be +/-0.0.
+        return static_cast<T>(sign << sign_offset);
+      } else {
+        VIXL_ASSERT(round_mode == FPRoundOdd);
+        VIXL_ASSERT(mantissa != 0);
+        // For FPRoundOdd, if the mantissa is too small to represent and
+        // non-zero, return the next "odd" value.
+        return static_cast<T>((sign << sign_offset) | 1);
+      }
+    }
+
+    // Properly encode the exponent for a subnormal output.
+    exponent = 0;
+  } else {
+    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+    // normal values.
+    mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+  }
+
+  // The casts below are only well-defined for unsigned integers.
+  VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
+  VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);
+
+  if (shift > 0) {
+    if (round_mode == FPTieEven) {
+      // We have to shift the mantissa to the right. Some precision is lost, so
+      // we need to apply rounding.
+      uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+      uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
+      uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+      uint64_t adjusted = mantissa - adjustment;
+      T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
+
+      T result =
+          static_cast<T>((sign << sign_offset) |
+                         (exponent << exponent_offset) |
+                         ((mantissa >> shift) << mantissa_offset));
+
+      // A very large mantissa can overflow during rounding. If this happens,
+      // the exponent should be incremented and the mantissa set to 1.0
+      // (encoded as 0). Applying halfbit_adjusted after assembling the float
+      // has the nice side-effect that this case is handled for free.
+      //
+      // This also handles cases where a very large finite value overflows to
+      // infinity, or where a very large subnormal value overflows to become
+      // normal.
+      return result + halfbit_adjusted;
+    } else {
+      VIXL_ASSERT(round_mode == FPRoundOdd);
+      // If any bits at position halfbit or below are set, onebit (ie. the
+      // bottom bit of the resulting mantissa) must be set.
+      uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+      if (fractional_bits != 0) {
+        mantissa |= UINT64_C(1) << shift;
+      }
+
+      return static_cast<T>((sign << sign_offset) |
+                            (exponent << exponent_offset) |
+                            ((mantissa >> shift) << mantissa_offset));
+    }
+  } else {
+    // We have to shift the mantissa to the left (or not at all). The input
+    // mantissa is exactly representable in the output mantissa, so apply no
+    // rounding correction.
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          ((mantissa << -shift) << mantissa_offset));
+  }
+}
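+
+// Worked example (illustrative, not upstream VIXL documentation): the real
+// value 1.5 has sign = 0, unbiased exponent = 0 and mantissa = 0b11 (the top
+// bit is worth pow(2, 0), the next bit 0.5). For a double, FPRound biases the
+// exponent to 1023, clears the implicit top bit, and shifts the remaining
+// fraction bit up to bit 51 of the mantissa field:
+//
+//   FPRoundToDouble(0, 0, 0x3, FPTieEven)
+//       == RawbitsToDouble(UINT64_C(0x3ff8000000000000))  // i.e. 1.5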
+
+
+// See FPRound for a description of this function.
+inline double FPRoundToDouble(int64_t sign,
+                              int64_t exponent,
+                              uint64_t mantissa,
+                              FPRounding round_mode) {
+  uint64_t bits =
+      FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+                                                                  exponent,
+                                                                  mantissa,
+                                                                  round_mode);
+  return RawbitsToDouble(bits);
+}
+
+
+// See FPRound for a description of this function.
+inline Float16 FPRoundToFloat16(int64_t sign,
+                                int64_t exponent,
+                                uint64_t mantissa,
+                                FPRounding round_mode) {
+  return RawbitsToFloat16(
+      FPRound<uint16_t, kFloat16ExponentBits, kFloat16MantissaBits>(
+          sign, exponent, mantissa, round_mode));
+}
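+
+// Worked example (illustrative, not upstream VIXL documentation): binary16
+// keeps 10 fraction bits, so 1 + 2^-11 lies exactly halfway between 1.0 and
+// the next representable value, 1 + 2^-10. With FPTieEven the tie goes to the
+// neighbour whose low mantissa bit is even (zero), which is 1.0:
+//
+//   // mantissa = 0b100000000001: bit 11 is worth pow(2, 0), bit 0 is 2^-11.
+//   FPRoundToFloat16(0, 0, 0x801, FPTieEven)  // == 1.0 (raw bits 0x3c00)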
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign,
+                                   int64_t exponent,
+                                   uint64_t mantissa,
+                                   FPRounding round_mode) {
+  uint32_t bits =
+      FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+                                                                exponent,
+                                                                mantissa,
+                                                                round_mode);
+  return RawbitsToFloat(bits);
+}
+
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+float FPToFloat(double value,
+                FPRounding round_mode,
+                UseDefaultNaN DN,
+                bool* exception = NULL);
+
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);
+
+Float16 FPToFloat16(float value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception = NULL);
+
+Float16 FPToFloat16(double value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception = NULL);
+}  // namespace vixl
+
+#endif  // VIXL_UTILS_H
diff --git a/dep/vixl/src/aarch32/assembler-aarch32.cc b/dep/vixl/src/aarch32/assembler-aarch32.cc
new file mode 100644
index 000000000..5f636981d
--- /dev/null
+++ b/dep/vixl/src/aarch32/assembler-aarch32.cc
@@ -0,0 +1,27923 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright
+//     notice, this list of conditions and the following disclaimer in the
+//     documentation and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may
+//     be used to endorse or promote products derived from this software
+//     without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "utils-vixl.h"
+#include "aarch32/assembler-aarch32.h"
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/operands-aarch32.h"
+
+namespace vixl {
+namespace aarch32 {
+
+void Assembler::EmitT32_16(uint16_t instr) {
+  VIXL_ASSERT(buffer_.Is16bitAligned());
+  buffer_.Emit16(instr);
+}
+
+
+void Assembler::EmitT32_32(uint32_t instr) {
+  VIXL_ASSERT(buffer_.Is16bitAligned());
+  buffer_.Emit16(static_cast<uint16_t>(instr >> 16));
+  buffer_.Emit16(static_cast<uint16_t>(instr & 0xffff));
+}
+
+
+void Assembler::EmitA32(uint32_t instr) {
+  VIXL_ASSERT(buffer_.Is32bitAligned());
+  buffer_.Emit32(instr);
+}
+
+
+#ifdef VIXL_DEBUG
+void Assembler::PerformCheckIT(Condition condition) {
+  if (it_mask_ == 0) {
+    VIXL_ASSERT(IsUsingA32() || condition.Is(al));
+  } else {
+    VIXL_ASSERT(condition.Is(first_condition_));
+    // For A32, AdvanceIT() is not called by the assembler. We must call it
+    // in order to check that IT instructions are used consistently with
+    // the following conditional instructions.
+    if (IsUsingA32()) AdvanceIT();
+  }
+}
+#endif
+
+
+void Assembler::BindHelper(Label* label) {
+  VIXL_ASSERT(!label->IsBound());
+  label->SetLocation(this, GetCursorOffset());
+  label->MarkBound();
+}
+
+uint32_t Assembler::Link(uint32_t instr,
+                         Location* location,
+                         const Location::EmitOperator& op,
+                         const ReferenceInfo* info) {
+  location->SetReferenced();
+  if (location->IsBound()) {
+    return op.Encode(instr, GetCursorOffset(), location);
+  }
+  location->AddForwardRef(GetCursorOffset(), op, info);
+  return instr;
+}
+
+
+// Start of generated code.
+class Dt_L_imm6_1 : public EncodingValue {
+  uint32_t type_;
+
+ public:
+  explicit Dt_L_imm6_1(DataType dt);
+  uint32_t GetTypeEncodingValue() const { return type_; }
+};
+
+Dt_L_imm6_1::Dt_L_imm6_1(DataType dt) {
+  switch (dt.GetValue()) {
+    case S8:
+      type_ = 0x0;
+      SetEncodingValue(0x1);
+      break;
+    case U8:
+      type_ = 0x1;
+      SetEncodingValue(0x1);
+      break;
+    case S16:
+      type_ = 0x0;
+      SetEncodingValue(0x2);
+      break;
+    case U16:
+      type_ = 0x1;
+      SetEncodingValue(0x2);
+      break;
+    case S32:
+      type_ = 0x0;
+      SetEncodingValue(0x4);
+      break;
+    case U32:
+      type_ = 0x1;
+      SetEncodingValue(0x4);
+      break;
+    case S64:
+      type_ = 0x0;
+      SetEncodingValue(0x8);
+      break;
+    case U64:
+      type_ = 0x1;
+      SetEncodingValue(0x8);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+      type_ = 0x0;
+      break;
+  }
+}
+
+class Dt_L_imm6_2 : public EncodingValue {
+  uint32_t type_;
+
+ public:
+  explicit Dt_L_imm6_2(DataType dt);
+  uint32_t GetTypeEncodingValue() const { return type_; }
+};
+
+Dt_L_imm6_2::Dt_L_imm6_2(DataType dt) {
+  switch (dt.GetValue()) {
+    case S8:
+      type_ = 0x1;
+      SetEncodingValue(0x1);
+      break;
+    case S16:
+      type_ = 0x1;
+      SetEncodingValue(0x2);
+      break;
+    case S32:
+      type_ = 0x1;
+      SetEncodingValue(0x4);
+      break;
+    case S64:
+      type_ = 0x1;
+      SetEncodingValue(0x8);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+      type_ = 0x0;
+      break;
+  }
+}
+
+class Dt_L_imm6_3 : public EncodingValue {
+ public:
+  explicit Dt_L_imm6_3(DataType dt);
+};
+
+Dt_L_imm6_3::Dt_L_imm6_3(DataType dt) {
+  switch (dt.GetValue()) {
+    case I8:
+      SetEncodingValue(0x1);
+      break;
+    case I16:
+      SetEncodingValue(0x2);
+      break;
+    case I32:
+      SetEncodingValue(0x4);
+      break;
+    case I64:
+      SetEncodingValue(0x8);
+      break;
+    default:
+      break;
+  }
+}
+
+class Dt_L_imm6_4 : public EncodingValue {
+ public:
+  explicit Dt_L_imm6_4(DataType dt);
+};
+
+Dt_L_imm6_4::Dt_L_imm6_4(DataType dt) {
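+  // Note (added for clarity; not upstream commentary): as with Dt_L_imm6_3
+  // above, this constructor maps an untyped element width to the one-hot
+  // marker encoded in the L:imm6 field -- 8-bit -> 0b0001, 16-bit -> 0b0010,
+  // 32-bit -> 0b0100, 64-bit -> 0b1000. An unsupported DataType leaves the
+  // encoding value unset, so IsValid() reports false.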
switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x1); + break; + case Untyped16: + SetEncodingValue(0x2); + break; + case Untyped32: + SetEncodingValue(0x4); + break; + case Untyped64: + SetEncodingValue(0x8); + break; + default: + break; + } +} + +class Dt_imm6_1 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_1(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_1::Dt_imm6_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_2 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_2(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_2::Dt_imm6_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_3 : public EncodingValue { + public: + explicit Dt_imm6_3(DataType dt); +}; + +Dt_imm6_3::Dt_imm6_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_imm6_4 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_4(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_4::Dt_imm6_4(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_op_U_size_1 : public EncodingValue { + public: + explicit Dt_op_U_size_1(DataType dt); +}; + +Dt_op_U_size_1::Dt_op_U_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case P8: + SetEncodingValue(0x8); + break; + case P64: + SetEncodingValue(0xa); + break; + default: + break; + } +} + +class Dt_op_size_1 : public EncodingValue { + public: + explicit Dt_op_size_1(DataType dt); +}; + +Dt_op_size_1::Dt_op_size_1(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case P8: + SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_op_size_2 : public EncodingValue { + public: + explicit Dt_op_size_2(DataType dt); +}; + +Dt_op_size_2::Dt_op_size_2(DataType dt) { + switch (dt.GetValue()) { + case S8: + 
SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_op_size_3 : public EncodingValue { + public: + explicit Dt_op_size_3(DataType dt); +}; + +Dt_op_size_3::Dt_op_size_3(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x4); + break; + case U32: + SetEncodingValue(0x5); + break; + case U64: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_imm3H_1 : public EncodingValue { + public: + explicit Dt_U_imm3H_1(DataType dt); +}; + +Dt_U_imm3H_1::Dt_U_imm3H_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x1); + break; + case S16: + SetEncodingValue(0x2); + break; + case S32: + SetEncodingValue(0x4); + break; + case U8: + SetEncodingValue(0x9); + break; + case U16: + SetEncodingValue(0xa); + break; + case U32: + SetEncodingValue(0xc); + break; + default: + break; + } +} + +class Dt_U_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_U_opc1_opc2_1::Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case S8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case S16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case U8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x18 | lane.GetLane()); + break; + case U16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x11 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_opc1_opc2_1::Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_imm4_1 : public EncodingValue { + public: + explicit Dt_imm4_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_imm4_1::Dt_imm4_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + 
SetEncodingValue(0x2 | (lane.GetLane() << 2)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x4 | (lane.GetLane() << 3)); + break; + default: + break; + } +} + +class Dt_B_E_1 : public EncodingValue { + public: + explicit Dt_B_E_1(DataType dt); +}; + +Dt_B_E_1::Dt_B_E_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x2); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_op_1 : public EncodingValue { + public: + Dt_op_1(DataType dt1, DataType dt2); +}; + +Dt_op_1::Dt_op_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() == U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_op_2 : public EncodingValue { + public: + explicit Dt_op_2(DataType dt); +}; + +Dt_op_2::Dt_op_2(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_op_3 : public EncodingValue { + public: + explicit Dt_op_3(DataType dt); +}; + +Dt_op_3::Dt_op_3(DataType dt) { + switch (dt.GetValue()) { + case S32: + SetEncodingValue(0x0); + break; + case U32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_U_sx_1 : public EncodingValue { + public: + explicit Dt_U_sx_1(DataType dt); +}; + +Dt_U_sx_1::Dt_U_sx_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case U16: + SetEncodingValue(0x2); + break; + case U32: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_op_U_1 : public EncodingValue { + public: + Dt_op_U_1(DataType dt1, DataType dt2); +}; + +Dt_op_U_1::Dt_op_U_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() == U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_sz_1 : public EncodingValue { + public: + explicit Dt_sz_1(DataType dt); +}; + +Dt_sz_1::Dt_sz_1(DataType dt) { + switch (dt.GetValue()) { + case F32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_F_size_1 : public EncodingValue { + public: + explicit Dt_F_size_1(DataType dt); +}; + +Dt_F_size_1::Dt_F_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_2 : public EncodingValue { + public: + explicit Dt_F_size_2(DataType dt); +}; + +Dt_F_size_2::Dt_F_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class 
Dt_F_size_3 : public EncodingValue { + public: + explicit Dt_F_size_3(DataType dt); +}; + +Dt_F_size_3::Dt_F_size_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_4 : public EncodingValue { + public: + explicit Dt_F_size_4(DataType dt); +}; + +Dt_F_size_4::Dt_F_size_4(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_1 : public EncodingValue { + public: + explicit Dt_U_size_1(DataType dt); +}; + +Dt_U_size_1::Dt_U_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_2 : public EncodingValue { + public: + explicit Dt_U_size_2(DataType dt); +}; + +Dt_U_size_2::Dt_U_size_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_3 : public EncodingValue { + public: + explicit Dt_U_size_3(DataType dt); +}; + +Dt_U_size_3::Dt_U_size_3(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case S64: + SetEncodingValue(0x3); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case U64: + SetEncodingValue(0x7); + break; + default: + break; + } +} + +class Dt_size_1 : public EncodingValue { + public: + explicit Dt_size_1(DataType dt); +}; + +Dt_size_1::Dt_size_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_size_2 : public EncodingValue { + public: + explicit Dt_size_2(DataType dt); +}; + +Dt_size_2::Dt_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_3 : public EncodingValue { + public: + explicit Dt_size_3(DataType dt); +}; + +Dt_size_3::Dt_size_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x0); + break; + case I32: + SetEncodingValue(0x1); + break; + case I64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_4 : public EncodingValue { + public: + explicit Dt_size_4(DataType dt); +}; + +Dt_size_4::Dt_size_4(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_5 : public EncodingValue { + public: + explicit Dt_size_5(DataType dt); +}; + +Dt_size_5::Dt_size_5(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); 
+ break; + default: + break; + } +} + +class Dt_size_6 : public EncodingValue { + public: + explicit Dt_size_6(DataType dt); +}; + +Dt_size_6::Dt_size_6(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + case Untyped64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_7 : public EncodingValue { + public: + explicit Dt_size_7(DataType dt); +}; + +Dt_size_7::Dt_size_7(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_8 : public EncodingValue { + public: + Dt_size_8(DataType dt, Alignment align); +}; + +Dt_size_8::Dt_size_8(DataType dt, Alignment align) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + if (align.Is(k64BitAlign) || align.Is(kNoAlignment)) { + SetEncodingValue(0x2); + } else if (align.Is(k128BitAlign)) { + SetEncodingValue(0x3); + } + break; + default: + break; + } +} + +class Dt_size_9 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_9(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_9::Dt_size_9(DataType dt) { + switch (dt.GetValue()) { + case I16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case I32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case F32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_10 : public EncodingValue { + public: + explicit Dt_size_10(DataType dt); +}; + +Dt_size_10::Dt_size_10(DataType dt) { + switch (dt.GetValue()) { + case S8: + case U8: + case I8: + SetEncodingValue(0x0); + break; + case S16: + case U16: + case I16: + SetEncodingValue(0x1); + break; + case S32: + case U32: + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_11 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_11(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_11::Dt_size_11(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_12 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_12(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_12::Dt_size_12(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x0); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x0); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_13 : public EncodingValue { + public: + explicit Dt_size_13(DataType dt); +}; + +Dt_size_13::Dt_size_13(DataType dt) { + switch (dt.GetValue()) { + case S16: + 
SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_14 : public EncodingValue { + public: + explicit Dt_size_14(DataType dt); +}; + +Dt_size_14::Dt_size_14(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_15 : public EncodingValue { + public: + explicit Dt_size_15(DataType dt); +}; + +Dt_size_15::Dt_size_15(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_size_16 : public EncodingValue { + public: + explicit Dt_size_16(DataType dt); +}; + +Dt_size_16::Dt_size_16(DataType dt) { + switch (dt.GetValue()) { + case F32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_17 : public EncodingValue { + public: + explicit Dt_size_17(DataType dt); +}; + +Dt_size_17::Dt_size_17(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Index_1 : public EncodingValue { + public: + Index_1(const NeonRegisterList& nreglist, DataType dt); +}; + +Index_1::Index_1(const NeonRegisterList& nreglist, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_1 : public EncodingValue { + public: + Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_1::Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 3; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + SetEncodingValue(value); + break; + } + default: + break; + } 
+} + +class Align_index_align_2 : public EncodingValue { + public: + Align_index_align_2(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_2::Align_index_align_2(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_3 : public EncodingValue { + public: + Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_3::Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == k128BitAlign) { + value = 2; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_a_1 : public EncodingValue { + public: + Align_a_1(Alignment align, DataType dt); +}; + +Align_a_1::Align_a_1(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_2 : public EncodingValue { + public: + 
Align_a_2(Alignment align, DataType dt); +}; + +Align_a_2::Align_a_2(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_3 : public EncodingValue { + public: + Align_a_3(Alignment align, DataType dt); +}; + +Align_a_3::Align_a_3(Alignment align, DataType dt) { + switch (align.GetType()) { + case k32BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped16)) + SetEncodingValue(0x1); + else if (dt.Is(Untyped32)) + SetEncodingValue(0x1); + break; + case k128BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_1 : public EncodingValue { + public: + Align_align_1(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_1::Align_align_1(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_2 : public EncodingValue { + public: + Align_align_2(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_2::Align_align_2(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_3 : public EncodingValue { + public: + explicit Align_align_3(Alignment align); +}; + +Align_align_3::Align_align_3(Alignment align) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_4 : public EncodingValue { + public: + explicit Align_align_4(Alignment align); +}; + +Align_align_4::Align_align_4(Alignment align) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + SetEncodingValue(0x2); + break; + case k256BitAlign: + SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_5 : public EncodingValue { + public: + Align_align_5(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_5::Align_align_5(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + + +// CBNZ{} ,
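+
+// Note (added for clarity; not upstream commentary): each generated emitter
+// below follows the same pattern. It checks the IT-block state, attempts to
+// encode the operands for the current ISA (T32 or A32), emits the encoding
+// and returns on success, and otherwise falls through to Delegate() so the
+// MacroAssembler can substitute an equivalent instruction sequence. A minimal
+// usage sketch, assuming an initialised Assembler 'assm' in A32 mode:
+//
+//   assm.vaba(al, U8, d0, d1, d2);  // emits VABA.U8 d0, d1, d2
+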

,
,
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000710U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABA{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000710U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaba, &Assembler::vaba, cond, dt, rd, rn, rm); +} + +void Assembler::vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABA{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000750U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABA{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000750U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaba, &Assembler::vaba, cond, dt, rd, rn, rm); +} + +void Assembler::vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABAL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABAL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabal, &Assembler::vabal, cond, dt, rd, rn, rm); +} + +void Assembler::vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABD{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABD{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VABD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabd, &Assembler::vabd, cond, dt, rd, rn, rm); +} + +void Assembler::vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABD{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000740U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABD{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VABD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000740U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabd, &Assembler::vabd, cond, dt, rd, rn, rm); +} + +void Assembler::vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABDL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABDL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabdl, &Assembler::vabdl, cond, dt, rd, rn, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10300U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABS{}{}.F64
, ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeeb00bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b10300U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VABS{}{}.F64
, ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb00bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10340U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b10340U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VABS{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb00ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VABS{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb00ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGE{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGE{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacge, &Assembler::vacge, cond, dt, rd, rn, rm); +} + +void Assembler::vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGE{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGE{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacge, &Assembler::vacge, cond, dt, rd, rn, rm); +} + +void Assembler::vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGT{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGT{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacgt, &Assembler::vacgt, cond, dt, rd, rn, rm); +} + +void Assembler::vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGT{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGT{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacgt, &Assembler::vacgt, cond, dt, rd, rn, rm); +} + +void Assembler::vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLE{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLE{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacle, &Assembler::vacle, cond, dt, rd, rn, rm); +} + +void Assembler::vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLE{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLE{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacle, &Assembler::vacle, cond, dt, rd, rn, rm); +} + +void Assembler::vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLT{}{}.F32 {
<Dd>}, <Dn>, <Dm> ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLT{<c>}{<q>}.F32 {<Dd>
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaclt, &Assembler::vaclt, cond, dt, rd, rn, rm); +} + +void Assembler::vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLT{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLT{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaclt, &Assembler::vaclt, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VADD{}{}.F32 {
<Dd>}, <Dn>, <Dm> ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VADD{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VADD{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e300b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VADD{<c>}{<q>}.<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VADD{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VADD{}{}.
<dt> {<Qd>}, <Qn>, <Qm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADD{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VADD{<c>}{<q>}.<dt>
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VADD{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VADD{}{}.F32 {}, , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e300a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VADDHN{}{}.
<dt> <Dd>, <Qn>, <Qm> ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDHN{<c>}{<q>}.<dt> <Dd>
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf2800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddhn, &Assembler::vaddhn, cond, dt, rd, rn, rm); +} + +void Assembler::vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VADDL{}{}.
<dt> <Qd>, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDL{<c>}{<q>}.<dt> <Qd>
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddl, &Assembler::vaddl, cond, dt, rd, rn, rm); +} + +void Assembler::vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VADDW{}{}.
<dt> {<Qd>}, <Qn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDW{<c>}{<q>}.<dt>
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddw, &Assembler::vaddw, cond, dt, rd, rn, rm); +} + +void Assembler::vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVand encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VAND{}{}.
<dt> {<Ddn>}, <Ddn>, #<imm> ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VAND{<c>}{<q>}.<dt>
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VAND{}{}{.
<dt>} {<Dd>}, <Dn>, <Dm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VAND{<c>}{<q>}{.<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVand, &Assembler::vand, cond, dt, rd, rn, operand); +} + +void Assembler::vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVand encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VAND{}{}.
<dt> {<Qdn>}, <Qdn>, #<imm> ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VAND{<c>}{<q>}.<dt>
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VAND{}{}{.
<dt>} {<Qd>}, <Qn>, <Qm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VAND{<c>}{<q>}{.<dt>
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVand, &Assembler::vand, cond, dt, rd, rn, operand); +} + +void Assembler::vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVbic encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VBIC{}{}.
<dt> {<Ddn>}, <Ddn>, #<imm> ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VBIC{<c>}{<q>}.<dt>
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VBIC{}{}{.
<dt>} {<Dd>}, <Dn>, <Dm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIC{<c>}{<q>}{.<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVbic, &Assembler::vbic, cond, dt, rd, rn, operand); +} + +void Assembler::vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVbic encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VBIC{}{}.
<dt> {<Qdn>}, <Qdn>, #<imm> ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VBIC{<c>}{<q>}.<dt>
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VBIC{}{}{.
<dt>} {<Qd>}, <Qn>, <Qm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIC{<c>}{<q>}{.<dt>
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVbic, &Assembler::vbic, cond, dt, rd, rn, operand); +} + +void Assembler::vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIF{}{}{.
<dt>} {<Dd>}, <Dn>, <Dm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIF{<c>}{<q>}{.<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbif, &Assembler::vbif, cond, dt, rd, rn, rm); +} + +void Assembler::vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIF{}{}{.
<dt>} {<Qd>}, <Qn>, <Qm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIF{<c>}{<q>}{.<dt>
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbif, &Assembler::vbif, cond, dt, rd, rn, rm); +} + +void Assembler::vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIT{}{}{.
<dt>} {<Dd>}, <Dn>, <Dm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIT{<c>}{<q>}{.<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbit, &Assembler::vbit, cond, dt, rd, rn, rm); +} + +void Assembler::vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIT{}{}{.
<dt>} {<Qd>}, <Qn>, <Qm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIT{<c>}{<q>}{.<dt>
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbit, &Assembler::vbit, cond, dt, rd, rn, rm); +} + +void Assembler::vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBSL{}{}{.
<dt>} {<Dd>}, <Dn>, <Dm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBSL{<c>}{<q>}{.<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbsl, &Assembler::vbsl, cond, dt, rd, rn, rm); +} + +void Assembler::vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBSL{}{}{.
<dt>} {<Qd>}, <Qn>, <Qm> ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBSL{<c>}{<q>}{.<dt>
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbsl, &Assembler::vbsl, cond, dt, rd, rn, rm); +} + +void Assembler::vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
<dt> {<Dd>}, <Dm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10100U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{<c>}{<q>}.<dt> {<Dd>
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10100U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rm, operand); +} + +void Assembler::vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
<dt> {<Qd>}, <Qm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10140U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{<c>}{<q>}.<dt>
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10140U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rm, operand); +} + +void Assembler::vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + Dt_sz_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCEQ{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000e00U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCEQ{<c>}{<q>}.<dt> {<Dd>
}, , ; A2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000e00U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rn, rm); +} + +void Assembler::vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + Dt_sz_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
<dt> {<Qd>}, <Qn>, <Qm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCEQ{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; T2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000e40U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCEQ{<c>}{<q>}.<dt>
{}, , ; A2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000e40U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rn, rm); +} + +void Assembler::vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
<dt> {<Dd>}, <Dm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10080U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{<c>}{<q>}.<dt> {<Dd>
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10080U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rm, operand); +} + +void Assembler::vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
<dt> {<Qd>}, <Qm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb100c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{<c>}{<q>}.<dt>
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b100c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rm, operand); +} + +void Assembler::vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGE{<c>}{<q>}.F32 {<Dd>
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rn, rm); +} + +void Assembler::vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGE{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGE{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rn, rm); +} + +void Assembler::vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
<dt> {<Dd>}, <Dm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10000U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{<c>}{<q>}.<dt> {<Dd>
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10000U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rm, operand); +} + +void Assembler::vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
<dt> {<Qd>}, <Qm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10040U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{<c>}{<q>}.<dt>
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10040U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rm, operand); +} + +void Assembler::vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGT{<c>}{<q>}.F32 {<Dd>
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rn, rm); +} + +void Assembler::vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGT{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGT{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rn, rm); +} + +void Assembler::vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
<dt> {<Dd>}, <Dm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10180U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{<c>}{<q>}.<dt> {<Dd>
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10180U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rm, operand); +} + +void Assembler::vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
<dt> {<Qd>}, <Qm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb101c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{<c>}{<q>}.<dt>
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b101c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rm, operand); +} + +void Assembler::vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLE{<c>}{<q>}.F32 {<Dd>
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rn, rm); +} + +void Assembler::vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLE{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLE{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rn, rm); +} + +void Assembler::vcls(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VCLS{}{}.
<dt> <Dd>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLS{<c>}{<q>}.<dt> <Dd>
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcls, &Assembler::vcls, cond, dt, rd, rm); +} + +void Assembler::vcls(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VCLS{}{}.
<dt> <Qd>, <Qm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLS{<c>}{<q>}.<dt> <Qd>
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcls, &Assembler::vcls, cond, dt, rd, rm); +} + +void Assembler::vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
<dt> {<Dd>}, <Dm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10200U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{<c>}{<q>}.<dt> {<Dd>
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10200U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rm, operand); +} + +void Assembler::vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
<dt> {<Qd>}, <Qm>, #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10240U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{<c>}{<q>}.<dt>
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10240U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rm, operand); +} + +void Assembler::vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLT{<c>}{<q>}.F32 {<Dd>
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rn, rm); +} + +void Assembler::vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLT{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLT{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rn, rm); +} + +void Assembler::vclz(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VCLZ{}{}.
<dt> <Dd>, <Dm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLZ{<c>}{<q>}.<dt> <Dd>
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVclz, &Assembler::vclz, cond, dt, rd, rm); +} + +void Assembler::vclz(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VCLZ{}{}.
<dt> <Qd>, <Qm> ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb004c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLZ{<c>}{<q>}.<dt> <Qd>
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b004c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVclz, &Assembler::vclz, cond, dt, rd, rm); +} + +void Assembler::vcmp(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMP{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb40a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb40a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMP{}{}.F32 , #0.0 ; T2 + if (dt.Is(F32) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50a40U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F32 , #0.0 ; A2 + if (dt.Is(F32) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmp, &Assembler::vcmp, cond, dt, rd, operand); +} + +void Assembler::vcmp(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMP{}{}.F64
<Dd>, <Dm> ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb40b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMP{<c>}{<q>}.F64 <Dd>, <Dm> ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb40b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMP{<c>}{<q>}.F64 <Dd>, #0.0 ; T2 + if (dt.Is(F64) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50b40U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMP{<c>}{<q>}.F64 <Dd>
, #0.0 ; A2 + if (dt.Is(F64) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmp, &Assembler::vcmp, cond, dt, rd, operand); +} + +void Assembler::vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMPE{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb40ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb40ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMPE{}{}.F32 , #0.0 ; T2 + if (dt.Is(F32) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50ac0U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F32 , #0.0 ; A2 + if (dt.Is(F32) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmpe, &Assembler::vcmpe, cond, dt, rd, operand); +} + +void Assembler::vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMPE{}{}.F64
<Dd>, <Dm> ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb40bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMPE{<c>}{<q>}.F64 <Dd>, <Dm> ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb40bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMPE{<c>}{<q>}.F64 <Dd>, #0.0 ; T2 + if (dt.Is(F64) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50bc0U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMPE{<c>}{<q>}.F64 <Dd>
, #0.0 ; A2 + if (dt.Is(F64) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmpe, &Assembler::vcmpe, cond, dt, rd, operand); +} + +void Assembler::vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCNT{}{}.8
<Dd>, <Dm> ; T1 + if (dt.Is(Untyped8)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00500U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCNT{<c>}{<q>}.8 <Dd>
, ; A1 + if (dt.Is(Untyped8)) { + if (cond.Is(al)) { + EmitA32(0xf3b00500U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcnt, &Assembler::vcnt, cond, dt, rd, rm); +} + +void Assembler::vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCNT{}{}.8 , ; T1 + if (dt.Is(Untyped8)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00540U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCNT{}{}.8 , ; A1 + if (dt.Is(Untyped8)) { + if (cond.Is(al)) { + EmitA32(0xf3b00540U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcnt, &Assembler::vcnt, cond, dt, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_2 encoded_dt(dt2); + if (IsUsingT32()) { + // VCVT{}{}.F64.F32
<Dd>, <Sm> ; T1 + if (dt1.Is(F64) && dt2.Is(F32)) { + EmitT32_32(0xeeb70ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{<c>}{<q>}.F64.<dt> <Dd>, <Sm> ; T1 + if (dt1.Is(F64) && encoded_dt.IsValid()) { + EmitT32_32(0xeeb80b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{<c>}{<q>}.F64.F32 <Dd>, <Sm> ; A1 + if (dt1.Is(F64) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb70ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{<c>}{<q>}.F64.<dt> <Dd>
, ; A1 + if (dt1.Is(F64) && encoded_dt.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb80b40U | (cond.GetCondition() << 28) | + (encoded_dt.GetEncodingValue() << 7) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVT{}{}.F32.F64 , ; T1 + if (dt1.Is(F32) && dt2.Is(F64)) { + EmitT32_32(0xeeb70bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.U32.F64 , ; T1 + if (dt1.Is(U32) && dt2.Is(F64)) { + EmitT32_32(0xeebc0bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.S32.F64 , ; T1 + if (dt1.Is(S32) && dt2.Is(F64)) { + EmitT32_32(0xeebd0bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.F32.F64 , ; A1 + if (dt1.Is(F32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb70bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.U32.F64 , ; A1 + if (dt1.Is(U32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebc0bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.S32.F64 , ; A1 + if (dt1.Is(S32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebd0bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_1 encoded_dt(dt1, dt2); + Dt_U_sx_1 encoded_dt_2(dt2); + Dt_U_sx_1 encoded_dt_3(dt1); + if (IsUsingT32()) { + // VCVT{}{}.
<dt>.<dt> <Dd>, <Dm>, #<fbits> ; T1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t fbits_ = 64 - fbits; + EmitT32_32(0xef800e10U | ((encoded_dt.GetEncodingValue() & 0x1) << 28) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + AdvanceIT(); + return; + } + } + // VCVT{<c>}{<q>}.F64.<dt> <Ddm>
, , # ; T1 + if (dt1.Is(F64) && encoded_dt_2.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeeba0b40U | ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + // VCVT{}{}.
.F64 , , # ; T1 + if (encoded_dt_3.IsValid() && dt2.Is(F64) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeebe0b40U | ((encoded_dt_3.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_3.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.
<dt>.<dt> <Dd>, <Dm>, #<fbits> ; A1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al)) { + uint32_t fbits_ = 64 - fbits; + EmitA32(0xf2800e10U | ((encoded_dt.GetEncodingValue() & 0x1) << 24) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + return; + } + } + // VCVT{<c>}{<q>}.F64.<dt> <Ddm>
, , # ; A1 + if (dt1.Is(F64) && encoded_dt_2.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0eba0b40U | (cond.GetCondition() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + // VCVT{}{}.
.F64 , , # ; A1 + if (encoded_dt_3.IsValid() && dt2.Is(F64) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0ebe0b40U | (cond.GetCondition() << 28) | + ((encoded_dt_3.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_3.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_1 encoded_dt(dt1, dt2); + if (IsUsingT32()) { + // VCVT{}{}.
<dt>.<dt> <Qd>, <Qm>, #<fbits> ; T1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t fbits_ = 64 - fbits; + EmitT32_32(0xef800e50U | ((encoded_dt.GetEncodingValue() & 0x1) << 28) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{<c>}{<q>}.<dt>.<dt> <Qd>, <Qm>, #<fbits> ; A1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al)) { + uint32_t fbits_ = 64 - fbits; + EmitA32(0xf2800e50U | ((encoded_dt.GetEncodingValue() & 0x1) << 24) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_sx_1 encoded_dt(dt2); + Dt_U_sx_1 encoded_dt_2(dt1); + if (IsUsingT32()) { + // VCVT{<c>}{<q>}.F32.<dt> <Sdm>
, , # ; T1 + if (dt1.Is(F32) && encoded_dt.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeeba0a40U | ((encoded_dt.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + // VCVT{}{}.
.F32 , , # ; T1 + if (encoded_dt_2.IsValid() && dt2.Is(F32) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeebe0a40U | ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.F32.
, , # ; A1 + if (dt1.Is(F32) && encoded_dt.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0eba0a40U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + // VCVT{}{}.
.F32 , , # ; A1 + if (encoded_dt_2.IsValid() && dt2.Is(F32) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0ebe0a40U | (cond.GetCondition() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_1 encoded_dt(dt1, dt2); + if (IsUsingT32()) { + // VCVT{}{}.
<dt>.<dt> <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffbb0600U | (encoded_dt.GetEncodingValue() << 7) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VCVT{<c>}{<q>}.<dt>.<dt> <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3bb0600U | (encoded_dt.GetEncodingValue() << 7) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvt(
+    Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_op_1 encoded_dt(dt1, dt2);
+  if (IsUsingT32()) {
+    // VCVT{<c>}{<q>}.<dt>.<dt> <Qd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffbb0640U | (encoded_dt.GetEncodingValue() << 7) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VCVT{<c>}{<q>}.<dt>.<dt> <Qd>, <Qm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3bb0640U | (encoded_dt.GetEncodingValue() << 7) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvt(
+    Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (IsUsingT32()) {
+    // VCVT{<c>}{<q>}.F16.F32 <Dd>, <Qm> ; T1
+    if (dt1.Is(F16) && dt2.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb60600U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VCVT{<c>}{<q>}.F16.F32 <Dd>
, ; A1 + if (dt1.Is(F16) && dt2.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3b60600U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVT{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb60700U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16)) { + if (cond.Is(al)) { + EmitA32(0xf3b60700U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_2 encoded_dt(dt2); + if (IsUsingT32()) { + // VCVT{}{}.U32.F32 , ; T1 + if (dt1.Is(U32) && dt2.Is(F32)) { + EmitT32_32(0xeebc0ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.S32.F32 , ; T1 + if (dt1.Is(S32) && dt2.Is(F32)) { + EmitT32_32(0xeebd0ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.F32.
, ; T1 + if (dt1.Is(F32) && encoded_dt.IsValid()) { + EmitT32_32(0xeeb80a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.U32.F32 , ; A1 + if (dt1.Is(U32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebc0ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.S32.F32 , ; A1 + if (dt1.Is(S32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebd0ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.F32.
, ; A1 + if (dt1.Is(F32) && encoded_dt.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb80a40U | (cond.GetCondition() << 28) | + (encoded_dt.GetEncodingValue() << 7) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
<dt>.F32 <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitT32_32(0xffbb0000U | (encoded_dt.GetEncodingValue() << 7) |
+                 rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTA{<q>}.<dt>.F32 <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitA32(0xf3bb0000U | (encoded_dt.GetEncodingValue() << 7) |
+              rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(al);
+  Dt_op_3 encoded_dt(dt1);
+  if (IsUsingT32()) {
+    // VCVTA{<q>}.<dt>
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0040U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0040U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebc0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebc0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebc0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebc0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + EmitT32_32(0xeeb20a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTB{}{}.F16.F32 , ; T1 + if (dt1.Is(F16) && dt2.Is(F32)) { + EmitT32_32(0xeeb30a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTB{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTB{}{}.F16.F32 , ; A1 + if (dt1.Is(F16) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb30a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F64.F16
<Dd>, <Sm> ; T1
+    if (dt1.Is(F64) && dt2.Is(F16)) {
+      EmitT32_32(0xeeb20b40U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTB{<c>}{<q>}.F64.F16 <Dd>
, ; A1 + if (dt1.Is(F64) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F16.F64 , ; T1 + if (dt1.Is(F16) && dt2.Is(F64)) { + EmitT32_32(0xeeb30b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTB{}{}.F16.F64 , ; A1 + if (dt1.Is(F16) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb30b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
<dt>.F32 <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitT32_32(0xffbb0300U | (encoded_dt.GetEncodingValue() << 7) |
+                 rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTM{<q>}.<dt>.F32 <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitA32(0xf3bb0300U | (encoded_dt.GetEncodingValue() << 7) |
+              rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(al);
+  Dt_op_3 encoded_dt(dt1);
+  if (IsUsingT32()) {
+    // VCVTM{<q>}.<dt>
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0340U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0340U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebf0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebf0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebf0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebf0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
<dt>.F32 <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitT32_32(0xffbb0100U | (encoded_dt.GetEncodingValue() << 7) |
+                 rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTN{<q>}.<dt>.F32 <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitA32(0xf3bb0100U | (encoded_dt.GetEncodingValue() << 7) |
+              rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(al);
+  Dt_op_3 encoded_dt(dt1);
+  if (IsUsingT32()) {
+    // VCVTN{<q>}.<dt>
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0140U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0140U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebd0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebd0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebd0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebd0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
<dt>.F32 <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitT32_32(0xffbb0200U | (encoded_dt.GetEncodingValue() << 7) |
+                 rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTP{<q>}.<dt>.F32 <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid() && dt2.Is(F32)) {
+      EmitA32(0xf3bb0200U | (encoded_dt.GetEncodingValue() << 7) |
+              rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm);
+}
+
+void Assembler::vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(al);
+  Dt_op_3 encoded_dt(dt1);
+  if (IsUsingT32()) {
+    // VCVTP{<q>}.<dt>
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0240U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0240U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebe0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebe0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebe0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebe0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTR{}{}.U32.F32 , ; T1 + if (dt1.Is(U32) && dt2.Is(F32)) { + EmitT32_32(0xeebc0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTR{}{}.S32.F32 , ; T1 + if (dt1.Is(S32) && dt2.Is(F32)) { + EmitT32_32(0xeebd0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTR{}{}.U32.F32 , ; A1 + if (dt1.Is(U32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebc0a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTR{}{}.S32.F32 , ; A1 + if (dt1.Is(S32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebd0a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtr, &Assembler::vcvtr, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTR{}{}.U32.F64 , ; T1 + if (dt1.Is(U32) && dt2.Is(F64)) { + EmitT32_32(0xeebc0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTR{}{}.S32.F64 , ; T1 + if (dt1.Is(S32) && dt2.Is(F64)) { + EmitT32_32(0xeebd0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTR{}{}.U32.F64 , ; A1 + if (dt1.Is(U32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebc0b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTR{}{}.S32.F64 , ; A1 + if (dt1.Is(S32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebd0b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtr, &Assembler::vcvtr, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + EmitT32_32(0xeeb20ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTT{}{}.F16.F32 , ; T1 + if (dt1.Is(F16) && dt2.Is(F32)) { + EmitT32_32(0xeeb30ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTT{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTT{}{}.F16.F32 , ; A1 + if (dt1.Is(F16) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb30ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F64.F16
<Dd>, <Sm> ; T1
+    if (dt1.Is(F64) && dt2.Is(F16)) {
+      EmitT32_32(0xeeb20bc0U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VCVTT{<c>}{<q>}.F64.F16 <Dd>
, ; A1 + if (dt1.Is(F64) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F16.F64 , ; T1 + if (dt1.Is(F16) && dt2.Is(F64)) { + EmitT32_32(0xeeb30bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTT{}{}.F16.F64 , ; A1 + if (dt1.Is(F16) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb30bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VDIV{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VDIV{}{}.F32 {}, , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e800a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVdiv, &Assembler::vdiv, cond, dt, rd, rn, rm); +} + +void Assembler::vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VDIV{}{}.F64 {
<Dd>}, <Dn>, <Dm> ; T1
+    if (dt.Is(F64)) {
+      EmitT32_32(0xee800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VDIV{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A1
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0e800b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVdiv, &Assembler::vdiv, cond, dt, rd, rn, rm);
+}
+
+void Assembler::vdup(Condition cond, DataType dt, QRegister rd, Register rt) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_B_E_1 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VDUP{<c>}{<q>}.<dt> <Qd>
, ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xeea00b10U | ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitA32(0x0ea00b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rt); +} + +void Assembler::vdup(Condition cond, DataType dt, DRegister rd, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_B_E_1 encoded_dt(dt); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xee800b10U | ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitA32(0x0e800b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rt); +} + +void Assembler::vdup(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_imm4_1 encoded_dt(dt, rm); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00c00U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00c00U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rm); +} + +void Assembler::vdup(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_imm4_1 encoded_dt(dt, rm); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00c40U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00c40U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rm); +} + +void Assembler::veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VEOR{}{}{.
<dt>} {<Dd>
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VEOR{}{}{.
<dt>} {<Dd>
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVeor, &Assembler::veor, cond, dt, rd, rn, rm); +} + +void Assembler::veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VEOR{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VEOR{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVeor, &Assembler::veor, cond, dt, rd, rn, rm); +} + +void Assembler::vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + if (IsUsingT32()) { + // VEXT{}{}.8 {
}, , , # ; T1 + if (dt.Is(Untyped8) && (imm <= 7)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xefb00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + AdvanceIT(); + return; + } + } + // VEXT{}{}.
<dt> {<Dd>
}, , , # ; T1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32)) && + (imm <= (128 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm4 = imm / dt.GetSize(); + EmitT32_32(0xefb00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + AdvanceIT(); + return; + } + } + } else { + // VEXT{}{}.8 {
}, , , # ; A1 + if (dt.Is(Untyped8) && (imm <= 7)) { + if (cond.Is(al)) { + EmitA32(0xf2b00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + return; + } + } + // VEXT{}{}.
<dt> {<Dd>
}, , , # ; A1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32)) && + (imm <= (128 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al)) { + uint32_t imm4 = imm / dt.GetSize(); + EmitA32(0xf2b00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + return; + } + } + } + } + } + Delegate(kVext, &Assembler::vext, cond, dt, rd, rn, rm, operand); +} + +void Assembler::vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + if (IsUsingT32()) { + // VEXT{}{}.8 {}, , , # ; T1 + if (dt.Is(Untyped8) && (imm <= 15)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xefb00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + AdvanceIT(); + return; + } + } + // VEXT{}{}.
{}, , , # ; T1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32) || dt.Is(Untyped64)) && + (imm <= (64 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm4 = imm / dt.GetSize(); + EmitT32_32(0xefb00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + AdvanceIT(); + return; + } + } + } else { + // VEXT{}{}.8 {}, , , # ; A1 + if (dt.Is(Untyped8) && (imm <= 15)) { + if (cond.Is(al)) { + EmitA32(0xf2b00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + return; + } + } + // VEXT{}{}.
{}, , , # ; A1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32) || dt.Is(Untyped64)) && + (imm <= (64 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al)) { + uint32_t imm4 = imm / dt.GetSize(); + EmitA32(0xf2b00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + return; + } + } + } + } + } + Delegate(kVext, &Assembler::vext, cond, dt, rd, rn, rm, operand); +} + +void Assembler::vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VFMA{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeea00b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMA{}{}.F32
, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VFMA{}{}.F64
, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ea00b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VFMA{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeea00a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMA{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ea00a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMS{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VFMS{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeea00b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMS{}{}.F32
, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VFMS{}{}.F64
, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ea00b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm); +} + +void Assembler::vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VFMS{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm); +} + +void Assembler::vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMS{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeea00a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMS{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ea00a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm); +} + +void Assembler::vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFNMA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee900a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFNMA{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e900a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfnma, &Assembler::vfnma, cond, dt, rd, rn, rm); +} + +void Assembler::vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFNMA{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee900b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFNMA{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e900b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfnma, &Assembler::vfnma, cond, dt, rd, rn, rm); +} + +void Assembler::vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFNMS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee900a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFNMS{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e900a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfnms, &Assembler::vfnms, cond, dt, rd, rn, rm); +} + +void Assembler::vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFNMS{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee900b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFNMS{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e900b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfnms, &Assembler::vfnms, cond, dt, rd, rn, rm); +} + +void Assembler::vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VHADD{}{}.
<dt> {<Dd>
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VHADD{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVhadd, &Assembler::vhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VHADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000040U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VHADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000040U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVhadd, &Assembler::vhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vhsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VHSUB{}{}.
<dt> {<Dd>
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VHSUB{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVhsub, &Assembler::vhsub, cond, dt, rd, rn, rm); +} + +void Assembler::vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VHSUB{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000240U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VHSUB{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000240U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVhsub, &Assembler::vhsub, cond, dt, rd, rn, rm); +} + +void Assembler::vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_1 encoded_align_1(align, nreglist); + Align_a_1 encoded_align_2(align, dt); + Align_index_align_1 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD1{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitT32_32(0xf9a00c0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitT32_32(0xf9a00c0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD1{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD1{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD1{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitA32(0xf4a00c0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD1{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitA32(0xf4a00c0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD1{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD1{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_1 encoded_align_1(align, nreglist); + Align_a_1 encoded_align_2(align, dt); + Align_index_align_1 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD1{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitT32_32(0xf9a00c00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD1{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD1{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD1{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitA32(0xf4a00c00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD1{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld1, &Assembler::vld1, cond, dt, nreglist, operand); +} + +void Assembler::vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_a_2 encoded_align_2(align, dt); + Align_index_align_2 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d0fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d0dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d0fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d0dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_a_2 encoded_align_2(align, dt); + Align_index_align_2 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d00U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d00U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld2, &Assembler::vld2, cond, dt, nreglist, operand); +} + +void Assembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VLD3{}{}.
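+      // Note on the shape of this generated file: every encoding block
+      // below first validates the operands (data-type encoder validity,
+      // register-list shape and spacing, addressing mode, PC/SP
+      // restrictions), then ORs the operand fields into a fixed opcode base
+      // and emits it with EmitT32_32() or EmitA32(). When no block matches,
+      // the final Delegate() call hands the operation back to the caller
+      // (typically the MacroAssembler layer) so it can synthesise an
+      // equivalent sequence instead.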
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VLD3{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld3, &Assembler::vld3, cond, dt, nreglist, operand); +} + +void Assembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VLD3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e0fU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e0dU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e0fU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e0dU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Sign sign = operand.GetSign(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VLD3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e00U | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e00U | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld3, &Assembler::vld3, cond, dt, nreglist, operand); +} + +void Assembler::vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Dt_size_8 encoded_dt_2(dt, align); + Align_align_4 encoded_align_1(align); + Align_a_3 encoded_align_2(align, dt); + Align_index_align_3 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD4{}{}.
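+      // In the VLD4 forms of this function, the all-lanes variants encode
+      // the element size jointly with the alignment, which is why they use
+      // Dt_size_8 (constructed from both dt and align above) where the
+      // multiple- and single-lane variants use Dt_size_7.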
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Dt_size_8 encoded_dt_2(dt, align); + Align_align_4 encoded_align_1(align); + Align_a_3 encoded_align_2(align, dt); + Align_index_align_3 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld4, &Assembler::vld4, cond, dt, nreglist, operand); +} + +void Assembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDM{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec900b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDM{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c900b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldm, &Assembler::vldm, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDM{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec900a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDM{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c900a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldm, &Assembler::vldm, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMDB{}{}{.} !, ; T1 + if (write_back.DoesWriteBack() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed300b00U | (rn.GetCode() << 16) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMDB{}{}{.} !, ; A1 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned 
len = dreglist.GetLength() * 2; + EmitA32(0x0d300b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + dreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVldmdb, &Assembler::vldmdb, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMDB{}{}{.} !, ; T2 + if (write_back.DoesWriteBack() && (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed300a00U | (rn.GetCode() << 16) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMDB{}{}{.} !, ; A2 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d300a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + sreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVldmdb, &Assembler::vldmdb, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMIA{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec900b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMIA{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c900b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldmia, &Assembler::vldmia, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMIA{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec900a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMIA{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c900a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldmia, &Assembler::vldmia, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldr(Condition cond, + DataType dt, + DRegister rd, + Location* location) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Location::Offset offset = + 
+      location->IsBound()
+          ? location->GetLocation() -
+                AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4)
+          : 0;
+  // VLDR{<c>}{<q>}{.64} <Dd>, <label> ; T1
+  // VLDR{<c>}{<q>}{.64} <Dd>, <label> ; A1
+  // (label-form emission bodies not recovered here; T1/A1 selection follows
+  //  IsUsingT32(), as in the other overloads)
+  Delegate(kVldr, &Assembler::vldr, cond, dt, rd, location);
+}
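+// A minimal usage sketch, not part of this file: label-based loads go
+// through the Location overload above and are patched once the literal is
+// bound, while immediate-offset loads use the MemOperand overload that
+// follows. Assuming the usual vixl::aarch32 MacroAssembler conveniences
+// (capitalised wrappers that forward here and catch Delegate() fallbacks):
+//
+//   vixl::aarch32::MacroAssembler masm;
+//   masm.UseT32();
+//   masm.Vldr(d0, vixl::aarch32::MemOperand(r0, 8));  // VLDR d0, [r0, #8]
+//   masm.FinalizeCode();
+//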
+
+void Assembler::vldr(Condition cond,
+                     DataType dt,
+                     DRegister rd,
+                     const MemOperand& operand) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (operand.IsImmediate()) {
+    Register rn = operand.GetBaseRegister();
+    int32_t offset = operand.GetOffsetImmediate();
+    if (IsUsingT32()) {
+      // VLDR{<c>}{<q>}{.64} <Dd>, [PC, #<_plusminus_><imm>] ; T1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && rn.Is(pc) && operand.IsOffset()) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitT32_32(0xed1f0b00U | rd.Encode(22, 12) | offset_ | (sign << 23));
+        AdvanceIT();
+        return;
+      }
+      // VLDR{<c>}{<q>}{.64} <Dd>, [<Rn>{, #{+/-}<imm>}] ; T1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() &&
+          ((rn.GetCode() & 0xf) != 0xf)) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitT32_32(0xed100b00U | rd.Encode(22, 12) | (rn.GetCode() << 16) |
+                   offset_ | (sign << 23));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VLDR{<c>}{<q>}{.64} <Dd>, [PC, #<_plusminus_><imm>] ; A1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && rn.Is(pc) && operand.IsOffset() &&
+          cond.IsNotNever()) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitA32(0x0d1f0b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+                offset_ | (sign << 23));
+        return;
+      }
+      // VLDR{<c>}{<q>}{.64} <Dd>, [<Rn>{, #{+/-}<imm>}] ; A1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever() &&
+          ((rn.GetCode() & 0xf) != 0xf)) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitA32(0x0d100b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+                (rn.GetCode() << 16) | offset_ | (sign << 23));
+        return;
+      }
+    }
+  }
+  Delegate(kVldr, &Assembler::vldr, cond, dt, rd, operand);
+}
+
+void Assembler::vldr(Condition cond,
+                     DataType dt,
+                     SRegister rd,
+                     Location* location) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Location::Offset offset =
+      location->IsBound()
+          ? location->GetLocation() -
+                AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4)
+          : 0;
+  if (IsUsingT32()) {
+    // VLDR{<c>}{<q>}{.32} <Sd>, <label> ; T2
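+    // The single-precision (.32 <Sd>) VLDR forms mirror the .64 <Dd> forms
+    // above, moving from the 0x...0b00 coprocessor opcode space to
+    // 0x...0a00, as the VLDM/VLDMDB/VLDMIA D/S pairs earlier in this
+    // section do.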
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMAX{}{}.
+    // VMAX{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000600U |
+                   ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VMAX{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VMAX{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000600U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmax, &Assembler::vmax, cond, dt, rd, rn, rm); +} + +void Assembler::vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMAX{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMAX{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000640U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMAX{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMAX{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000640U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmax, &Assembler::vmax, cond, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32
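+    // VMAXNM/VMINNM are the Armv8 "maximum/minimum number" operations: when
+    // exactly one operand is a quiet NaN they return the other, numeric,
+    // operand. They have no conditional encodings, hence CheckIT(al) above
+    // and the OutsideITBlock() requirement on the T32 forms below.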
+    // VMAXNM{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
+    if (OutsideITBlock() && dt.Is(F32)) {
+      EmitT32_32(0xff000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+    // VMAXNM{<q>}.F64 <Dd>, <Dn>, <Dm> ; T2
+    if (OutsideITBlock() && dt.Is(F64)) {
+      EmitT32_32(0xfe800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VMAXNM{<q>}.F32 <Dd>, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      EmitA32(0xf3000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+              rm.Encode(5, 0));
+      return;
+    }
+    // VMAXNM{<q>}.F64 <Dd>, <Dn>, <Dm> ; A2
, , ; A2 + if (dt.Is(F64)) { + EmitA32(0xfe800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMAXNM{}.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32 , , ; T2 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMAXNM{}.F32 , , ; A2 + if (dt.Is(F32)) { + EmitA32(0xfe800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMIN{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMIN{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000610U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMIN{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMIN{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000610U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmin, &Assembler::vmin, cond, dt, rd, rn, rm); +} + +void Assembler::vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMIN{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMIN{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000650U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMIN{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMIN{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000650U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmin, &Assembler::vmin, cond, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32
, , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMINNM{}.F64
, , ; T2 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe800b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32
, , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + // VMINNM{}.F64
, , ; A2 + if (dt.Is(F64)) { + EmitA32(0xfe800b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32 , , ; T2 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe800a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32 , , ; A2 + if (dt.Is(F32)) { + EmitA32(0xfe800a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.
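+    // The by-scalar (lane) forms below can only reach the bottom of the
+    // register file: 16-bit elements require rm in d0-d7 with lane 0-3,
+    // and 32-bit elements rm in d0-d15 with lane 0-1, which is exactly what
+    // the rm.GetCode()/rm.GetLane() checks enforce.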
, , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}.
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf3800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLA{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMLA{}{}.
+    // VMLA{<c>}{<q>}.<dt> <Dd>, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000900U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VMLA{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VMLA{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; A2
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0e000b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+    // VMLA{<c>}{<q>}.<dt> <Dd>, <Dn>, <Dm> ; A1
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLA{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLA{}{}. , , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMLA{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMLA{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e000a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_11 encoded_dt(dt); + if (IsUsingT32()) { + // VMLAL{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800240U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLAL{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800240U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmlal, &Assembler::vmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_12 encoded_dt(dt); + if (IsUsingT32()) { + // VMLAL{}{}. 
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800800U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLAL{}{}. , , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800800U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmlal, &Assembler::vmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.
, , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}.
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf3800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.F32
+    // VMLS{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
+    if (dt.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef200d10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VMLS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; T2
+    if (dt.Is(F64)) {
+      EmitT32_32(0xee000b40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
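+    // For the integer forms below, the Dt_size_10 value appears to pack the
+    // element-size bits into bits 21:20 of the opcode (the << 20 in the
+    // emit calls); this note is an explanatory sketch of the layout.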
+    // VMLS{<c>}{<q>}.<dt> <Dd>, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xff000900U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VMLS{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2200d10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VMLS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; A2
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0e000b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+    // VMLS{<c>}{<q>}.<dt> <Dd>, <Dn>, <Dm> ; A1
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLS{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLS{}{}. , , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMLS{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee000a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMLS{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e000a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_11 encoded_dt(dt); + if (IsUsingT32()) { + // VMLSL{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800640U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLSL{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800640U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmlsl, &Assembler::vmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_12 encoded_dt(dt); + if (IsUsingT32()) { + // VMLSL{}{}. 
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800a00U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLSL{}{}. , , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800a00U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmlsl, &Assembler::vmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vmov(Condition cond, Register rt, SRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee100a10U | (rt.GetCode() << 12) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e100a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + rn.Encode(7, 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rn); +} + +void Assembler::vmov(Condition cond, SRegister rn, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee000a10U | rn.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e000a10U | (cond.GetCondition() << 28) | rn.Encode(7, 16) | + (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rn, rt); +} + +void Assembler::vmov(Condition cond, Register rt, Register rt2, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , ; T1 + if (((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec500b10U | (rt.GetCode() << 12) | (rt2.GetCode() << 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , ; A1 + if (cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c500b10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rt2, rm); +} + +void Assembler::vmov(Condition cond, DRegister rm, Register rt, Register rt2) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , ; T1 + if (((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec400b10U | rm.Encode(5, 0) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , ; A1 + if (cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c400b10U | (cond.GetCondition() << 28) | rm.Encode(5, 0) | + (rt.GetCode() << 12) | (rt2.GetCode() << 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rm, rt, rt2); +} + +void Assembler::vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , , ; T1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec500a10U | (rt.GetCode() << 12) | (rt2.GetCode() << 16) | + 
rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , , ; A1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c500a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rt2, rm, rm1); +} + +void Assembler::vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , , ; T1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec400a10U | rm.Encode(5, 0) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , , ; A1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c400a10U | (cond.GetCondition() << 28) | rm.Encode(5, 0) | + (rt.GetCode() << 12) | (rt2.GetCode() << 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rm, rm1, rt, rt2); +} + +void Assembler::vmov(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_opc1_opc2_1 encoded_dt(dt, rd); + if (IsUsingT32()) { + // VMOV{}{}{.} , ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee000b10U | ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}{.} , ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e000b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | rd.Encode(7, 16) | + (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, rt); +} + +void Assembler::vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmov encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32( + 0xef800010U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMOV{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800010U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsImmediate()) { + ImmediateVFP vfp(operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.F64
, # ; T2 + if (dt.Is(F64) && vfp.IsValid()) { + EmitT32_32(0xeeb00b00U | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F64
, # ; A2 + if (dt.Is(F64) && vfp.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb00b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + return; + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VMOV{}{}.F64
+      // VMOV{<c>}{<q>}.F64 <Dd>, <Dm> ; T2
+      if (dt.Is(F64)) {
+        EmitT32_32(0xeeb00b40U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+      // VMOV{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; T1
+      if (!dt.Is(F64)) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) |
+                     rm.Encode(5, 0));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VMOV{<c>}{<q>}.F64 <Dd>, <Dm> ; A2
+      if (dt.Is(F64) && cond.IsNotNever()) {
+        EmitA32(0x0eb00b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+                rm.Encode(5, 0));
+        return;
+      }
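+      // Both non-F64 register forms encode rm into the two source fields
+      // (note the paired rm.Encode(7, 16) and rm.Encode(5, 0) calls): the
+      // NEON register VMOV appears to be emitted as its architectural alias
+      // VORR <Dd>, <Dm>, <Dm>.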
+      // VMOV{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; A1
+      if (!dt.Is(F64)) {
+        if (cond.Is(al)) {
+          EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) |
+                  rm.Encode(5, 0));
+          return;
+        }
+      }
+    }
+  }
+  Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand);
+}
+
+void Assembler::vmov(Condition cond,
+                     DataType dt,
+                     QRegister rd,
+                     const QOperand& operand) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (operand.IsImmediate()) {
+    ImmediateVmov encoded_dt(dt, operand.GetNeonImmediate());
+    if (IsUsingT32()) {
+      // VMOV{<c>}{<q>}.<dt> <Qd>, #<imm> ; T1
+      if (encoded_dt.IsValid()) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(
+              0xef800050U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) |
+              ((encoded_dt.GetEncodingValue() & 0x10) << 1) |
+              rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) |
+              ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+              ((encoded_dt.GetEncodedImmediate() & 0x80) << 21));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VMOV{<c>}{<q>}.<dt> <Qd>, #<imm> ; A1
+      if (encoded_dt.IsValid()) {
+        if (cond.Is(al)) {
+          EmitA32(0xf2800050U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) |
+                  ((encoded_dt.GetEncodingValue() & 0x10) << 1) |
+                  rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x80) << 17));
+          return;
+        }
+      }
+    }
+  }
+  if (operand.IsRegister()) {
+    QRegister rm = operand.GetRegister();
+    if (IsUsingT32()) {
+      // VMOV{<c>}{<q>}{.<dt>} <Qd>, <Qm> ; T1
+      if (!dt.Is(F64)) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) |
+                     rm.Encode(5, 0));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VMOV{<c>}{<q>}{.<dt>} <Qd>, <Qm> ; A1
} , ; A1 + if (!dt.Is(F64)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand); +} + +void Assembler::vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVFP vfp(operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.F32 , # ; T2 + if (dt.Is(F32) && vfp.IsValid()) { + EmitT32_32(0xeeb00a00U | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F32 , # ; A2 + if (dt.Is(F32) && vfp.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb00a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + return; + } + } + } + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VMOV{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb00a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb00a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand); +} + +void Assembler::vmov(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_opc1_opc2_1 encoded_dt(dt, rn); + if (IsUsingT32()) { + // VMOV{}{}{.
} , ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee100b10U | ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + ((encoded_dt.GetEncodingValue() & 0x10) << 19) | + (rt.GetCode() << 12) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}{.
} , ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e100b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + ((encoded_dt.GetEncodingValue() & 0x10) << 19) | + (rt.GetCode() << 12) | rn.Encode(7, 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rt, rn); +} + +void Assembler::vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_imm3H_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMOVL{}{}.
+    // VMOVL{<c>}{<q>}.<dt> <Qd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef800a10U | ((encoded_dt.GetEncodingValue() & 0x7) << 19) |
+                   ((encoded_dt.GetEncodingValue() & 0x8) << 25) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
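+    // VMOVL widens each element of <Dm> to twice its width in <Qd>; the
+    // Dt_U_imm3H_1 value ORed in above and below appears to supply both the
+    // U (signedness) bit and the imm3H field that select the element type.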
+    // VMOVL{<c>}{<q>}.<dt> <Qd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2800a10U | ((encoded_dt.GetEncodingValue() & 0x7) << 19) |
+                ((encoded_dt.GetEncodingValue() & 0x8) << 21) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVmovl, &Assembler::vmovl, cond, dt, rd, rm);
+}
+
+void Assembler::vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_3 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VMOVN{<c>}{<q>}.<dt> <Dd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb20200U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VMOVN{<c>}{<q>}.<dt> <Dd>, <Qm> ; A1
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmovn, &Assembler::vmovn, cond, dt, rd, rm); +} + +void Assembler::vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMRS{}{} , ; T1 + EmitT32_32(0xeef00a10U | (rt.GetCode() << 12) | (spec_reg.GetReg() << 16)); + AdvanceIT(); + return; + } else { + // VMRS{}{} , ; A1 + if (cond.IsNotNever()) { + EmitA32(0x0ef00a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + (spec_reg.GetReg() << 16)); + return; + } + } + Delegate(kVmrs, &Assembler::vmrs, cond, rt, spec_reg); +} + +void Assembler::vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMSR{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xeee00a10U | (spec_reg.GetReg() << 16) | + (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMSR{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0ee00a10U | (cond.GetCondition() << 28) | + (spec_reg.GetReg() << 16) | (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmsr, &Assembler::vmsr, cond, spec_reg, rt); +} + +void Assembler::vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.
+    // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm>[<lane>] ; T1
+    if (encoded_dt.IsValid() &&
+        ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) ||
+         (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        uint32_t shift = 4;
+        if (dt.Is(I16)) {
+          shift = 3;
+        }
+        uint32_t mvm = dm.GetCode() | index << shift;
+        EmitT32_32(0xef800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 6) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) |
+                   ((mvm & 0x10) << 1));
+        AdvanceIT();
+        return;
+      }
+    }
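+    // The scalar operand is packed as mvm = register | (index << shift):
+    // 16-bit elements may use D0-D7 with a 2-bit lane index (shift == 3),
+    // 32-bit elements D0-D15 with a 1-bit index (shift == 4), matching the
+    // range checks above.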
+  } else {
+    // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm>[<lane>] ; A1
+    if (encoded_dt.IsValid() &&
+        ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) ||
+         (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) {
+      if (cond.Is(al)) {
+        uint32_t shift = 4;
+        if (dt.Is(I16)) {
+          shift = 3;
+        }
+        uint32_t mvm = dm.GetCode() | index << shift;
+        EmitA32(0xf2800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                ((encoded_dt.GetEncodingValue() & 0x4) << 6) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) |
+                ((mvm & 0x10) << 1));
+        return;
+      }
+    }
+  }
+  Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, dm, index);
+}
+
+void Assembler::vmul(Condition cond,
+                     DataType dt,
+                     QRegister rd,
+                     QRegister rn,
+                     DRegister dm,
+                     unsigned index) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_F_size_3 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VMUL{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Dm>[<lane>] ; T1
{}, , [] ; T1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xff800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.
{}, , [] ; A1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf3800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, dm, index); +} + +void Assembler::vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.F32 {
+    // VMUL{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
+    if (dt.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xff000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VMUL{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; T2
+    if (dt.Is(F64)) {
+      EmitT32_32(0xee200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+    // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000910U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VMUL{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VMUL{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A2
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0e200b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+    // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000910U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMUL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000950U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMUL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000950U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMUL{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMUL{}{}.F32 {}, , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e200a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VMULL{}{}.
, , [] ; T1 + if (encoded_dt.IsValid() && + (((dt.Is(S16) || dt.Is(U16)) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && !dt.Is(U16) && (index <= 1) && + (dm.GetCode() <= 15)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800a40U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VMULL{}{}.
, , [] ; A1 + if (encoded_dt.IsValid() && + (((dt.Is(S16) || dt.Is(U16)) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && !dt.Is(U16) && (index <= 1) && + (dm.GetCode() <= 15)))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800a40U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVmull, &Assembler::vmull, cond, dt, rd, rn, dm, index); +} + +void Assembler::vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMULL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800c00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + ((encoded_dt.GetEncodingValue() & 0x8) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMULL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800c00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + ((encoded_dt.GetEncodingValue() & 0x8) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmull, &Assembler::vmull, cond, dt, rd, rn, rm); +} + +void Assembler::vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmvn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMVN{}{}.
+      // VMVN{<c>}{<q>}.<dt> <Dd>, #<imm> ; T1
+      if (encoded_dt.IsValid()) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) |
+                     rd.Encode(22, 12) |
+                     (encoded_dt.GetEncodedImmediate() & 0xf) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x80) << 21));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VMVN{<c>}{<q>}.<dt> <Dd>, #<imm> ; A1
+      if (encoded_dt.IsValid()) {
+        if (cond.Is(al)) {
+          EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) |
+                  rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x80) << 17));
+          return;
+        }
+      }
+    }
+  }
+  if (operand.IsRegister()) {
+    DRegister rm = operand.GetRegister();
+    USE(dt);
+    if (IsUsingT32()) {
+      // VMVN{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; T1
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb00580U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VMVN{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; A1
, ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b00580U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmvn, &Assembler::vmvn, cond, dt, rd, operand); +} + +void Assembler::vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmvn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMVN{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMVN{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VMVN{}{}{.
} , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb005c0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMVN{}{}{.
} , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b005c0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmvn, &Assembler::vmvn, cond, dt, rd, operand); +} + +void Assembler::vneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VNEG{}{}.
+    // VNEG{<c>}{<q>}.<dt> <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb10380U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 8) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VNEG{<c>}{<q>}.F64 <Dd>, <Dm> ; T2
+    if (dt.Is(F64)) {
+      EmitT32_32(0xeeb10b40U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VNEG{<c>}{<q>}.<dt> <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b10380U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) |
+                ((encoded_dt.GetEncodingValue() & 0x4) << 8) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VNEG{<c>}{<q>}.F64 <Dd>, <Dm> ; A2
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0eb10b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm);
+}
+
+void Assembler::vneg(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_F_size_1 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VNEG{<c>}{<q>}.<dt> <Qd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb103c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 8) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VNEG{<c>}{<q>}.<dt> <Qd>, <Qm> ; A1
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b103c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm); +} + +void Assembler::vneg(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNEG{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb10a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNEG{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb10a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm); +} + +void Assembler::vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee100a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLA{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e100a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmla, &Assembler::vnmla, cond, dt, rd, rn, rm); +} + +void Assembler::vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLA{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee100b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLA{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e100b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmla, &Assembler::vnmla, cond, dt, rd, rn, rm); +} + +void Assembler::vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLS{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e100a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmls, &Assembler::vnmls, cond, dt, rd, rn, rm); +} + +void Assembler::vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLS{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLS{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e100b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmls, &Assembler::vnmls, cond, dt, rd, rn, rm); +} + +void Assembler::vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMUL{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee200a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMUL{}{}.F32 {}, , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e200a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmul, &Assembler::vnmul, cond, dt, rd, rn, rm); +} + +void Assembler::vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMUL{}{}.F64 {
}, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee200b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMUL{}{}.F64 {
}, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e200b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmul, &Assembler::vnmul, cond, dt, rd, rn, rm); +} + +void Assembler::vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVorn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORN{}{}.
+      // VORN{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; T1
+      if (encoded_dt.IsValid() && rd.Is(rn)) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef800010U | (encoded_dt.GetEncodingValue() << 8) |
+                     rd.Encode(22, 12) |
+                     (encoded_dt.GetEncodedImmediate() & 0xf) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x80) << 21));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VORN{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; A1
+      if (encoded_dt.IsValid() && rd.Is(rn)) {
+        if (cond.Is(al)) {
+          EmitA32(0xf2800010U | (encoded_dt.GetEncodingValue() << 8) |
+                  rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                  ((encoded_dt.GetEncodedImmediate() & 0x80) << 17));
+          return;
+        }
+      }
+    }
+  }
+  if (operand.IsRegister()) {
+    DRegister rm = operand.GetRegister();
+    USE(dt);
+    if (IsUsingT32()) {
+      // VORN{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef300110U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VORN{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; A1
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVorn, &Assembler::vorn, cond, dt, rd, rn, operand); +} + +void Assembler::vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVorn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORN{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORN{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORN{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORN{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVorn, &Assembler::vorn, cond, dt, rd, rn, operand); +} + +void Assembler::vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORR{}{}{.
+      // VORR{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef200110U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VORR{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; A1
+      if (cond.Is(al)) {
+        EmitA32(0xf2200110U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  if (operand.IsImmediate()) {
+    ImmediateVorr encoded_dt(dt, operand.GetNeonImmediate());
+    if (IsUsingT32()) {
+      // VORR{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; T1
+      if (encoded_dt.IsValid() && rd.Is(rn)) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef800010U | (encoded_dt.GetEncodingValue() << 8) |
+                     rd.Encode(22, 12) |
+                     (encoded_dt.GetEncodedImmediate() & 0xf) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) |
+                     ((encoded_dt.GetEncodedImmediate() & 0x80) << 21));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VORR{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; A1
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800010U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + Delegate(kVorr, &Assembler::vorr, cond, dt, rd, rn, operand); +} + +void Assembler::vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORR{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORR{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + ImmediateVorr encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORR{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORR{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + Delegate(kVorr, &Assembler::vorr, cond, dt, rd, rn, operand); +} + +void Assembler::vpadal(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADAL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00600U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADAL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00600U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadal, &Assembler::vpadal, cond, dt, rd, rm); +} + +void Assembler::vpadal(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADAL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00640U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADAL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00640U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadal, &Assembler::vpadal, cond, dt, rd, rm); +} + +void Assembler::vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VPADD{}{}.F32 {
+    // VPADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
+    if (dt.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xff000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VPADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000b10U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VPADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VPADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b10U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadd, &Assembler::vpadd, cond, dt, rd, rn, rm); +} + +void Assembler::vpaddl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADDL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00200U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADDL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00200U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpaddl, &Assembler::vpaddl, cond, dt, rd, rm); +} + +void Assembler::vpaddl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADDL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00240U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADDL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00240U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpaddl, &Assembler::vpaddl, cond, dt, rd, rm); +} + +void Assembler::vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VPMAX{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VPMAX{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000a00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPMAX{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VPMAX{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000a00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpmax, &Assembler::vpmax, cond, dt, rd, rn, rm); +} + +void Assembler::vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VPMIN{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VPMIN{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000a10U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPMIN{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VPMIN{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; A1
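+    // VPMAX/VPMIN only exist for 64-bit (D) operands: each result lane is
+    // the max/min of an adjacent pair drawn from <Dn> then <Dm>, so there
+    // is no Q-register overload of these encoders. A hedged sketch,
+    // assuming the usual MacroAssembler wrapper (illustrative only):
+    //
+    //   masm.Vpmax(F32, d0, d1, d2);  // pairwise maxima of d1 and d2 lanes
+    //   masm.Vpmin(U16, d3, d4, d5);  // pairwise minima of d4 and d5 lanes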
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000a10U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpmin, &Assembler::vpmin, cond, dt, rd, rn, rm); +} + +void Assembler::vpop(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPOP{}{}{.} ; T1 + if (((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xecbd0b00U | dreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VPOP{}{}{.} ; A1 + if (cond.IsNotNever() && + ((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0cbd0b00U | (cond.GetCondition() << 28) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpop, &Assembler::vpop, cond, dt, dreglist); +} + +void Assembler::vpop(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPOP{}{}{.} ; T2 + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xecbd0a00U | sreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } else { + // VPOP{}{}{.} ; A2 + if (cond.IsNotNever()) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0cbd0a00U | (cond.GetCondition() << 28) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpop, &Assembler::vpop, cond, dt, sreglist); +} + +void Assembler::vpush(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPUSH{}{}{.} ; T1 + if (((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed2d0b00U | dreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VPUSH{}{}{.} ; A1 + if (cond.IsNotNever() && + ((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0d2d0b00U | (cond.GetCondition() << 28) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpush, &Assembler::vpush, cond, dt, dreglist); +} + +void Assembler::vpush(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPUSH{}{}{.} ; T2 + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed2d0a00U | sreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } else { + // VPUSH{}{}{.} ; A2 + if (cond.IsNotNever()) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d2d0a00U | (cond.GetCondition() << 28) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpush, &Assembler::vpush, cond, dt, sreglist); +} + +void Assembler::vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if 
(IsUsingT32()) {
+    // VQABS{<c>}{<q>}.<dt> <Dd>
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00700U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00700U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqabs, &Assembler::vqabs, cond, dt, rd, rm); +} + +void Assembler::vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00740U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00740U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqabs, &Assembler::vqabs, cond, dt, rd, rm); +} + +void Assembler::vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQADD{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
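+    // VQADD saturates rather than wrapping: each lane of <Dn> + <Dm> is
+    // clamped to the range of the element type, and the cumulative QC flag
+    // in FPSCR is set if any lane saturated. A hedged sketch, assuming the
+    // usual MacroAssembler wrapper (illustrative only):
+    //
+    //   masm.Vqadd(S16, d0, d1, d2);  // d0.h[i] = SatQ(d1.h[i] + d2.h[i])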
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000010U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQADD{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000010U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqadd, &Assembler::vqadd, cond, dt, rd, rn, rm); +} + +void Assembler::vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000050U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000050U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqadd, &Assembler::vqadd, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLAL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLAL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmlal, &Assembler::vqdmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLAL{}{}.
, , [] ; T2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800340U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLAL{}{}.
, , [] ; A2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800340U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVqdmlal, &Assembler::vqdmlal, cond, dt, rd, rn, dm, index); +} + +void Assembler::vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLSL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLSL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmlsl, &Assembler::vqdmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLSL{}{}.
, , [] ; T2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800740U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLSL{}{}.
, , [] ; A2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800740U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVqdmlsl, &Assembler::vqdmlsl, cond, dt, rd, rn, dm, index); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
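+    // VQDMULH is the Q15/Q31 fixed-point multiply: each lane computes
+    // SatQ((2 * <Dn>[i] * <Dm>[i]) >> esize), i.e. the high half of the
+    // doubled product, saturating the 0x8000 * 0x8000 corner case. Only
+    // S16 and S32 are architecturally valid, which is why the lane
+    // variants below re-check dt.Is(S16) || dt.Is(S32). Sketch
+    // (illustrative only, assuming the MacroAssembler wrapper):
+    //
+    //   masm.Vqdmulh(S16, d0, d1, d2);  // Q15 multiply of the lanes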
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
<dt> {<Dd>}, <Dn>, <Dm[x]> ; T2
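+    // The by-scalar form packs the scalar register and lane index into one
+    // field (mvm below): the index is shifted in at bit 3 for S16 (so the
+    // scalar must be d0-d7, lane 0-3) and at bit 4 for S32 (d0-d15, lane 0
+    // or 1). Anything out of range falls through to Delegate().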
}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
<dt> {<Dd>
}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf3800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800d00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800d00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmull, &Assembler::vqdmull, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULL{}{}.
, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULL{}{}.
, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmull, &Assembler::vqdmull, cond, dt, rd, rn, rm); +} + +void Assembler::vqmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQMOVN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQMOVN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqmovn, &Assembler::vqmovn, cond, dt, rd, rm); +} + +void Assembler::vqmovun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_14 encoded_dt(dt); + if (IsUsingT32()) { + // VQMOVUN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQMOVUN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqmovun, &Assembler::vqmovun, cond, dt, rd, rm); +} + +void Assembler::vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqneg, &Assembler::vqneg, cond, dt, rd, rm); +} + +void Assembler::vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb007c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b007c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqneg, &Assembler::vqneg, cond, dt, rd, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
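+    // VQRDMULH is VQDMULH plus rounding: a constant of 1 << (esize - 1) is
+    // added to the doubled product before the high half is taken, then the
+    // result is saturated. Sketch (illustrative only, assuming the
+    // MacroAssembler wrapper):
+    //
+    //   masm.Vqrdmulh(S32, d0, d1, d2);  // rounded Q31 multiply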
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
<dt> {<Dd>
}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
<dt> {<Dd>
}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf3800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQRSHL{}{}.
<dt> {<Dd>}, <Dm>, <Dn> ; T1
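+    // Note the operand order: VQRSHL shifts <Dm> by the per-lane *signed*
+    // amounts held in <Dn> (negative values shift right), rounding and then
+    // saturating, which is why the C++ signature takes (rd, rm, rn).
+    // Sketch (illustrative only, assuming the MacroAssembler wrapper):
+    //
+    //   masm.Vqrshl(S16, d0, d1, d2);  // d0 = SatQ(round(d1 << d2)), per lane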
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000510U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHL{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000510U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVqrshl, &Assembler::vqrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQRSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000550U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000550U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVqrshl, &Assembler::vqrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_op_size_3 encoded_dt(dt); + Dt_imm6_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQRSHRN{}{}.
, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VQRSHRN{}{}.
, , # ; T1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800950U | + (encoded_dt_2.GetTypeEncodingValue() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHRN{}{}.
, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VQRSHRN{}{}.
, , # ; A1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800950U | (encoded_dt_2.GetTypeEncodingValue() << 24) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqrshrn, &Assembler::vqrshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_2 encoded_dt(dt); + Dt_size_14 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQRSHRUN{}{}.
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xff800850U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VQRSHRUN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHRUN{}{}.
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf3800850U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VQRSHRUN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVqrshrun, &Assembler::vqrshrun, cond, dt, rd, rm, operand); +} + +void Assembler::vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}.
<dt> {<Dd>}, <Dm>, <Dn> ; T1
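+      // VQSHL accepts either a register (per-lane signed shift amounts) or
+      // an immediate in the range [0, esize - 1]; both arrive through the
+      // DOperand and are dispatched on IsRegister()/IsImmediate() below. A
+      // hedged sketch, assuming DOperand converts from a small integer via
+      // NeonImmediate as the immediate path suggests (illustrative only):
+      //
+      //   masm.Vqshl(S16, d0, d1, d2);           // shift amounts from d2
+      //   masm.Vqshl(S16, d0, d1, DOperand(3));  // saturating shift by 3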
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000410U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000410U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800710U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800710U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshl, &Assembler::vqshl, cond, dt, rd, rm, operand); +} + +void Assembler::vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + QRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000450U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000450U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800750U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800750U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshl, &Assembler::vqshl, cond, dt, rd, rm, operand); +} + +void Assembler::vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_2 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHLU{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800610U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHLU{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800610U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshlu, &Assembler::vqshlu, cond, dt, rd, rm, operand); +} + +void Assembler::vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_2 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHLU{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800650U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHLU{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800650U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshlu, &Assembler::vqshlu, cond, dt, rd, rm, operand); +} + +void Assembler::vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_op_size_3 encoded_dt(dt); + Dt_imm6_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQSHRN{}{}.
, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VQSHRN{}{}.
, , # ; T1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800910U | + (encoded_dt_2.GetTypeEncodingValue() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHRN{}{}.
, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VQSHRN{}{}.
, , # ; A1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800910U | (encoded_dt_2.GetTypeEncodingValue() << 24) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshrn, &Assembler::vqshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_2 encoded_dt(dt); + Dt_size_14 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQSHRUN{}{}.
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xff800810U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VQSHRUN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSHRUN{}{}.
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf3800810U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VQSHRUN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVqshrun, &Assembler::vqshrun, cond, dt, rd, rm, operand); +} + +void Assembler::vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSUB{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
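+    // VQSUB mirrors VQADD: <Dn> - <Dm> with per-lane saturation and the QC
+    // flag set on overflow. Sketch (illustrative only, assuming the
+    // MacroAssembler wrapper):
+    //
+    //   masm.Vqsub(U8, d0, d1, d2);  // d0.b[i] = SatQ(d1.b[i] - d2.b[i])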
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000210U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSUB{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000210U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqsub, &Assembler::vqsub, cond, dt, rd, rn, rm); +} + +void Assembler::vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSUB{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000250U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSUB{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000250U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqsub, &Assembler::vqsub, cond, dt, rd, rn, rm); +} + +void Assembler::vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRADDHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRADDHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf3800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVraddhn, &Assembler::vraddhn, cond, dt, rd, rn, rm); +} + +void Assembler::vrecpe(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRECPE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30400U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30400U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecpe, &Assembler::vrecpe, cond, dt, rd, rm); +} + +void Assembler::vrecpe(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRECPE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30440U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30440U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecpe, &Assembler::vrecpe, cond, dt, rd, rm); +} + +void Assembler::vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRECPS{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPS{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecps, &Assembler::vrecps, cond, dt, rd, rn, rm); +} + +void Assembler::vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRECPS{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPS{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecps, &Assembler::vrecps, cond, dt, rd, rn, rm); +} + +void Assembler::vrev16(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VREV16{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV16{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev16, &Assembler::vrev16, cond, dt, rd, rm); +} + +void Assembler::vrev16(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VREV16{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV16{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev16, &Assembler::vrev16, cond, dt, rd, rm); +} + +void Assembler::vrev32(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VREV32{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV32{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev32, &Assembler::vrev32, cond, dt, rd, rm); +} + +void Assembler::vrev32(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VREV32{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb000c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV32{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b000c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev32, &Assembler::vrev32, cond, dt, rd, rm); +} + +void Assembler::vrev64(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VREV64{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00000U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV64{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00000U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev64, &Assembler::vrev64, cond, dt, rd, rm); +} + +void Assembler::vrev64(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VREV64{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00040U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV64{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00040U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev64, &Assembler::vrev64, cond, dt, rd, rm); +} + +void Assembler::vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRHADD{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
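+    // VRHADD computes (<Dn>[i] + <Dm>[i] + 1) >> 1 in double-width
+    // arithmetic, so the rounded halving add can never overflow and needs
+    // no saturation. Sketch (illustrative only, assuming the
+    // MacroAssembler wrapper):
+    //
+    //   masm.Vrhadd(U8, d0, d1, d2);  // rounded averages of the byte lanes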
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRHADD{}{}.
<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrhadd, &Assembler::vrhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRHADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000140U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRHADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000140U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrhadd, &Assembler::vrhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vrinta(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTA{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20500U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTA{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeb80b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20500U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTA{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeb80b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrinta(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTA{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20540U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20540U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrinta(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTA{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeb80a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeb80a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTM{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20680U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTM{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfebb0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20680U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTM{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfebb0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTM{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb206c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b206c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTM{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfebb0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfebb0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTN{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTN{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeb90b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTN{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeb90b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTN{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTN{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeb90a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeb90a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTP{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTP{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeba0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTP{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeba0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTP{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb207c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b207c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTP{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeba0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeba0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintr(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTR{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb60a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTR{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb60a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintr, &Assembler::vrintr, cond, dt, rd, rm); +} + +void Assembler::vrintr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTR{}{}.F64
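
The `rd.Encode(22, 12)` / `rm.Encode(5, 0)` calls above scatter a five-bit register number into the instruction word: the low four bits land in the field whose least significant bit is the second argument, and the fifth bit goes to the single bit position named first (the D/N/M bits of the Arm encodings). A standalone sketch of that split, under my reading of the call:

```cpp
#include <cassert>
#include <cstdint>

// Assumed semantics of Encode(hi_bit, lo_field_lsb): bit 4 of the
// register number lands on `hi_bit` (the D/N/M bit), bits 3..0 on the
// four-bit field starting at `lo_field_lsb` (Vd/Vn/Vm).
static uint32_t EncodeDReg(uint32_t code, int hi_bit, int lo_field_lsb) {
  return (((code >> 4) & 1u) << hi_bit) | ((code & 0xfu) << lo_field_lsb);
}

int main() {
  // D17 = 0b10001: the high bit goes to bit 22, the low nibble to 15..12,
  // matching the rd.Encode(22, 12) destination used throughout this file.
  assert(EncodeDReg(17, 22, 12) == ((1u << 22) | (1u << 12)));
  // rm.Encode(5, 0): same split, M bit at 5, Vm at bits 3..0.
  assert(EncodeDReg(17, 5, 0) == ((1u << 5) | 1u));
  return 0;
}
```
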
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb60b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTR{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb60b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintr, &Assembler::vrintr, cond, dt, rd, rm); +} + +void Assembler::vrintx(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTX{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTX{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb70b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTX{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb70b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, cond, dt, rd, rm); +} + +void Assembler::vrintx(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTX{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb204c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b204c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, dt, rd, rm); +} + +void Assembler::vrintx(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTX{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb70a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb70a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, cond, dt, rd, rm); +} + +void Assembler::vrintz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTZ{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20580U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTZ{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb60bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20580U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTZ{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb60bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, cond, dt, rd, rm); +} + +void Assembler::vrintz(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTZ{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb205c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b205c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, dt, rd, rm); +} + +void Assembler::vrintz(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTZ{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb60ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb60ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, cond, dt, rd, rm); +} + +void Assembler::vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHL{}{}.
<dt> {<Dd>}, <Dm>, <Dn> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VRSHL{<c>}{<q>}.<dt> {<Dd>}, <Dm>, <Dn> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2000500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
+                rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16));
+        return;
+      }
+    }
+  }
+  Delegate(kVrshl, &Assembler::vrshl, cond, dt, rd, rm, rn);
+}
+
+void Assembler::vrshl(
+    Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_U_size_3 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VRSHL{<c>}{<q>}.<dt>
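
The `cond.Is(al) || AllowStronglyDiscouraged()` guards reflect an architectural rule: these NEON encodings carry no condition field, so in T32 they can only be made conditional by an enclosing IT block, which Armv8 deprecates, and in A32 they live in the unconditional `0xf2`/`0xf3` opcode space, so anything other than `al` is handed to `Delegate(...)`. A toy model of the two guards (names are mine, not VIXL's):

```cpp
#include <cassert>
#include <cstdint>

// Toy model of the condition guards above. kAl is the AL ("always")
// condition code, 0b1110; the opt-in flag is assumed off by default.
constexpr uint32_t kAl = 14;
static bool allow_strongly_discouraged = false;

// T32 NEON encodings have no condition bits: a conditional form only
// works inside an IT block, which Armv8 deprecates, hence the opt-in.
static bool CanEmitT32Neon(uint32_t cond) {
  return (cond == kAl) || allow_strongly_discouraged;
}

// A32 NEON lives in the unconditional 0xf2000000/0xf3000000 space,
// so only AL is ever encodable; anything else goes to Delegate().
static bool CanEmitA32Neon(uint32_t cond) { return cond == kAl; }

int main() {
  assert(CanEmitT32Neon(kAl) && CanEmitA32Neon(kAl));
  assert(!CanEmitA32Neon(0 /* EQ */));  // would fall back to Delegate()
  return 0;
}
```
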
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000540U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000540U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVrshl, &Assembler::vrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHR{}{}. {
<Dd>}, <Dm>, #<imm> ; T1
+        if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) {
+          if (cond.Is(al) || AllowStronglyDiscouraged()) {
+            uint32_t imm6 = dt.GetSize() - imm;
+            EmitT32_32(0xef800210U | (encoded_dt.GetTypeEncodingValue() << 28) |
+                       ((encoded_dt.GetEncodingValue() & 0x7) << 19) |
+                       ((encoded_dt.GetEncodingValue() & 0x8) << 4) |
+                       rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16));
+            AdvanceIT();
+            return;
+          }
+        }
+        // VRSHR{<c>}{<q>}.<dt> <Dd>, <Dm>, #0 ; T1
+        if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) {
+          if (cond.Is(al) || AllowStronglyDiscouraged()) {
+            EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) |
+                       rm.Encode(5, 0));
+            AdvanceIT();
+            return;
+          }
+        }
+      } else {
+        // VRSHR{<c>}{<q>}.<type><size> {<Dd>
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800210U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshr, &Assembler::vrshr, cond, dt, rd, rm, operand); +} + +void Assembler::vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHR{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800250U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VRSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSHR{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800250U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshr, &Assembler::vrshr, cond, dt, rd, rm, operand); +} + +void Assembler::vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_3 encoded_dt(dt); + Dt_size_3 encoded_dt_2(dt); + if (IsUsingT32()) { + // VRSHRN{}{}.I
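
The right-shift forms above never store the shift directly: `imm6 = dt.GetSize() - imm` places shifts of 1..size onto a descending range under a leading-one size marker that arrives through the `Dt_L_imm6_1` encoding value (the bit 19 region, plus the L bit at bit 7 for 64-bit lanes). A shift of zero has no such encoding, which is why the `#0` paths emit `0xef200110`/`0xf2200110` with `rm` ORed into both source fields, effectively a VORR register copy. A small sketch of the combined field for 8/16/32-bit lanes, under that reading:

```cpp
#include <cassert>
#include <cstdint>

// Right-shift immediates pack a size marker and (size - shift) into the
// imm6 field (bits 21..16). The emitters above compute only the offset;
// the marker comes in through the Dt_L_imm6_1 encoding value. Assumed
// combined layout for 8/16/32-bit lanes (marker value == lane size):
static uint32_t ShrImm6Field(uint32_t size, uint32_t shift) {
  assert(shift >= 1 && shift <= size);
  return size + (size - shift);
}

int main() {
  assert(ShrImm6Field(8, 1) == 0x0f);   // VSHR.8  #1 -> imm6 = 0b001111
  assert(ShrImm6Field(8, 8) == 0x08);   // VSHR.8  #8 -> imm6 = 0b001000
  assert(ShrImm6Field(32, 5) == 0x3b);  // VSHR.32 #5 -> imm6 = 0b111011
  return 0;
}
```
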
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800850U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VRSHRN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSHRN{}{}.I
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800850U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHRN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshrn, &Assembler::vrshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vrsqrte(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRSQRTE{}{}.
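
Narrowing shifts are encoded the same way but against the half-width result: the `vrshrn` paths above check `imm <= dt.GetSize() / 2` and store `imm6 = dt.GetSize() / 2 - imm`, and the `#0` variant degenerates to the plain narrowing move (the `0xffb20200`/`0xf3b20200` VMOVN pattern). A sketch, assuming the same marker-plus-offset layout as in the VSHR sketch above:

```cpp
#include <cassert>
#include <cstdint>

// VRSHRN/VSHRN narrow .I16/.I32/.I64 lanes to half width; the shift
// runs 1..size/2 and is stored against the destination width.
static uint32_t ShrnImm6Field(uint32_t src_size, uint32_t shift) {
  uint32_t half = src_size / 2;
  assert(shift >= 1 && shift <= half);
  return half + (half - shift);
}

int main() {
  assert(ShrnImm6Field(16, 8) == 0x08);  // VRSHRN.I16 #8 -> imm6 = 0b001000
  assert(ShrnImm6Field(64, 1) == 0x3f);  // VRSHRN.I64 #1 -> imm6 = 0b111111
  return 0;
}
```
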
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30480U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30480U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrte, &Assembler::vrsqrte, cond, dt, rd, rm); +} + +void Assembler::vrsqrte(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRSQRTE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb304c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b304c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrte, &Assembler::vrsqrte, cond, dt, rd, rm); +} + +void Assembler::vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRSQRTS{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTS{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrts, &Assembler::vrsqrts, cond, dt, rd, rn, rm); +} + +void Assembler::vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRSQRTS{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTS{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrts, &Assembler::vrsqrts, cond, dt, rd, rn, rm); +} + +void Assembler::vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSRA{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800310U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSRA{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800310U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVrsra, &Assembler::vrsra, cond, dt, rd, rm, operand); +} + +void Assembler::vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSRA{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800350U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSRA{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800350U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVrsra, &Assembler::vrsra, cond, dt, rd, rm, operand); +} + +void Assembler::vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRSUBHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSUBHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf3800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsubhn, &Assembler::vrsubhn, cond, dt, rd, rn, rm); +} + +void Assembler::vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELEQ.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELEQ.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVseleq, &Assembler::vseleq, dt, rd, rn, rm); +} + +void Assembler::vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELEQ.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELEQ.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVseleq, &Assembler::vseleq, dt, rd, rn, rm); +} + +void Assembler::vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGE.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGE.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselge, &Assembler::vselge, dt, rd, rn, rm); +} + +void Assembler::vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGE.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGE.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselge, &Assembler::vselge, dt, rd, rn, rm); +} + +void Assembler::vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGT.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGT.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselgt, &Assembler::vselgt, dt, rd, rn, rm); +} + +void Assembler::vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGT.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGT.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselgt, &Assembler::vselgt, dt, rd, rn, rm); +} + +void Assembler::vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELVS.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELVS.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselvs, &Assembler::vselvs, dt, rd, rn, rm); +} + +void Assembler::vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELVS.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELVS.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselvs, &Assembler::vselvs, dt, rd, rn, rm); +} + +void Assembler::vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.I {
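
The eight VSEL emitters above share one base opcode per precision and differ only in bits 21:20, which select the tested condition; T1 and A1 use identical bit patterns because these encodings are unconditional in both ISAs. The mapping, reconstructed from the constants above:

```cpp
#include <cassert>
#include <cstdint>

// The condition tested by VSEL sits in bits 21..20 of a shared opcode;
// values reconstructed from the vsel* emitters above (F64 base shown).
enum VselCc : uint32_t { kVselEq = 0, kVselVs = 1, kVselGe = 2, kVselGt = 3 };

static uint32_t VselF64Opcode(VselCc cc) {
  return 0xfe000b00U | (static_cast<uint32_t>(cc) << 20);
}

int main() {
  assert(VselF64Opcode(kVselEq) == 0xfe000b00U);  // vseleq
  assert(VselF64Opcode(kVselVs) == 0xfe100b00U);  // vselvs
  assert(VselF64Opcode(kVselGe) == 0xfe200b00U);  // vselge
  assert(VselF64Opcode(kVselGt) == 0xfe300b00U);  // vselgt
  return 0;
}
```
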
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.I {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + if (operand.IsRegister()) { + DRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.
<dt> {<Dd>}, <Dm>, <Dn> ; T1
+      if (encoded_dt.IsValid()) {
+        if (cond.Is(al) || AllowStronglyDiscouraged()) {
+          EmitT32_32(0xef000400U |
+                     ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                     ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                     rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16));
+          AdvanceIT();
+          return;
+        }
+      }
+    } else {
+      // VSHL{<c>}{<q>}.<dt> {<Dd>
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000400U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + Delegate(kVshl, &Assembler::vshl, cond, dt, rd, rm, operand); +} + +void Assembler::vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.I {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.I {}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + if (operand.IsRegister()) { + QRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000440U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000440U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + Delegate(kVshl, &Assembler::vshl, cond, dt, rd, rm, operand); +} + +void Assembler::vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_4 encoded_dt(dt); + Dt_size_17 encoded_dt_2(dt); + if (IsUsingT32()) { + // VSHLL{}{}. , , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() + imm; + EmitT32_32(0xef800a10U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHLL{}{}. , , # ; T2 + if (encoded_dt_2.IsValid() && (imm == dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20300U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHLL{}{}. , , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() + imm; + EmitA32(0xf2800a10U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHLL{}{}. , , # ; A2 + if (encoded_dt_2.IsValid() && (imm == dt.GetSize())) { + if (cond.Is(al)) { + EmitA32(0xf3b20300U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshll, &Assembler::vshll, cond, dt, rd, rm, operand); +} + +void Assembler::vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSHR{}{}. {
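
Left shifts invert the convention used by the right-shift forms: `vshl` stores `imm6 = imm` directly under the size marker, and the widening `vshll` computes `imm6 = dt.GetSize() + imm`, which is the same marker-plus-shift layout because the marker value equals the lane size. A shift by exactly the lane width is not representable there, so `vshll` falls through to the separate T2/A2 opcode (`0xffb20300`/`0xf3b20300`) seen above. A sketch of the imm6 arithmetic under that reading:

```cpp
#include <cassert>
#include <cstdint>

// Widening left shift: imm6 = lane size + shift, valid for shifts of
// 1..size-1. A shift of exactly `size` is taken by the separate T2/A2
// "long" opcode instead, as in the vshll body above.
static uint32_t VshllImm6Field(uint32_t size, uint32_t shift) {
  assert(shift >= 1 && shift < size);
  return size + shift;
}

int main() {
  assert(VshllImm6Field(8, 3) == 0x0b);   // VSHLL.S8 #3  -> imm6 = 0b001011
  assert(VshllImm6Field(32, 1) == 0x21);  // VSHLL.S32 #1 -> imm6 = 0b100001
  return 0;
}
```
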
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800010U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHR{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800010U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshr, &Assembler::vshr, cond, dt, rd, rm, operand); +} + +void Assembler::vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSHR{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800050U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHR{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800050U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshr, &Assembler::vshr, cond, dt, rd, rm, operand); +} + +void Assembler::vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_3 encoded_dt(dt); + Dt_size_3 encoded_dt_2(dt); + if (IsUsingT32()) { + // VSHRN{}{}.I
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800810U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHRN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHRN{}{}.I
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800810U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHRN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshrn, &Assembler::vshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSLI{}{}.
<dt> {<Dd>}, <Dm>, #<imm> ; T1
+        if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) {
+          if (cond.Is(al) || AllowStronglyDiscouraged()) {
+            uint32_t imm6 = imm;
+            EmitT32_32(0xff800510U |
+                       ((encoded_dt.GetEncodingValue() & 0x7) << 19) |
+                       ((encoded_dt.GetEncodingValue() & 0x8) << 4) |
+                       rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16));
+            AdvanceIT();
+            return;
+          }
+        }
+      } else {
+        // VSLI{<c>}{<q>}.<dt>
{<Dd>
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf3800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsli, &Assembler::vsli, cond, dt, rd, rm, operand); +} + +void Assembler::vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSLI{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xff800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSLI{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf3800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsli, &Assembler::vsli, cond, dt, rd, rm, operand); +} + +void Assembler::vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VSQRT{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb10ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSQRT{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb10ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVsqrt, &Assembler::vsqrt, cond, dt, rd, rm); +} + +void Assembler::vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VSQRT{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb10bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSQRT{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb10bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVsqrt, &Assembler::vsqrt, cond, dt, rd, rm); +} + +void Assembler::vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSRA{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800110U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRA{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800110U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsra, &Assembler::vsra, cond, dt, rd, rm, operand); +} + +void Assembler::vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSRA{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800150U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRA{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800150U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsra, &Assembler::vsra, cond, dt, rd, rm, operand); +} + +void Assembler::vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSRI{}{}.
<dt> {<Dd>
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xff800410U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRI{}{}.
<dt> {<Dd>
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf3800410U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsri, &Assembler::vsri, cond, dt, rd, rm, operand); +} + +void Assembler::vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSRI{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xff800450U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRI{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf3800450U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsri, &Assembler::vsri, cond, dt, rd, rm, operand); +} + +void Assembler::vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_5 encoded_align_1(align, nreglist); + Align_index_align_1 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST1{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST1{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_5 encoded_align_1(align, nreglist); + Align_index_align_1 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST1{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST1{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST1{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst1, &Assembler::vst1, cond, dt, nreglist, operand); +} + +void Assembler::vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_index_align_2 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST2{}{}.
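
The `len_encoding` switches in the `vst1`/`vst2` bodies above are a table, not arithmetic: bits 11:8 of the multi-register store encodings hold the architectural "type" value for the register list, so one D register maps to 0x7, two to 0xa, three to 0x6 and four to 0x2 (and, for `vst2`, 0x8/0x9 distinguish single- from double-spaced pairs, with 0x3 for the four-register form). A compilable restatement of the VST1 mapping:

```cpp
#include <cassert>
#include <cstdint>

// Architectural "type" values (instruction bits 11..8) for VST1's
// multi-register lists, as selected by the len_encoding switches above.
static uint32_t Vst1TypeField(int regs) {
  switch (regs) {
    case 1: return 0x7;  // one D register
    case 2: return 0xa;  // two consecutive D registers
    case 3: return 0x6;  // three consecutive D registers
    case 4: return 0x2;  // four consecutive D registers
    default: assert(false); return 0;
  }
}

int main() {
  // VST2 reuses the same field with 0x8 (single-spaced pair),
  // 0x9 (double-spaced pair) and 0x3 (four registers).
  assert(Vst1TypeField(2) == 0xa);
  return 0;
}
```
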
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_index_align_2 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst2, &Assembler::vst2, cond, dt, nreglist, operand); +} + +void Assembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VST3{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST3{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST3{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VST3{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst3, &Assembler::vst3, cond, dt, nreglist, operand); +} + +void Assembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VST3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Sign sign = operand.GetSign(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VST3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst3, &Assembler::vst3, cond, dt, nreglist, operand); +} + +void Assembler::vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_4 encoded_align_1(align); + Align_index_align_3 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_4 encoded_align_1(align); + Align_index_align_3 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst4, &Assembler::vst4, cond, dt, nreglist, operand); +} + +void Assembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTM{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec800b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTM{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c800b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstm, &Assembler::vstm, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTM{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec800a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTM{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c800a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstm, &Assembler::vstm, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMDB{}{}{.} !, ; T1 + if (write_back.DoesWriteBack() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed200b00U | (rn.GetCode() << 16) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMDB{}{}{.} !, ; A1 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned 
len = dreglist.GetLength() * 2; + EmitA32(0x0d200b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + dreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVstmdb, &Assembler::vstmdb, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMDB{}{}{.} !, ; T2 + if (write_back.DoesWriteBack() && (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed200a00U | (rn.GetCode() << 16) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMDB{}{}{.} !, ; A2 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d200a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + sreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVstmdb, &Assembler::vstmdb, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMIA{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec800b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMIA{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c800b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstmia, &Assembler::vstmia, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMIA{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec800a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMIA{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c800a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstmia, &Assembler::vstmia, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + 
+    Register rn = operand.GetBaseRegister();
+    int32_t offset = operand.GetOffsetImmediate();
+    if (IsUsingT32()) {
+      // VSTR{<c>}{<q>}{.64} <Dd>, [<Rn>{, #{+/-}<imm>}] ; T1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() &&
+          (!rn.IsPC() || AllowUnpredictable())) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitT32_32(0xed000b00U | rd.Encode(22, 12) | (rn.GetCode() << 16) |
+                   offset_ | (sign << 23));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VSTR{<c>}{<q>}{.64} <Dd>, [<Rn>{, #{+/-}<imm>}] ; A1
+      if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever()) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitA32(0x0d000b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+                (rn.GetCode() << 16) | offset_ | (sign << 23));
+        return;
+      }
+    }
+  }
+  Delegate(kVstr, &Assembler::vstr, cond, dt, rd, operand);
+}
+
+void Assembler::vstr(Condition cond,
+                     DataType dt,
+                     SRegister rd,
+                     const MemOperand& operand) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (operand.IsImmediate()) {
+    Register rn = operand.GetBaseRegister();
+    int32_t offset = operand.GetOffsetImmediate();
+    if (IsUsingT32()) {
+      // VSTR{<c>}{<q>}{.32} <Sd>, [<Rn>{, #{+/-}<imm>}] ; T2
+      if (dt.IsNoneOr(Untyped32) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() &&
+          (!rn.IsPC() || AllowUnpredictable())) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitT32_32(0xed000a00U | rd.Encode(22, 12) | (rn.GetCode() << 16) |
+                   offset_ | (sign << 23));
+        AdvanceIT();
+        return;
+      }
+    } else {
+      // VSTR{<c>}{<q>}{.32} <Sd>, [<Rn>{, #{+/-}<imm>}] ; A2
+      if (dt.IsNoneOr(Untyped32) && (offset >= -1020) && (offset <= 1020) &&
+          ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever()) {
+        uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0;
+        uint32_t offset_ = abs(offset) >> 2;
+        EmitA32(0x0d000a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+                (rn.GetCode() << 16) | offset_ | (sign << 23));
+        return;
+      }
+    }
+  }
+  Delegate(kVstr, &Assembler::vstr, cond, dt, rd, operand);
+}
+
+void Assembler::vsub(
+    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_2 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VSUB{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
+    if (dt.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VSUB{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; T2
+    if (dt.Is(F64)) {
+      EmitT32_32(0xee300b40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+    // VSUB{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xff000800U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VSUB{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VSUB{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A2
+    if (dt.Is(F64) && cond.IsNotNever()) {
+      EmitA32(0x0e300b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+    // VSUB{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3000800U | (encoded_dt.GetEncodingValue() << 20) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm);
+}
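Every emitter in this file follows the same template: test the operands against one row of the instruction's encoding table, emit the T32 or A32 word for the active instruction set, and otherwise fall through to `Delegate(...)`, which gives a higher layer such as the MacroAssembler a chance to synthesise the operation differently. A minimal caller sketch, assuming only the public MacroAssembler interface declared in `macro-assembler-aarch32.h` (buffer handling elided):

```cpp
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

void EmitVectorSub() {
  MacroAssembler masm(T32);    // pick Thumb-2 (T32) encodings
  masm.Vsub(F32, q0, q1, q2);  // NEON: q0 = q1 - q2, four f32 lanes
  masm.Vsub(F64, d0, d1, d2);  // VFP:  d0 = d1 - d2, one f64
  masm.FinalizeCode();         // finish emitting into the code buffer
}
```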
+void Assembler::vsub(
+    Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_2 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VSUB{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; T1
+    if (dt.Is(F32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                   rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VSUB{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xff000840U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VSUB{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; A1
+    if (dt.Is(F32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VSUB{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3000840U | (encoded_dt.GetEncodingValue() << 20) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm);
+}
+
+void Assembler::vsub(
+    Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (IsUsingT32()) {
+    // VSUB{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T2
+    if (dt.Is(F32)) {
+      EmitT32_32(0xee300a40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
+                 rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VSUB{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; A2
+    if (dt.Is(F32) && cond.IsNotNever()) {
+      EmitA32(0x0e300a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
+              rn.Encode(7, 16) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm);
+}
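The `rd.Encode(22, 12)`-style calls above pack the split VFP/NEON register fields: a D register places its low four bits in the four-bit field (here bits 15:12) and bit 4 in the single-bit position (bit 22), while an S register is split the other way around (low bit in the single-bit position, upper four bits in the field). A rough stand-alone model of that packing, with hypothetical helper names rather than VIXL's actual implementation:

```cpp
#include <cstdint>

// Sketch of the D-register field split: Vd = code[3:0], D = code[4].
uint32_t EncodeDReg(unsigned code, int single_bit, int four_bit_field) {
  return (((code >> 4) & 1u) << single_bit) |
         ((code & 0xfu) << four_bit_field);
}

// Sketch of the S-register field split: Vd = code[4:1], D = code[0].
uint32_t EncodeSReg(unsigned code, int single_bit, int four_bit_field) {
  return ((code & 1u) << single_bit) | ((code >> 1) << four_bit_field);
}
```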
+void Assembler::vsubhn(
+    Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_3 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VSUBHN{<c>}{<q>}.<dt> <Dd>, <Qn>, <Qm> ; T1
+    if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef800600U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VSUBHN{<c>}{<q>}.<dt> <Dd>, <Qn>, <Qm> ; A1
+    if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2800600U | (encoded_dt.GetEncodingValue() << 20) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVsubhn, &Assembler::vsubhn, cond, dt, rd, rn, rm);
+}
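VSUBHN is the "subtract and narrow, keeping the high half" form: both sources are Q registers and each destination lane retains only the top half of the double-width difference, which is why the destination is a D register. A scalar model of one I16 lane:

```cpp
#include <cstdint>

// One VSUBHN.I16 lane: 16-bit subtract, keep the high byte.
uint8_t SubhnLaneI16(uint16_t n, uint16_t m) {
  return static_cast<uint8_t>(static_cast<uint16_t>(n - m) >> 8);
}
// e.g. SubhnLaneI16(0x1234, 0x0034) == 0x12
```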
+void Assembler::vsubl(
+    Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_U_size_1 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VSUBL{<c>}{<q>}.<dt> <Qd>, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef800200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VSUBL{<c>}{<q>}.<dt> <Qd>, <Dn>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2800200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVsubl, &Assembler::vsubl, cond, dt, rd, rn, rm);
+}
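In VSUBL, `(encoded_dt.GetEncodingValue() & 0x3)` is the element-size field placed at bits 21:20, and the `& 0x4` term is the U (unsigned) bit, which lands at bit 28 in the T32 word (`<< 26`) but at bit 24 in the A32 word (`<< 22`). Semantically the "long" variant widens both D-register sources before subtracting, so the result cannot wrap. One S8 lane as scalar code:

```cpp
#include <cstdint>

// One VSUBL.S8 lane: widen to 16 bits, then subtract.
int16_t SublLaneS8(int8_t n, int8_t m) {
  return static_cast<int16_t>(n) - static_cast<int16_t>(m);
}
// e.g. SublLaneS8(-128, 127) == -255, which no 8-bit lane could hold
```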
+void Assembler::vsubw(
+    Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_U_size_1 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VSUBW{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef800300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VSUBW{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2800300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
+                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVsubw, &Assembler::vsubw, cond, dt, rd, rn, rm);
+}
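VSUBW is the "wide" sibling: the first source is already full width and only the D-register source is widened before the subtraction. One U8 lane:

```cpp
#include <cstdint>

// One VSUBW.U8 lane: qd = qn - widen(dm).
uint16_t SubwLaneU8(uint16_t n, uint8_t m) {
  return static_cast<uint16_t>(n - static_cast<uint16_t>(m));
}
```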
+void Assembler::vswp(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  USE(dt);
+  if (IsUsingT32()) {
+    // VSWP{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; T1
+    if (cond.Is(al) || AllowStronglyDiscouraged()) {
+      EmitT32_32(0xffb20000U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VSWP{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; A1
+    if (cond.Is(al)) {
+      EmitA32(0xf3b20000U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVswp, &Assembler::vswp, cond, dt, rd, rm);
+}
+
+void Assembler::vswp(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  USE(dt);
+  if (IsUsingT32()) {
+    // VSWP{<c>}{<q>}{.<dt>} <Qd>, <Qm> ; T1
+    if (cond.Is(al) || AllowStronglyDiscouraged()) {
+      EmitT32_32(0xffb20040U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      AdvanceIT();
+      return;
+    }
+  } else {
+    // VSWP{<c>}{<q>}{.<dt>} <Qd>, <Qm> ; A1
+    if (cond.Is(al)) {
+      EmitA32(0xf3b20040U | rd.Encode(22, 12) | rm.Encode(5, 0));
+      return;
+    }
+  }
+  Delegate(kVswp, &Assembler::vswp, cond, dt, rd, rm);
+}
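VSWP exchanges two whole registers, so the data type in the assembly syntax is purely decorative; that is why both emitters discard it with `USE(dt)`. A usage sketch, assuming the dt-less MacroAssembler convenience overloads:

```cpp
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

void SwapVectors(MacroAssembler* masm) {
  masm->Vswp(d0, d1);  // swap two 64-bit registers
  masm->Vswp(q2, q3);  // swap two 128-bit registers
}
```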
+void Assembler::vtbl(Condition cond,
+                     DataType dt,
+                     DRegister rd,
+                     const NeonRegisterList& nreglist,
+                     DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (IsUsingT32()) {
+    // VTBL{<c>}{<q>}.8 <Dd>, <list>, <Dm> ; T1
+    if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() &&
+        (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        const DRegister& first = nreglist.GetFirstDRegister();
+        uint32_t len_encoding = nreglist.GetLength() - 1;
+        EmitT32_32(0xffb00800U | rd.Encode(22, 12) | first.Encode(7, 16) |
+                   (len_encoding << 8) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTBL{<c>}{<q>}.8 <Dd>, <list>, <Dm> ; A1
+    if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() &&
+        (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) {
+      if (cond.Is(al)) {
+        const DRegister& first = nreglist.GetFirstDRegister();
+        uint32_t len_encoding = nreglist.GetLength() - 1;
+        EmitA32(0xf3b00800U | rd.Encode(22, 12) | first.Encode(7, 16) |
+                (len_encoding << 8) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtbl, &Assembler::vtbl, cond, dt, rd, nreglist, rm);
+}
+
+void Assembler::vtbx(Condition cond,
+                     DataType dt,
+                     DRegister rd,
+                     const NeonRegisterList& nreglist,
+                     DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  if (IsUsingT32()) {
+    // VTBX{<c>}{<q>}.8 <Dd>, <list>, <Dm> ; T1
+    if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() &&
+        (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        const DRegister& first = nreglist.GetFirstDRegister();
+        uint32_t len_encoding = nreglist.GetLength() - 1;
+        EmitT32_32(0xffb00840U | rd.Encode(22, 12) | first.Encode(7, 16) |
+                   (len_encoding << 8) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTBX{<c>}{<q>}.8 <Dd>, <list>, <Dm> ; A1
+    if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() &&
+        (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) {
+      if (cond.Is(al)) {
+        const DRegister& first = nreglist.GetFirstDRegister();
+        uint32_t len_encoding = nreglist.GetLength() - 1;
+        EmitA32(0xf3b00840U | rd.Encode(22, 12) | first.Encode(7, 16) |
+                (len_encoding << 8) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtbx, &Assembler::vtbx, cond, dt, rd, nreglist, rm);
+}
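VTBL and VTBX index byte-wise into a table formed by concatenating the one to four list registers; `len_encoding = GetLength() - 1` is the table-length field at bits 9:8. The two differ only in what an out-of-range index does to the destination lane, modelled here for a single byte:

```cpp
#include <cstddef>
#include <cstdint>

// One VTBL/VTBX.8 lane. `table` holds the 8 * len concatenated bytes of
// the list registers; `old_value` is the destination lane's prior value.
uint8_t TableLookupLane(const uint8_t* table, size_t table_len,
                        uint8_t index, uint8_t old_value, bool is_vtbx) {
  if (index < table_len) return table[index];
  return is_vtbx ? old_value  // VTBX: out-of-range leaves the lane alone
                 : 0;         // VTBL: out-of-range writes zero
}
```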
+void Assembler::vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VTRN{<c>}{<q>}.<dt> <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb20080U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTRN{<c>}{<q>}.<dt> <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b20080U | (encoded_dt.GetEncodingValue() << 18) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtrn, &Assembler::vtrn, cond, dt, rd, rm);
+}
+
+void Assembler::vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VTRN{<c>}{<q>}.<dt> <Qd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb200c0U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTRN{<c>}{<q>}.<dt> <Qd>, <Qm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b200c0U | (encoded_dt.GetEncodingValue() << 18) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtrn, &Assembler::vtrn, cond, dt, rd, rm);
+}
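VTRN treats each adjacent pair of lanes across the two registers as a 2x2 tile and transposes it: lane i+1 of the first register swaps with lane i of the second, for every even i. A scalar model of VTRN.16 on D-sized (four-lane) vectors:

```cpp
#include <cstdint>
#include <utility>

// VTRN.16 on two 4-lane vectors: transpose each 2x2 tile in place.
void Trn16(uint16_t a[4], uint16_t b[4]) {
  for (int i = 0; i < 4; i += 2) {
    std::swap(a[i + 1], b[i]);
  }
}
```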
+void Assembler::vtst(
+    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VTST{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000810U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTST{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2000810U | (encoded_dt.GetEncodingValue() << 20) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtst, &Assembler::vtst, cond, dt, rd, rn, rm);
+}
+
+void Assembler::vtst(
+    Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VTST{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xef000850U | (encoded_dt.GetEncodingValue() << 20) |
+                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VTST{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf2000850U | (encoded_dt.GetEncodingValue() << 20) |
+                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVtst, &Assembler::vtst, cond, dt, rd, rn, rm);
+}
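VTST is a lane-wise bit test: a destination lane becomes all-ones when the two source lanes share at least one set bit, otherwise all-zeros. One byte lane:

```cpp
#include <cstdint>

// One VTST.8 lane: all-ones if (n & m) is non-zero.
uint8_t TstLane(uint8_t n, uint8_t m) {
  return (n & m) != 0 ? 0xff : 0x00;
}
```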
+void Assembler::vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_15 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VUZP{<c>}{<q>}.<dt> <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb20100U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VUZP{<c>}{<q>}.32 <Dd>, <Dm> ; T1
+    if (dt.Is(Untyped32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffba0080U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VUZP{<c>}{<q>}.<dt> <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b20100U | (encoded_dt.GetEncodingValue() << 18) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VUZP{<c>}{<q>}.32 <Dd>, <Dm> ; A1
+    if (dt.Is(Untyped32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3ba0080U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVuzp, &Assembler::vuzp, cond, dt, rd, rm);
+}
+
+void Assembler::vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VUZP{<c>}{<q>}.<dt> <Qd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb20140U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VUZP{<c>}{<q>}.<dt> <Qd>, <Qm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b20140U | (encoded_dt.GetEncodingValue() << 18) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVuzp, &Assembler::vuzp, cond, dt, rd, rm);
+}
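VUZP de-interleaves the concatenated pair: even-indexed elements end up in the first register and odd-indexed ones in the second. Note the `Untyped32` D-register path above emits `0xffba0080U`, the VTRN.32 encoding: for 32-bit lanes in D registers, VUZP.32 (and VZIP.32 below) degenerate to the same element exchange that VTRN.32 performs. A scalar model of VUZP.8 on two eight-lane vectors:

```cpp
#include <cstdint>

// VUZP.8: de-interleave the 16-byte concatenation (a:b) back into a and b.
void Uzp8(uint8_t a[8], uint8_t b[8]) {
  uint8_t cat[16];
  for (int i = 0; i < 8; ++i) {
    cat[i] = a[i];
    cat[8 + i] = b[i];
  }
  for (int i = 0; i < 8; ++i) {
    a[i] = cat[2 * i];      // even-indexed elements
    b[i] = cat[2 * i + 1];  // odd-indexed elements
  }
}
```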
+void Assembler::vzip(Condition cond, DataType dt, DRegister rd, DRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_15 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VZIP{<c>}{<q>}.<dt> <Dd>, <Dm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb20180U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+    // VZIP{<c>}{<q>}.32 <Dd>, <Dm> ; T1
+    if (dt.Is(Untyped32)) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffba0080U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VZIP{<c>}{<q>}.<dt> <Dd>, <Dm> ; A1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3b20180U | (encoded_dt.GetEncodingValue() << 18) |
+                rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+    // VZIP{<c>}{<q>}.32 <Dd>, <Dm> ; A1
+    if (dt.Is(Untyped32)) {
+      if (cond.Is(al)) {
+        EmitA32(0xf3ba0080U | rd.Encode(22, 12) | rm.Encode(5, 0));
+        return;
+      }
+    }
+  }
+  Delegate(kVzip, &Assembler::vzip, cond, dt, rd, rm);
+}
+
+void Assembler::vzip(Condition cond, DataType dt, QRegister rd, QRegister rm) {
+  VIXL_ASSERT(AllowAssembler());
+  CheckIT(cond);
+  Dt_size_7 encoded_dt(dt);
+  if (IsUsingT32()) {
+    // VZIP{<c>}{<q>}.<dt> <Qd>, <Qm> ; T1
+    if (encoded_dt.IsValid()) {
+      if (cond.Is(al) || AllowStronglyDiscouraged()) {
+        EmitT32_32(0xffb201c0U | (encoded_dt.GetEncodingValue() << 18) |
+                   rd.Encode(22, 12) | rm.Encode(5, 0));
+        AdvanceIT();
+        return;
+      }
+    }
+  } else {
+    // VZIP{<c>}{<q>}.<dt> <Qd>
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b201c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVzip, &Assembler::vzip, cond, dt, rd, rm); +} + +void Assembler::yield(Condition cond, EncodingSize size) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // YIELD{}{} ; T1 + if (!size.IsWide()) { + EmitT32_16(0xbf10); + AdvanceIT(); + return; + } + // YIELD{}.W ; T2 + if (!size.IsNarrow()) { + EmitT32_32(0xf3af8001U); + AdvanceIT(); + return; + } + } else { + // YIELD{}{} ; A1 + if (cond.IsNotNever()) { + EmitA32(0x0320f001U | (cond.GetCondition() << 28)); + return; + } + } + Delegate(kYield, &Assembler::yield, cond, size); +} +// End of generated code. + +} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch32/constants-aarch32.cc b/dep/vixl/src/aarch32/constants-aarch32.cc new file mode 100644 index 000000000..759236179 --- /dev/null +++ b/dep/vixl/src/aarch32/constants-aarch32.cc @@ -0,0 +1,855 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "aarch32/constants-aarch32.h" +#include "utils-vixl.h" + +namespace vixl { +namespace aarch32 { + +// Start of generated code. 
+const char* ToCString(InstructionType type) { + switch (type) { + case kAdc: + return "adc"; + case kAdcs: + return "adcs"; + case kAdd: + return "add"; + case kAdds: + return "adds"; + case kAddw: + return "addw"; + case kAdr: + return "adr"; + case kAnd: + return "and"; + case kAnds: + return "ands"; + case kAsr: + return "asr"; + case kAsrs: + return "asrs"; + case kB: + return "b"; + case kBfc: + return "bfc"; + case kBfi: + return "bfi"; + case kBic: + return "bic"; + case kBics: + return "bics"; + case kBkpt: + return "bkpt"; + case kBl: + return "bl"; + case kBlx: + return "blx"; + case kBx: + return "bx"; + case kBxj: + return "bxj"; + case kCbnz: + return "cbnz"; + case kCbz: + return "cbz"; + case kClrex: + return "clrex"; + case kClz: + return "clz"; + case kCmn: + return "cmn"; + case kCmp: + return "cmp"; + case kCrc32b: + return "crc32b"; + case kCrc32cb: + return "crc32cb"; + case kCrc32ch: + return "crc32ch"; + case kCrc32cw: + return "crc32cw"; + case kCrc32h: + return "crc32h"; + case kCrc32w: + return "crc32w"; + case kDmb: + return "dmb"; + case kDsb: + return "dsb"; + case kEor: + return "eor"; + case kEors: + return "eors"; + case kFldmdbx: + return "fldmdbx"; + case kFldmiax: + return "fldmiax"; + case kFstmdbx: + return "fstmdbx"; + case kFstmiax: + return "fstmiax"; + case kHlt: + return "hlt"; + case kHvc: + return "hvc"; + case kIsb: + return "isb"; + case kIt: + return "it"; + case kLda: + return "lda"; + case kLdab: + return "ldab"; + case kLdaex: + return "ldaex"; + case kLdaexb: + return "ldaexb"; + case kLdaexd: + return "ldaexd"; + case kLdaexh: + return "ldaexh"; + case kLdah: + return "ldah"; + case kLdm: + return "ldm"; + case kLdmda: + return "ldmda"; + case kLdmdb: + return "ldmdb"; + case kLdmea: + return "ldmea"; + case kLdmed: + return "ldmed"; + case kLdmfa: + return "ldmfa"; + case kLdmfd: + return "ldmfd"; + case kLdmib: + return "ldmib"; + case kLdr: + return "ldr"; + case kLdrb: + return "ldrb"; + case kLdrd: + return "ldrd"; + case kLdrex: + return "ldrex"; + case kLdrexb: + return "ldrexb"; + case kLdrexd: + return "ldrexd"; + case kLdrexh: + return "ldrexh"; + case kLdrh: + return "ldrh"; + case kLdrsb: + return "ldrsb"; + case kLdrsh: + return "ldrsh"; + case kLsl: + return "lsl"; + case kLsls: + return "lsls"; + case kLsr: + return "lsr"; + case kLsrs: + return "lsrs"; + case kMla: + return "mla"; + case kMlas: + return "mlas"; + case kMls: + return "mls"; + case kMov: + return "mov"; + case kMovs: + return "movs"; + case kMovt: + return "movt"; + case kMovw: + return "movw"; + case kMrs: + return "mrs"; + case kMsr: + return "msr"; + case kMul: + return "mul"; + case kMuls: + return "muls"; + case kMvn: + return "mvn"; + case kMvns: + return "mvns"; + case kNop: + return "nop"; + case kOrn: + return "orn"; + case kOrns: + return "orns"; + case kOrr: + return "orr"; + case kOrrs: + return "orrs"; + case kPkhbt: + return "pkhbt"; + case kPkhtb: + return "pkhtb"; + case kPld: + return "pld"; + case kPldw: + return "pldw"; + case kPli: + return "pli"; + case kPop: + return "pop"; + case kPush: + return "push"; + case kQadd: + return "qadd"; + case kQadd16: + return "qadd16"; + case kQadd8: + return "qadd8"; + case kQasx: + return "qasx"; + case kQdadd: + return "qdadd"; + case kQdsub: + return "qdsub"; + case kQsax: + return "qsax"; + case kQsub: + return "qsub"; + case kQsub16: + return "qsub16"; + case kQsub8: + return "qsub8"; + case kRbit: + return "rbit"; + case kRev: + return "rev"; + case kRev16: + return "rev16"; + case kRevsh: + 
return "revsh"; + case kRor: + return "ror"; + case kRors: + return "rors"; + case kRrx: + return "rrx"; + case kRrxs: + return "rrxs"; + case kRsb: + return "rsb"; + case kRsbs: + return "rsbs"; + case kRsc: + return "rsc"; + case kRscs: + return "rscs"; + case kSadd16: + return "sadd16"; + case kSadd8: + return "sadd8"; + case kSasx: + return "sasx"; + case kSbc: + return "sbc"; + case kSbcs: + return "sbcs"; + case kSbfx: + return "sbfx"; + case kSdiv: + return "sdiv"; + case kSel: + return "sel"; + case kShadd16: + return "shadd16"; + case kShadd8: + return "shadd8"; + case kShasx: + return "shasx"; + case kShsax: + return "shsax"; + case kShsub16: + return "shsub16"; + case kShsub8: + return "shsub8"; + case kSmlabb: + return "smlabb"; + case kSmlabt: + return "smlabt"; + case kSmlad: + return "smlad"; + case kSmladx: + return "smladx"; + case kSmlal: + return "smlal"; + case kSmlalbb: + return "smlalbb"; + case kSmlalbt: + return "smlalbt"; + case kSmlald: + return "smlald"; + case kSmlaldx: + return "smlaldx"; + case kSmlals: + return "smlals"; + case kSmlaltb: + return "smlaltb"; + case kSmlaltt: + return "smlaltt"; + case kSmlatb: + return "smlatb"; + case kSmlatt: + return "smlatt"; + case kSmlawb: + return "smlawb"; + case kSmlawt: + return "smlawt"; + case kSmlsd: + return "smlsd"; + case kSmlsdx: + return "smlsdx"; + case kSmlsld: + return "smlsld"; + case kSmlsldx: + return "smlsldx"; + case kSmmla: + return "smmla"; + case kSmmlar: + return "smmlar"; + case kSmmls: + return "smmls"; + case kSmmlsr: + return "smmlsr"; + case kSmmul: + return "smmul"; + case kSmmulr: + return "smmulr"; + case kSmuad: + return "smuad"; + case kSmuadx: + return "smuadx"; + case kSmulbb: + return "smulbb"; + case kSmulbt: + return "smulbt"; + case kSmull: + return "smull"; + case kSmulls: + return "smulls"; + case kSmultb: + return "smultb"; + case kSmultt: + return "smultt"; + case kSmulwb: + return "smulwb"; + case kSmulwt: + return "smulwt"; + case kSmusd: + return "smusd"; + case kSmusdx: + return "smusdx"; + case kSsat: + return "ssat"; + case kSsat16: + return "ssat16"; + case kSsax: + return "ssax"; + case kSsub16: + return "ssub16"; + case kSsub8: + return "ssub8"; + case kStl: + return "stl"; + case kStlb: + return "stlb"; + case kStlex: + return "stlex"; + case kStlexb: + return "stlexb"; + case kStlexd: + return "stlexd"; + case kStlexh: + return "stlexh"; + case kStlh: + return "stlh"; + case kStm: + return "stm"; + case kStmda: + return "stmda"; + case kStmdb: + return "stmdb"; + case kStmea: + return "stmea"; + case kStmed: + return "stmed"; + case kStmfa: + return "stmfa"; + case kStmfd: + return "stmfd"; + case kStmib: + return "stmib"; + case kStr: + return "str"; + case kStrb: + return "strb"; + case kStrd: + return "strd"; + case kStrex: + return "strex"; + case kStrexb: + return "strexb"; + case kStrexd: + return "strexd"; + case kStrexh: + return "strexh"; + case kStrh: + return "strh"; + case kSub: + return "sub"; + case kSubs: + return "subs"; + case kSubw: + return "subw"; + case kSvc: + return "svc"; + case kSxtab: + return "sxtab"; + case kSxtab16: + return "sxtab16"; + case kSxtah: + return "sxtah"; + case kSxtb: + return "sxtb"; + case kSxtb16: + return "sxtb16"; + case kSxth: + return "sxth"; + case kTbb: + return "tbb"; + case kTbh: + return "tbh"; + case kTeq: + return "teq"; + case kTst: + return "tst"; + case kUadd16: + return "uadd16"; + case kUadd8: + return "uadd8"; + case kUasx: + return "uasx"; + case kUbfx: + return "ubfx"; + case kUdf: + return "udf"; + 
case kUdiv: + return "udiv"; + case kUhadd16: + return "uhadd16"; + case kUhadd8: + return "uhadd8"; + case kUhasx: + return "uhasx"; + case kUhsax: + return "uhsax"; + case kUhsub16: + return "uhsub16"; + case kUhsub8: + return "uhsub8"; + case kUmaal: + return "umaal"; + case kUmlal: + return "umlal"; + case kUmlals: + return "umlals"; + case kUmull: + return "umull"; + case kUmulls: + return "umulls"; + case kUqadd16: + return "uqadd16"; + case kUqadd8: + return "uqadd8"; + case kUqasx: + return "uqasx"; + case kUqsax: + return "uqsax"; + case kUqsub16: + return "uqsub16"; + case kUqsub8: + return "uqsub8"; + case kUsad8: + return "usad8"; + case kUsada8: + return "usada8"; + case kUsat: + return "usat"; + case kUsat16: + return "usat16"; + case kUsax: + return "usax"; + case kUsub16: + return "usub16"; + case kUsub8: + return "usub8"; + case kUxtab: + return "uxtab"; + case kUxtab16: + return "uxtab16"; + case kUxtah: + return "uxtah"; + case kUxtb: + return "uxtb"; + case kUxtb16: + return "uxtb16"; + case kUxth: + return "uxth"; + case kVaba: + return "vaba"; + case kVabal: + return "vabal"; + case kVabd: + return "vabd"; + case kVabdl: + return "vabdl"; + case kVabs: + return "vabs"; + case kVacge: + return "vacge"; + case kVacgt: + return "vacgt"; + case kVacle: + return "vacle"; + case kVaclt: + return "vaclt"; + case kVadd: + return "vadd"; + case kVaddhn: + return "vaddhn"; + case kVaddl: + return "vaddl"; + case kVaddw: + return "vaddw"; + case kVand: + return "vand"; + case kVbic: + return "vbic"; + case kVbif: + return "vbif"; + case kVbit: + return "vbit"; + case kVbsl: + return "vbsl"; + case kVceq: + return "vceq"; + case kVcge: + return "vcge"; + case kVcgt: + return "vcgt"; + case kVcle: + return "vcle"; + case kVcls: + return "vcls"; + case kVclt: + return "vclt"; + case kVclz: + return "vclz"; + case kVcmp: + return "vcmp"; + case kVcmpe: + return "vcmpe"; + case kVcnt: + return "vcnt"; + case kVcvt: + return "vcvt"; + case kVcvta: + return "vcvta"; + case kVcvtb: + return "vcvtb"; + case kVcvtm: + return "vcvtm"; + case kVcvtn: + return "vcvtn"; + case kVcvtp: + return "vcvtp"; + case kVcvtr: + return "vcvtr"; + case kVcvtt: + return "vcvtt"; + case kVdiv: + return "vdiv"; + case kVdup: + return "vdup"; + case kVeor: + return "veor"; + case kVext: + return "vext"; + case kVfma: + return "vfma"; + case kVfms: + return "vfms"; + case kVfnma: + return "vfnma"; + case kVfnms: + return "vfnms"; + case kVhadd: + return "vhadd"; + case kVhsub: + return "vhsub"; + case kVld1: + return "vld1"; + case kVld2: + return "vld2"; + case kVld3: + return "vld3"; + case kVld4: + return "vld4"; + case kVldm: + return "vldm"; + case kVldmdb: + return "vldmdb"; + case kVldmia: + return "vldmia"; + case kVldr: + return "vldr"; + case kVmax: + return "vmax"; + case kVmaxnm: + return "vmaxnm"; + case kVmin: + return "vmin"; + case kVminnm: + return "vminnm"; + case kVmla: + return "vmla"; + case kVmlal: + return "vmlal"; + case kVmls: + return "vmls"; + case kVmlsl: + return "vmlsl"; + case kVmov: + return "vmov"; + case kVmovl: + return "vmovl"; + case kVmovn: + return "vmovn"; + case kVmrs: + return "vmrs"; + case kVmsr: + return "vmsr"; + case kVmul: + return "vmul"; + case kVmull: + return "vmull"; + case kVmvn: + return "vmvn"; + case kVneg: + return "vneg"; + case kVnmla: + return "vnmla"; + case kVnmls: + return "vnmls"; + case kVnmul: + return "vnmul"; + case kVorn: + return "vorn"; + case kVorr: + return "vorr"; + case kVpadal: + return "vpadal"; + case kVpadd: + return "vpadd"; + 
case kVpaddl: + return "vpaddl"; + case kVpmax: + return "vpmax"; + case kVpmin: + return "vpmin"; + case kVpop: + return "vpop"; + case kVpush: + return "vpush"; + case kVqabs: + return "vqabs"; + case kVqadd: + return "vqadd"; + case kVqdmlal: + return "vqdmlal"; + case kVqdmlsl: + return "vqdmlsl"; + case kVqdmulh: + return "vqdmulh"; + case kVqdmull: + return "vqdmull"; + case kVqmovn: + return "vqmovn"; + case kVqmovun: + return "vqmovun"; + case kVqneg: + return "vqneg"; + case kVqrdmulh: + return "vqrdmulh"; + case kVqrshl: + return "vqrshl"; + case kVqrshrn: + return "vqrshrn"; + case kVqrshrun: + return "vqrshrun"; + case kVqshl: + return "vqshl"; + case kVqshlu: + return "vqshlu"; + case kVqshrn: + return "vqshrn"; + case kVqshrun: + return "vqshrun"; + case kVqsub: + return "vqsub"; + case kVraddhn: + return "vraddhn"; + case kVrecpe: + return "vrecpe"; + case kVrecps: + return "vrecps"; + case kVrev16: + return "vrev16"; + case kVrev32: + return "vrev32"; + case kVrev64: + return "vrev64"; + case kVrhadd: + return "vrhadd"; + case kVrinta: + return "vrinta"; + case kVrintm: + return "vrintm"; + case kVrintn: + return "vrintn"; + case kVrintp: + return "vrintp"; + case kVrintr: + return "vrintr"; + case kVrintx: + return "vrintx"; + case kVrintz: + return "vrintz"; + case kVrshl: + return "vrshl"; + case kVrshr: + return "vrshr"; + case kVrshrn: + return "vrshrn"; + case kVrsqrte: + return "vrsqrte"; + case kVrsqrts: + return "vrsqrts"; + case kVrsra: + return "vrsra"; + case kVrsubhn: + return "vrsubhn"; + case kVseleq: + return "vseleq"; + case kVselge: + return "vselge"; + case kVselgt: + return "vselgt"; + case kVselvs: + return "vselvs"; + case kVshl: + return "vshl"; + case kVshll: + return "vshll"; + case kVshr: + return "vshr"; + case kVshrn: + return "vshrn"; + case kVsli: + return "vsli"; + case kVsqrt: + return "vsqrt"; + case kVsra: + return "vsra"; + case kVsri: + return "vsri"; + case kVst1: + return "vst1"; + case kVst2: + return "vst2"; + case kVst3: + return "vst3"; + case kVst4: + return "vst4"; + case kVstm: + return "vstm"; + case kVstmdb: + return "vstmdb"; + case kVstmia: + return "vstmia"; + case kVstr: + return "vstr"; + case kVsub: + return "vsub"; + case kVsubhn: + return "vsubhn"; + case kVsubl: + return "vsubl"; + case kVsubw: + return "vsubw"; + case kVswp: + return "vswp"; + case kVtbl: + return "vtbl"; + case kVtbx: + return "vtbx"; + case kVtrn: + return "vtrn"; + case kVtst: + return "vtst"; + case kVuzp: + return "vuzp"; + case kVzip: + return "vzip"; + case kYield: + return "yield"; + case kUndefInstructionType: + VIXL_UNREACHABLE(); + return ""; + } + VIXL_UNREACHABLE(); + return ""; +} // NOLINT(readability/fn_size) +// End of generated code. + +} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch32/disasm-aarch32.cc b/dep/vixl/src/aarch32/disasm-aarch32.cc new file mode 100644 index 000000000..9ed3a8315 --- /dev/null +++ b/dep/vixl/src/aarch32/disasm-aarch32.cc @@ -0,0 +1,67276 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include <stdint.h> +} + +#include <cassert> +#include <cstdint> +#include <cstdio> +#include <cstdlib> +#include <cstring> +#include <iomanip> + +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/disasm-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { +namespace aarch32 { + +using internal::Int64; +using internal::Uint32; + +class T32CodeAddressIncrementer { + uint32_t* code_address_; + uint32_t increment_; + + public: + T32CodeAddressIncrementer(uint32_t instr, uint32_t* code_address) + : code_address_(code_address), + increment_(Disassembler::Is16BitEncoding(instr) ? 2 : 4) {} + ~T32CodeAddressIncrementer() { *code_address_ += increment_; } +}; + +class A32CodeAddressIncrementer { + uint32_t* code_address_; + + public: + explicit A32CodeAddressIncrementer(uint32_t* code_address) + : code_address_(code_address) {} + ~A32CodeAddressIncrementer() { *code_address_ += 4; } +}; + +class DecodeNeon { + int lane_; + SpacingType spacing_; + bool valid_; + + public: + DecodeNeon(int lane, SpacingType spacing) + : lane_(lane), spacing_(spacing), valid_(true) {} + DecodeNeon() : lane_(0), spacing_(kSingle), valid_(false) {} + int GetLane() const { return lane_; } + SpacingType GetSpacing() const { return spacing_; } + bool IsValid() const { return valid_; } +}; + +class DecodeNeonAndAlign : public DecodeNeon { + public: + Alignment align_; + DecodeNeonAndAlign(int lanes, SpacingType spacing, Alignment align) + : DecodeNeon(lanes, spacing), align_(align) {} + DecodeNeonAndAlign() : align_(kBadAlignment) {} + Alignment GetAlign() const { return align_; } +}; + +// Start of generated code.
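+// The Dt_*_Decode helpers below map raw instruction bit-fields to NEON data +// types, returning kDataTypeValueInvalid for any bit pattern that matches no +// encoding. For example, in Dt_L_imm6_1_Decode the position of the highest +// set bit of the L:imm6 value selects the lane width and the type bit selects +// signedness: Dt_L_imm6_1_Decode(0x1, 0x0) decodes to S8, while +// Dt_L_imm6_1_Decode(0x2, 0x1) decodes to U16.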
+DataTypeValue Dt_L_imm6_1_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0xf) == 0x1) { + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + } else if ((value & 0xe) == 0x2) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0xc) == 0x4) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } else if ((value & 0x8) == 0x8) { + switch (type_value) { + case 0x0: + return S64; + case 0x1: + return U64; + } + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_2_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0xf) == 0x1) { + if (type_value == 0x1) return S8; + } else if ((value & 0xe) == 0x2) { + if (type_value == 0x1) return S16; + } else if ((value & 0xc) == 0x4) { + if (type_value == 0x1) return S32; + } else if ((value & 0x8) == 0x8) { + if (type_value == 0x1) return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_3_Decode(uint32_t value) { + if ((value & 0xf) == 0x1) { + return I8; + } else if ((value & 0xe) == 0x2) { + return I16; + } else if ((value & 0xc) == 0x4) { + return I32; + } else if ((value & 0x8) == 0x8) { + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_4_Decode(uint32_t value) { + if ((value & 0xf) == 0x1) { + return Untyped8; + } else if ((value & 0xe) == 0x2) { + return Untyped16; + } else if ((value & 0xc) == 0x4) { + return Untyped32; + } else if ((value & 0x8) == 0x8) { + return Untyped64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_1_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0x6) == 0x2) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } else if ((value & 0x4) == 0x4) { + switch (type_value) { + case 0x0: + return S64; + case 0x1: + return U64; + } + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_2_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + if (type_value == 0x1) return S16; + } else if ((value & 0x6) == 0x2) { + if (type_value == 0x1) return S32; + } else if ((value & 0x4) == 0x4) { + if (type_value == 0x1) return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_3_Decode(uint32_t value) { + if ((value & 0x7) == 0x1) { + return I16; + } else if ((value & 0x6) == 0x2) { + return I32; + } else if ((value & 0x4) == 0x4) { + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_4_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + } else if ((value & 0x6) == 0x2) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0x4) == 0x4) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + case 0x8: + return P8; + case 0xa: + return P64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x4: + return P8; + } + 
return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return S64; + case 0x4: + return U16; + case 0x5: + return U32; + case 0x6: + return U64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_imm3H_1_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S8; + case 0x2: + return S16; + case 0x4: + return S32; + case 0x9: + return U8; + case 0xa: + return U16; + case 0xc: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x18) == 0x8) { + *lane = value & 7; + return S8; + } + if ((value & 0x19) == 0x1) { + *lane = (value >> 1) & 3; + return S16; + } + if ((value & 0x18) == 0x18) { + *lane = value & 7; + return U8; + } + if ((value & 0x19) == 0x11) { + *lane = (value >> 1) & 3; + return U16; + } + if ((value & 0x1b) == 0x0) { + *lane = (value >> 2) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x8) == 0x8) { + *lane = value & 7; + return Untyped8; + } + if ((value & 0x9) == 0x1) { + *lane = (value >> 1) & 3; + return Untyped16; + } + if ((value & 0xb) == 0x0) { + *lane = (value >> 2) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x1) == 0x1) { + *lane = (value >> 1) & 7; + return Untyped8; + } + if ((value & 0x3) == 0x2) { + *lane = (value >> 2) & 3; + return Untyped16; + } + if ((value & 0x7) == 0x4) { + *lane = (value >> 3) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_B_E_1_Decode(uint32_t value) { + switch (value) { + case 0x2: + return Untyped8; + case 0x1: + return Untyped16; + case 0x0: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_1_Decode1(uint32_t value) { + switch (value) { + case 0x0: + return F32; + case 0x1: + return F32; + case 0x2: + return S32; + case 0x3: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_1_Decode2(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + return U32; + case 0x2: + return F32; + case 0x3: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return U32; + case 0x1: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_sx_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return U16; + case 0x3: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_1_Decode1(uint32_t value) { + switch (value) { + case 0x0: + return F32; + case 0x1: + return F32; + case 0x2: + return S32; + case 0x3: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_1_Decode2(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + 
return U32; + case 0x2: + return F32; + case 0x3: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_sz_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_3_Decode(uint32_t value) { + switch (value) { + case 0x1: + return I16; + case 0x2: + return I32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_4_Decode(uint32_t value) { + switch (value) { + case 0x2: + return U32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_2_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S16; + case 0x2: + return S32; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x3: + return S64; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + case 0x7: + return U64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x3: + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I16; + case 0x1: + return I32; + case 0x2: + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_4_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_5_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_6_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + case 0x2: + return Untyped32; + case 0x3: + return Untyped64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_7_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + case 0x2: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_8_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + case 0x2: + return Untyped32; + case 0x3: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_9_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x1: + switch (type_value) { + case 
0x0: + return I16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return I32; + case 0x1: + return F32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_10_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_11_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x1: + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_12_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x0: + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + break; + case 0x1: + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_13_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S16; + case 0x2: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_14_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_15_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_16_Decode(uint32_t value) { + switch (value) { + case 0x2: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_17_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DecodeNeon Index_1_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + int lane = (value >> 1) & 0x7; + if ((value & 1) != 0) break; + SpacingType spacing = kSingle; + return DecodeNeon(lane, spacing); + } + case Untyped16: { + int lane = (value >> 2) & 0x3; + if ((value & 1) != 0) break; + SpacingType spacing = ((value & 3) == 2) ? kDouble : kSingle; + return DecodeNeon(lane, spacing); + } + case Untyped32: { + int lane = (value >> 3) & 0x1; + if ((value & 3) != 0) break; + SpacingType spacing = ((value & 7) == 4) ? 
kDouble : kSingle; + return DecodeNeon(lane, spacing); + } + default: + break; + } + return DecodeNeon(); +} + +DecodeNeonAndAlign Align_index_align_1_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 3) == 1) { + align = k16BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 7) == 3) { + align = k32BitAlign; + } else if ((value & 7) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +DecodeNeonAndAlign Align_index_align_2_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 1) { + align = k16BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 1) == 1) { + align = k32BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = ((value & 2) == 2) ? kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 3) == 1) { + align = k64BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = ((value & 4) == 4) ? kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +DecodeNeonAndAlign Align_index_align_3_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 1) { + align = k32BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 1) == 1) { + align = k64BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = ((value & 2) == 2) ? kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 3) == 1) { + align = k64BitAlign; + } else if ((value & 3) == 2) { + align = k128BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = ((value & 4) == 4) ? 
kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +Alignment Align_a_1_Decode(uint32_t value, DataType dt) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + if (dt.Is(Untyped16)) return k16BitAlign; + if (dt.Is(Untyped32)) return k32BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_a_2_Decode(uint32_t value, DataType dt) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + if (dt.Is(Untyped8)) return k16BitAlign; + if (dt.Is(Untyped16)) return k32BitAlign; + if (dt.Is(Untyped32)) return k64BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_a_3_Decode(uint32_t value, DataType dt, uint32_t size) { + switch (value) { + case 0: + if (size != 3) return kNoAlignment; + break; + case 1: + if (dt.Is(Untyped8)) return k32BitAlign; + if (dt.Is(Untyped16)) return k64BitAlign; + if (size == 2) return k64BitAlign; + if (size == 3) return k128BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_1_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_2_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_3_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_4_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_5_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + + +void Disassembler::adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdc, kArithmetic); + os() << ToCString(kAdc) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdcs, kArithmetic); + os() << ToCString(kAdcs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdd, kArithmetic); + os() << ToCString(kAdd) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::add(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kAdd, kArithmetic); + os() << ToCString(kAdd) 
<< ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdds, kArithmetic); + os() << ToCString(kAdds) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::adds(Register rd, const Operand& operand) { + os().SetCurrentInstruction(kAdds, kArithmetic); + os() << ToCString(kAdds) << " " << rd << ", " << operand; +} + +void Disassembler::addw(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAddw, kArithmetic); + os() << ToCString(kAddw) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::adr(Condition cond, + EncodingSize size, + Register rd, + Location* location) { + os().SetCurrentInstruction(kAdr, kAddress); + os() << ToCString(kAdr) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " + << PrintLabel(kAnyLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAnd, kBitwise); + os() << ToCString(kAnd) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAnds, kBitwise); + os() << ToCString(kAnds) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kAsr, kShift); + os() << ToCString(kAsr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::asrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kAsrs, kShift); + os() << ToCString(kAsrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::b(Condition cond, EncodingSize size, Location* location) { + os().SetCurrentInstruction(kB, kAddress | kBranch); + os() << ToCString(kB) << ConditionPrinter(it_block_, cond) << size << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::bfc(Condition cond, + Register rd, + uint32_t lsb, + uint32_t width) { + os().SetCurrentInstruction(kBfc, kShift); + os() << ToCString(kBfc) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(lsb) << ", " << ImmediatePrinter(width); +} + +void Disassembler::bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kBfi, kShift); + os() << ToCString(kBfi) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", 
" + << ImmediatePrinter(width); +} + +void Disassembler::bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kBic, kBitwise); + os() << ToCString(kBic) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kBics, kBitwise); + os() << ToCString(kBics) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::bkpt(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kBkpt, kSystem); + os() << ToCString(kBkpt) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::bl(Condition cond, Location* location) { + os().SetCurrentInstruction(kBl, kAddress | kBranch); + os() << ToCString(kBl) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::blx(Condition cond, Location* location) { + os().SetCurrentInstruction(kBlx, kAddress | kBranch); + os() << ToCString(kBlx) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::blx(Condition cond, Register rm) { + os().SetCurrentInstruction(kBlx, kAddress | kBranch); + os() << ToCString(kBlx) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::bx(Condition cond, Register rm) { + os().SetCurrentInstruction(kBx, kAddress | kBranch); + os() << ToCString(kBx) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::bxj(Condition cond, Register rm) { + os().SetCurrentInstruction(kBxj, kAddress | kBranch); + os() << ToCString(kBxj) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::cbnz(Register rn, Location* location) { + os().SetCurrentInstruction(kCbnz, kAddress | kBranch); + os() << ToCString(kCbnz) << " " << rn << ", " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::cbz(Register rn, Location* location) { + os().SetCurrentInstruction(kCbz, kAddress | kBranch); + os() << ToCString(kCbz) << " " << rn << ", " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::clrex(Condition cond) { + os().SetCurrentInstruction(kClrex, kNoAttribute); + os() << ToCString(kClrex) << ConditionPrinter(it_block_, cond); +} + +void Disassembler::clz(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kClz, kNoAttribute); + os() << ToCString(kClz) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rm; +} + +void Disassembler::cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kCmn, kArithmetic); + os() << ToCString(kCmn) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kCmp, kArithmetic); + os() << ToCString(kCmp) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::crc32b(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32b, 
kNoAttribute); + os() << ToCString(kCrc32b) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32cb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32cb, kNoAttribute); + os() << ToCString(kCrc32cb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32ch(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32ch, kNoAttribute); + os() << ToCString(kCrc32ch) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32cw(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32cw, kNoAttribute); + os() << ToCString(kCrc32cw) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32h(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32h, kNoAttribute); + os() << ToCString(kCrc32h) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32w(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32w, kNoAttribute); + os() << ToCString(kCrc32w) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::dmb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kDmb, kNoAttribute); + os() << ToCString(kDmb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::dsb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kDsb, kNoAttribute); + os() << ToCString(kDsb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kEor, kBitwise); + os() << ToCString(kEor) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kEors, kBitwise); + os() << ToCString(kEors) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFldmdbx, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFldmdbx) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFldmiax, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFldmiax) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFstmdbx, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFstmdbx) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fstmiax(Condition cond, + 
Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFstmiax, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFstmiax) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::hlt(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kHlt, kSystem); + os() << ToCString(kHlt) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::hvc(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kHvc, kSystem); + os() << ToCString(kHvc) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::isb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kIsb, kNoAttribute); + os() << ToCString(kIsb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::it(Condition cond, uint16_t mask) { + os().SetCurrentInstruction(kIt, kNoAttribute); + os() << ToCString(kIt); + int count; + if ((mask & 0x1) != 0) { + count = 3; + } else if ((mask & 0x2) != 0) { + count = 2; + } else if ((mask & 0x4) != 0) { + count = 1; + } else { + count = 0; + } + uint16_t tmp = 0x8; + uint16_t ref = (cond.GetCondition() & 0x1) << 3; + while (count-- > 0) { + os() << (((mask & tmp) == ref) ? "t" : "e"); + tmp >>= 1; + ref >>= 1; + } + if (cond.Is(al)) { + os() << " al"; + } else { + os() << " " << cond; + } +} + +void Disassembler::lda(Condition cond, Register rt, const MemOperand& operand) { + os().SetCurrentInstruction(kLda, kAddress | kLoadStore); + os() << ToCString(kLda) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldab(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdab, kAddress | kLoadStore); + os() << ToCString(kLdab) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldaex(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaex, kAddress | kLoadStore); + os() << ToCString(kLdaex) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldaexb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexb, kAddress | kLoadStore); + os() << ToCString(kLdaexb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexd, kAddress | kLoadStore); + os() << ToCString(kLdaexd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldaexh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexh, kAddress | kLoadStore); + os() << ToCString(kLdaexh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldah(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdah, kAddress | kLoadStore); + os() << ToCString(kLdah) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldm(Condition 
cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdm, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdm) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmda, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmda) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmdb, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmdb) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmea, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmea) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmed, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmed) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmfa, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmfa) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmfd, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmfd) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmib, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmib) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdr, kAddress | kLoadStore); + os() << ToCString(kLdr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldr(Condition cond, + EncodingSize size, + Register rt, + Location* location) { + os().SetCurrentInstruction(kLdr, kAddress | kLoadStore); + os() << ToCString(kLdr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " + << PrintLabel(kLoadWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrb, kAddress | kLoadStore); + os() << ToCString(kLdrb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldrb(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrb, kAddress | kLoadStore); + os() << ToCString(kLdrb) << 
ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadByteLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrd, kAddress | kLoadStore); + os() << ToCString(kLdrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldrd(Condition cond, + Register rt, + Register rt2, + Location* location) { + os().SetCurrentInstruction(kLdrd, kAddress | kLoadStore); + os() << ToCString(kLdrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintLabel(kLoadDoubleWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrex(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrex, kAddress | kLoadStore); + os() << ToCString(kLdrex) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldrexb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexb, kAddress | kLoadStore); + os() << ToCString(kLdrexb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexd, kAddress | kLoadStore); + os() << ToCString(kLdrexd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldrexh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexh, kAddress | kLoadStore); + os() << ToCString(kLdrexh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrh, kAddress | kLoadStore); + os() << ToCString(kLdrh) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldrh(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrh, kAddress | kLoadStore); + os() << ToCString(kLdrh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadHalfWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrsb, kAddress | kLoadStore); + os() << ToCString(kLdrsb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadSignedByteLocation, operand); +} + +void Disassembler::ldrsb(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrsb, kAddress | kLoadStore); + os() << ToCString(kLdrsb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadSignedByteLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrsh, kAddress | kLoadStore); + os() << ToCString(kLdrsh) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << 
PrintMemOperand(kLoadSignedHalfWordLocation, operand); +} + +void Disassembler::ldrsh(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrsh, kAddress | kLoadStore); + os() << ToCString(kLdrsh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintLabel(kLoadSignedHalfWordLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsl, kShift); + os() << ToCString(kLsl) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsls, kShift); + os() << ToCString(kLsls) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsr, kShift); + os() << ToCString(kLsr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsrs, kShift); + os() << ToCString(kLsrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::mla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMla, kArithmetic); + os() << ToCString(kMla) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mlas( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMlas, kArithmetic); + os() << ToCString(kMlas) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMls, kArithmetic); + os() << ToCString(kMls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mov(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMov, kNoAttribute); + os() << ToCString(kMov) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMovs, kNoAttribute); + os() << ToCString(kMovs) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::movt(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kMovt, kNoAttribute); + os() << ToCString(kMovt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::movw(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kMovw, kNoAttribute); + os() << 
ToCString(kMovw) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::mrs(Condition cond, Register rd, SpecialRegister spec_reg) { + os().SetCurrentInstruction(kMrs, kNoAttribute); + os() << ToCString(kMrs) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << spec_reg; +} + +void Disassembler::msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + os().SetCurrentInstruction(kMsr, kNoAttribute); + os() << ToCString(kMsr) << ConditionPrinter(it_block_, cond) << " " + << spec_reg << ", " << operand; +} + +void Disassembler::mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kMul, kArithmetic); + os() << ToCString(kMul) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::muls(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kMuls, kArithmetic); + os() << ToCString(kMuls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMvn, kNoAttribute); + os() << ToCString(kMvn) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMvns, kNoAttribute); + os() << ToCString(kMvns) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::nop(Condition cond, EncodingSize size) { + os().SetCurrentInstruction(kNop, kNoAttribute); + os() << ToCString(kNop) << ConditionPrinter(it_block_, cond) << size; +} + +void Disassembler::orn(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrn, kBitwise); + os() << ToCString(kOrn) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orns(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrns, kBitwise); + os() << ToCString(kOrns) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orr(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrr, kBitwise); + os() << ToCString(kOrr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrrs, kBitwise); + os() << ToCString(kOrrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::pkhbt(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kPkhbt, kNoAttribute); + os() << ToCString(kPkhbt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << 
operand; +} + +void Disassembler::pkhtb(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kPkhtb, kNoAttribute); + os() << ToCString(kPkhtb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::pld(Condition cond, Location* location) { + os().SetCurrentInstruction(kPld, kAddress); + os() << ToCString(kPld) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kDataLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::pld(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPld, kAddress); + os() << ToCString(kPld) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kDataLocation, operand); +} + +void Disassembler::pldw(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPldw, kAddress); + os() << ToCString(kPldw) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kDataLocation, operand); +} + +void Disassembler::pli(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPli, kAddress); + os() << ToCString(kPli) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kCodeLocation, operand); +} + +void Disassembler::pli(Condition cond, Location* location) { + os().SetCurrentInstruction(kPli, kAddress); + os() << ToCString(kPli) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::pop(Condition cond, + EncodingSize size, + RegisterList registers) { + os().SetCurrentInstruction(kPop, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPop) << ConditionPrinter(it_block_, cond) << size << " " + << registers; +} + +void Disassembler::pop(Condition cond, EncodingSize size, Register rt) { + os().SetCurrentInstruction(kPop, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPop) << ConditionPrinter(it_block_, cond) << size << " " + << "{" << rt << "}"; +} + +void Disassembler::push(Condition cond, + EncodingSize size, + RegisterList registers) { + os().SetCurrentInstruction(kPush, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPush) << ConditionPrinter(it_block_, cond) << size << " " + << registers; +} + +void Disassembler::push(Condition cond, EncodingSize size, Register rt) { + os().SetCurrentInstruction(kPush, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPush) << ConditionPrinter(it_block_, cond) << size << " " + << "{" << rt << "}"; +} + +void Disassembler::qadd(Condition cond, Register rd, Register rm, Register rn) { + os().SetCurrentInstruction(kQadd, kArithmetic); + os() << ToCString(kQadd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQadd16, kArithmetic); + os() << ToCString(kQadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQadd8, kArithmetic); + os() << ToCString(kQadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + 
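+// The printers for the saturating/parallel arithmetic instructions below
+// follow the same pattern as the shift printers above: when the destination
+// register is the same as the first source register and use_short_hand_form_
+// is set, the destination is omitted, giving the UAL two-operand shorthand.
+// For example, "qasx r0, r0, r1" prints as "qasx r0, r1", while
+// "qasx r2, r0, r1" keeps all three operands.
+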
+void Disassembler::qasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kQasx, kArithmetic); + os() << ToCString(kQasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qdadd(Condition cond, + Register rd, + Register rm, + Register rn) { + os().SetCurrentInstruction(kQdadd, kArithmetic); + os() << ToCString(kQdadd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qdsub(Condition cond, + Register rd, + Register rm, + Register rn) { + os().SetCurrentInstruction(kQdsub, kArithmetic); + os() << ToCString(kQdsub) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qsax(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kQsax, kArithmetic); + os() << ToCString(kQsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qsub(Condition cond, Register rd, Register rm, Register rn) { + os().SetCurrentInstruction(kQsub, kArithmetic); + os() << ToCString(kQsub) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQsub16, kArithmetic); + os() << ToCString(kQsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQsub8, kArithmetic); + os() << ToCString(kQsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::rbit(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kRbit, kNoAttribute); + os() << ToCString(kRbit) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rm; +} + +void Disassembler::rev(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRev, kNoAttribute); + os() << ToCString(kRev) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::rev16(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRev16, kNoAttribute); + os() << ToCString(kRev16) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::revsh(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRevsh, kNoAttribute); + os() << ToCString(kRevsh) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kRor, kShift); + os() << ToCString(kRor) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + 
os() << rm << ", " << operand; +} + +void Disassembler::rors(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kRors, kShift); + os() << ToCString(kRors) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::rrx(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kRrx, kShift); + os() << ToCString(kRrx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm; +} + +void Disassembler::rrxs(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kRrxs, kShift); + os() << ToCString(kRrxs) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm; +} + +void Disassembler::rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsb, kArithmetic); + os() << ToCString(kRsb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsbs, kArithmetic); + os() << ToCString(kRsbs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rsc(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsc, kArithmetic); + os() << ToCString(kRsc) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rscs(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRscs, kArithmetic); + os() << ToCString(kRscs) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSadd16, kArithmetic); + os() << ToCString(kSadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSadd8, kArithmetic); + os() << ToCString(kSadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSasx, kArithmetic); + os() << ToCString(kSasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSbc, kArithmetic); + os() << ToCString(kSbc) << 
ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSbcs, kArithmetic); + os() << ToCString(kSbcs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kSbfx, kShift); + os() << ToCString(kSbfx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", " + << ImmediatePrinter(width); +} + +void Disassembler::sdiv(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSdiv, kArithmetic); + os() << ToCString(kSdiv) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sel(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSel, kNoAttribute); + os() << ToCString(kSel) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShadd16, kArithmetic); + os() << ToCString(kShadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShadd8, kArithmetic); + os() << ToCString(kShadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShasx, kArithmetic); + os() << ToCString(kShasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShsax, kArithmetic); + os() << ToCString(kShsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShsub16, kArithmetic); + os() << ToCString(kShsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShsub8, kArithmetic); + os() << ToCString(kShsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + 
os().SetCurrentInstruction(kSmlabb, kArithmetic); + os() << ToCString(kSmlabb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlabt, kArithmetic); + os() << ToCString(kSmlabt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlad, kArithmetic); + os() << ToCString(kSmlad) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmladx, kArithmetic); + os() << ToCString(kSmladx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlal, kArithmetic); + os() << ToCString(kSmlal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlalbb, kArithmetic); + os() << ToCString(kSmlalbb) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlalbt, kArithmetic); + os() << ToCString(kSmlalbt) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlald, kArithmetic); + os() << ToCString(kSmlald) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaldx, kArithmetic); + os() << ToCString(kSmlaldx) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlals, kArithmetic); + os() << ToCString(kSmlals) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaltb, kArithmetic); + os() << ToCString(kSmlaltb) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaltt, kArithmetic); + os() << ToCString(kSmlaltt) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlatb, kArithmetic); + os() << ToCString(kSmlatb) << ConditionPrinter(it_block_, 
cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlatt, kArithmetic); + os() << ToCString(kSmlatt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlawb, kArithmetic); + os() << ToCString(kSmlawb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlawt, kArithmetic); + os() << ToCString(kSmlawt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlsd, kArithmetic); + os() << ToCString(kSmlsd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlsdx, kArithmetic); + os() << ToCString(kSmlsdx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlsld, kArithmetic); + os() << ToCString(kSmlsld) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlsldx, kArithmetic); + os() << ToCString(kSmlsldx) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmla, kArithmetic); + os() << ToCString(kSmmla) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmlar, kArithmetic); + os() << ToCString(kSmmlar) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmls, kArithmetic); + os() << ToCString(kSmmls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmlsr, kArithmetic); + os() << ToCString(kSmmlsr) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmul(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmmul, kArithmetic); + os() << ToCString(kSmmul) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smmulr(Condition cond, + Register rd, + 
Register rn, + Register rm) { + os().SetCurrentInstruction(kSmmulr, kArithmetic); + os() << ToCString(kSmmulr) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smuad(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmuad, kArithmetic); + os() << ToCString(kSmuad) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smuadx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmuadx, kArithmetic); + os() << ToCString(kSmuadx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulbb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulbb, kArithmetic); + os() << ToCString(kSmulbb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulbt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulbt, kArithmetic); + os() << ToCString(kSmulbt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmull, kArithmetic); + os() << ToCString(kSmull) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmulls, kArithmetic); + os() << ToCString(kSmulls) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smultb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmultb, kArithmetic); + os() << ToCString(kSmultb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smultt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmultt, kArithmetic); + os() << ToCString(kSmultt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulwb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulwb, kArithmetic); + os() << ToCString(kSmulwb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulwt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulwt, kArithmetic); + os() << ToCString(kSmulwt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smusd(Condition cond, + Register rd, + Register rn, + Register rm) { + 
os().SetCurrentInstruction(kSmusd, kArithmetic); + os() << ToCString(kSmusd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smusdx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmusdx, kArithmetic); + os() << ToCString(kSmusdx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssat(Condition cond, + Register rd, + uint32_t imm, + const Operand& operand) { + os().SetCurrentInstruction(kSsat, kArithmetic); + os() << ToCString(kSsat) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << operand; +} + +void Disassembler::ssat16(Condition cond, + Register rd, + uint32_t imm, + Register rn) { + os().SetCurrentInstruction(kSsat16, kArithmetic); + os() << ToCString(kSsat16) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << rn; +} + +void Disassembler::ssax(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSsax, kArithmetic); + os() << ToCString(kSsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSsub16, kArithmetic); + os() << ToCString(kSsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSsub8, kArithmetic); + os() << ToCString(kSsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::stl(Condition cond, Register rt, const MemOperand& operand) { + os().SetCurrentInstruction(kStl, kAddress | kLoadStore); + os() << ToCString(kStl) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::stlb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlb, kAddress | kLoadStore); + os() << ToCString(kStlb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlex, kAddress | kLoadStore); + os() << ToCString(kStlex) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexb, kAddress | kLoadStore); + os() << ToCString(kStlexb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexd, kAddress | kLoadStore); + os() << ToCString(kStlexd) << ConditionPrinter(it_block_, cond) 
<< " " << rd + << ", " << rt << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexh, kAddress | kLoadStore); + os() << ToCString(kStlexh) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " + << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::stlh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlh, kAddress | kLoadStore); + os() << ToCString(kStlh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStm, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStm) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmda, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmda) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmdb(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmdb, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmdb) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmea, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmea) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmed, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmed) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmfa, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmfa) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmfd, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmfd) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmib, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmib) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStr, kAddress | kLoadStore); + os() << ToCString(kStr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void 
Disassembler::strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrb, kAddress | kLoadStore); + os() << ToCString(kStrb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrd, kAddress | kLoadStore); + os() << ToCString(kStrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrex, kAddress | kLoadStore); + os() << ToCString(kStrex) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexb, kAddress | kLoadStore); + os() << ToCString(kStrexb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexd, kAddress | kLoadStore); + os() << ToCString(kStrexd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexh, kAddress | kLoadStore); + os() << ToCString(kStrexh) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " + << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrh, kAddress | kLoadStore); + os() << ToCString(kStrh) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSub, kArithmetic); + os() << ToCString(kSub) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sub(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kSub, kArithmetic); + os() << ToCString(kSub) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSubs, kArithmetic); + os() << ToCString(kSubs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::subs(Register rd, const Operand& operand) { + os().SetCurrentInstruction(kSubs, kArithmetic); + os() << ToCString(kSubs) << " " << rd << ", " << operand; +} + +void Disassembler::subw(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + 
os().SetCurrentInstruction(kSubw, kArithmetic); + os() << ToCString(kSubw) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::svc(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kSvc, kSystem); + os() << ToCString(kSvc) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::sxtab(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtab, kArithmetic); + os() << ToCString(kSxtab) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtab16, kArithmetic); + os() << ToCString(kSxtab16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtah(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtah, kArithmetic); + os() << ToCString(kSxtah) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kSxtb, kArithmetic); + os() << ToCString(kSxtb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::sxtb16(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kSxtb16, kArithmetic); + os() << ToCString(kSxtb16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kSxth, kArithmetic); + os() << ToCString(kSxth) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::tbb(Condition cond, Register rn, Register rm) { + os().SetCurrentInstruction(kTbb, kBranch); + os() << ToCString(kTbb) << ConditionPrinter(it_block_, cond) << " " + << MemOperand(rn, rm); +} + +void Disassembler::tbh(Condition cond, Register rn, Register rm) { + os().SetCurrentInstruction(kTbh, kBranch); + os() << ToCString(kTbh) << ConditionPrinter(it_block_, cond) << " " + << MemOperand(rn, plus, rm, LSL, 1); +} + +void Disassembler::teq(Condition cond, Register rn, const Operand& operand) { + os().SetCurrentInstruction(kTeq, kBitwise); + os() << ToCString(kTeq) << ConditionPrinter(it_block_, cond) << " " << rn + << ", " << operand; +} + +void Disassembler::tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kTst, kBitwise); + os() << ToCString(kTst) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::uadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + 
os().SetCurrentInstruction(kUadd16, kArithmetic); + os() << ToCString(kUadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUadd8, kArithmetic); + os() << ToCString(kUadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kUasx, kArithmetic); + os() << ToCString(kUasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kUbfx, kShift); + os() << ToCString(kUbfx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", " + << ImmediatePrinter(width); +} + +void Disassembler::udf(Condition cond, EncodingSize size, uint32_t imm) { + os().SetCurrentInstruction(kUdf, kNoAttribute); + os() << ToCString(kUdf) << ConditionPrinter(it_block_, cond) << size << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::udiv(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kUdiv, kArithmetic); + os() << ToCString(kUdiv) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhadd16, kArithmetic); + os() << ToCString(kUhadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhadd8, kArithmetic); + os() << ToCString(kUhadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhasx, kArithmetic); + os() << ToCString(kUhasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsax, kArithmetic); + os() << ToCString(kUhsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsub16, kArithmetic); + os() << ToCString(kUhsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsub8, kArithmetic); + os() << 
ToCString(kUhsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmaal, kArithmetic); + os() << ToCString(kUmaal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmlal, kArithmetic); + os() << ToCString(kUmlal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmlals, kArithmetic); + os() << ToCString(kUmlals) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmull, kArithmetic); + os() << ToCString(kUmull) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmulls, kArithmetic); + os() << ToCString(kUmulls) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::uqadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqadd16, kArithmetic); + os() << ToCString(kUqadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqadd8, kArithmetic); + os() << ToCString(kUqadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqasx, kArithmetic); + os() << ToCString(kUqasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsax, kArithmetic); + os() << ToCString(kUqsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsub16, kArithmetic); + os() << ToCString(kUqsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsub8, kArithmetic); + os() << ToCString(kUqsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() 
<< rn << ", " << rm; +} + +void Disassembler::usad8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsad8, kArithmetic); + os() << ToCString(kUsad8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kUsada8, kArithmetic); + os() << ToCString(kUsada8) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::usat(Condition cond, + Register rd, + uint32_t imm, + const Operand& operand) { + os().SetCurrentInstruction(kUsat, kArithmetic); + os() << ToCString(kUsat) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << operand; +} + +void Disassembler::usat16(Condition cond, + Register rd, + uint32_t imm, + Register rn) { + os().SetCurrentInstruction(kUsat16, kArithmetic); + os() << ToCString(kUsat16) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << rn; +} + +void Disassembler::usax(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kUsax, kArithmetic); + os() << ToCString(kUsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsub16, kArithmetic); + os() << ToCString(kUsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsub8, kArithmetic); + os() << ToCString(kUsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uxtab(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtab, kArithmetic); + os() << ToCString(kUxtab) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtab16, kArithmetic); + os() << ToCString(kUxtab16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtah(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtah, kArithmetic); + os() << ToCString(kUxtah) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kUxtb, kArithmetic); + os() << ToCString(kUxtb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + 
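+// For the extend instructions the source register travels inside the
+// Operand, so the shorthand check compares rd against
+// operand.GetBaseRegister() rather than a plain source register: with
+// use_short_hand_form_ set, "uxtb r0, r0" prints as "uxtb r0", while
+// "uxtb r0, r1" keeps both registers.
+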
+void Disassembler::uxtb16(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kUxtb16, kArithmetic); + os() << ToCString(kUxtb16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kUxth, kArithmetic); + os() << ToCString(kUxth) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaba, kFpNeon); + os() << ToCString(kVaba) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaba, kFpNeon); + os() << ToCString(kVaba) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabal, kFpNeon); + os() << ToCString(kVabal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabd, kFpNeon); + os() << ToCString(kVabd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVabd, kFpNeon); + os() << ToCString(kVabd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabdl, kFpNeon); + os() << ToCString(kVabdl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacge, kFpNeon); + os() << ToCString(kVacge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacge( + Condition cond, 
DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacge, kFpNeon); + os() << ToCString(kVacge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacgt, kFpNeon); + os() << ToCString(kVacgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacgt, kFpNeon); + os() << ToCString(kVacgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacle, kFpNeon); + os() << ToCString(kVacle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacle, kFpNeon); + os() << ToCString(kVacle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaclt, kFpNeon); + os() << ToCString(kVaclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaclt, kFpNeon); + os() << ToCString(kVaclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaddhn, kFpNeon); + os() << ToCString(kVaddhn) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaddl, kFpNeon); + os() << ToCString(kVaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaddw, kFpNeon); + os() << ToCString(kVaddw) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVand, kFpNeon); + os() << ToCString(kVand) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVand, kFpNeon); + os() << ToCString(kVand) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVbic, kFpNeon); + os() << ToCString(kVbic) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVbic, kFpNeon); + os() << ToCString(kVbic) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbif, kFpNeon); + os() << ToCString(kVbif) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVbif, kFpNeon); + os() << ToCString(kVbif) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbit, kFpNeon); + os() << ToCString(kVbit) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVbit, kFpNeon); + os() << ToCString(kVbit) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbsl( + Condition cond, DataType dt, DRegister rd, 
DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbsl, kFpNeon); + os() << ToCString(kVbsl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVbsl, kFpNeon); + os() << ToCString(kVbsl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() 
<< ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcls(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcls, kFpNeon); + os() << ToCString(kVcls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcls(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcls, kFpNeon); + os() << ToCString(kVcls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const 
QOperand& operand) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vclz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVclz, kFpNeon); + os() << ToCString(kVclz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vclz(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVclz, kFpNeon); + os() << ToCString(kVclz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcmp(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVcmp, kFpNeon); + os() << ToCString(kVcmp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcmp(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVcmp, kFpNeon); + os() << ToCString(kVcmp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVcmpe, kFpNeon); + os() << ToCString(kVcmpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVcmpe, kFpNeon); + os() << ToCString(kVcmpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcnt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcnt, kFpNeon); + os() << ToCString(kVcnt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcnt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcnt, kFpNeon); + os() << ToCString(kVcnt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + 
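+  // Fixed-point form: fbits (fraction bits) is printed as a trailing signed immediate.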
DRegister rm, + int32_t fbits) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << 
rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtr, kFpNeon); + os() << ToCString(kVcvtr) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtr, kFpNeon); + os() << ToCString(kVcvtr) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, 
cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVdiv, kFpNeon); + os() << ToCString(kVdiv) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVdiv, kFpNeon); + os() << ToCString(kVdiv) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + QRegister rd, + Register rt) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + DRegister rd, + Register rt) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVeor, kFpNeon); + os() << ToCString(kVeor) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVeor, kFpNeon); + os() << ToCString(kVeor) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVext, kFpNeon); + os() << ToCString(kVext) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm << ", " << operand; +} + +void Disassembler::vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVext, kFpNeon); + os() << ToCString(kVext) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; 
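+  // Short-hand form: omit the destination when it matches the first source register.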
+ if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm << ", " << operand; +} + +void Disassembler::vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfnma, kFpNeon); + os() << ToCString(kVfnma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfnma, kFpNeon); + os() << ToCString(kVfnma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfnms, kFpNeon); + os() << ToCString(kVfnms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfnms, kFpNeon); + os() << ToCString(kVfnms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVhadd, kFpNeon); + os() << ToCString(kVhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVhadd, kFpNeon); + os() << ToCString(kVhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhsub( + Condition cond, DataType dt, DRegister rd, 
DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVhsub, kFpNeon); + os() << ToCString(kVhsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVhsub, kFpNeon); + os() << ToCString(kVhsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld1, kFpNeon); + os() << ToCString(kVld1) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld1Location, operand); +} + +void Disassembler::vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld2, kFpNeon); + os() << ToCString(kVld2) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld2Location, operand); +} + +void Disassembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld3, kFpNeon); + os() << ToCString(kVld3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld3Location, operand); +} + +void Disassembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + os().SetCurrentInstruction(kVld3, kFpNeon); + os() << ToCString(kVld3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintMemOperand(kVld3Location, operand); +} + +void Disassembler::vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld4, kFpNeon); + os() << ToCString(kVld4) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld4Location, operand); +} + +void Disassembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << 
", " << sreglist; +} + +void Disassembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vldr(Condition cond, + DataType dt, + DRegister rd, + Location* location) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintLabel(kLoadDoublePrecisionLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintMemOperand(kLoadDoublePrecisionLocation, operand); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + SRegister rd, + Location* location) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintLabel(kLoadSinglePrecisionLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintMemOperand(kLoadSinglePrecisionLocation, operand); +} + +void Disassembler::vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmax, kFpNeon); + os() << ToCString(kVmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmax, kFpNeon); + os() << ToCString(kVmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmin, kFpNeon); + os() << ToCString(kVmin) 
<< ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmin, kFpNeon); + os() << ToCString(kVmin) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmlal, kFpNeon); + os() << ToCString(kVmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmlal, kFpNeon); + os() << ToCString(kVmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " 
<< rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmlsl, kFpNeon); + os() << ToCString(kVmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmlsl, kFpNeon); + os() << ToCString(kVmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmov(Condition cond, Register rt, SRegister rn) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rn; +} + +void Disassembler::vmov(Condition cond, SRegister rn, Register rt) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rn + << ", " << rt; +} + +void Disassembler::vmov(Condition cond, + Register rt, + Register rt2, + DRegister rm) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " << rm; +} + +void Disassembler::vmov(Condition cond, + DRegister rm, + Register rt, + Register rt2) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rm + << ", " << rt << ", " << rt2; +} + +void Disassembler::vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " << rm << ", " << rm1; +} + +void Disassembler::vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rm + << ", " << rm1 << ", " << rt << ", " << rt2; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rt << ", " << rn; +} + +void Disassembler::vmovl(Condition cond, + DataType dt, + QRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVmovl, kFpNeon); + os() << ToCString(kVmovl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVmovn, kFpNeon); + os() << ToCString(kVmovn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + os().SetCurrentInstruction(kVmrs, kFpNeon); + os() << ToCString(kVmrs) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << spec_reg; +} + +void Disassembler::vmsr(Condition cond, + SpecialFPRegister spec_reg, + Register rt) { + os().SetCurrentInstruction(kVmsr, kFpNeon); + os() << ToCString(kVmsr) << ConditionPrinter(it_block_, cond) << " " + << spec_reg << ", " << rt; +} + +void Disassembler::vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmull, kFpNeon); + os() << ToCString(kVmull) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmull, kFpNeon); + os() << ToCString(kVmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVmvn, kFpNeon); + os() << ToCString(kVmvn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + os().SetCurrentInstruction(kVmvn, kFpNeon); + os() << ToCString(kVmvn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVnmla, kFpNeon); + os() << ToCString(kVnmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmla, kFpNeon); + os() << ToCString(kVnmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVnmls, kFpNeon); + os() << ToCString(kVnmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmls, kFpNeon); + os() << ToCString(kVnmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVnmul, kFpNeon); + os() << ToCString(kVnmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmul, kFpNeon); + os() << ToCString(kVnmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVorn, kFpNeon); + os() << 
ToCString(kVorn) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVorn, kFpNeon); + os() << ToCString(kVorn) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVorr, kFpNeon); + os() << ToCString(kVorr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVorr, kFpNeon); + os() << ToCString(kVorr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vpadal(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVpadal, kFpNeon); + os() << ToCString(kVpadal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpadal(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVpadal, kFpNeon); + os() << ToCString(kVpadal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpadd, kFpNeon); + os() << ToCString(kVpadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpaddl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVpaddl, kFpNeon); + os() << ToCString(kVpaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpaddl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVpaddl, kFpNeon); + os() << ToCString(kVpaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpmax, kFpNeon); + os() << ToCString(kVpmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpmin, kFpNeon); + os() << ToCString(kVpmin) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpop(Condition cond, DataType dt, DRegisterList dreglist) { + os().SetCurrentInstruction(kVpop, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpop) << ConditionPrinter(it_block_, cond) << dt << " " + << dreglist; +} + +void 
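+  // S-register-list variant; mirrors the D-register form above.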
Disassembler::vpop(Condition cond, DataType dt, SRegisterList sreglist) { + os().SetCurrentInstruction(kVpop, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpop) << ConditionPrinter(it_block_, cond) << dt << " " + << sreglist; +} + +void Disassembler::vpush(Condition cond, DataType dt, DRegisterList dreglist) { + os().SetCurrentInstruction(kVpush, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpush) << ConditionPrinter(it_block_, cond) << dt << " " + << dreglist; +} + +void Disassembler::vpush(Condition cond, DataType dt, SRegisterList sreglist) { + os().SetCurrentInstruction(kVpush, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpush) << ConditionPrinter(it_block_, cond) << dt << " " + << sreglist; +} + +void Disassembler::vqabs(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVqabs, kFpNeon); + os() << ToCString(kVqabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqabs(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqabs, kFpNeon); + os() << ToCString(kVqabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqadd, kFpNeon); + os() << ToCString(kVqadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqadd, kFpNeon); + os() << ToCString(kVqadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmlal, kFpNeon); + os() << ToCString(kVqdmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVqdmlal, kFpNeon); + os() << ToCString(kVqdmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmlsl, kFpNeon); + os() << ToCString(kVqdmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVqdmlsl, kFpNeon); + os() << ToCString(kVqdmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, 
QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmull, kFpNeon); + os() << ToCString(kVqdmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmull, kFpNeon); + os() << ToCString(kVqdmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqmovn, kFpNeon); + os() << ToCString(kVqmovn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqmovun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqmovun, kFpNeon); + os() << ToCString(kVqmovun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqneg(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVqneg, kFpNeon); + os() << ToCString(kVqneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqneg(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqneg, kFpNeon); + os() << ToCString(kVqneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << 
rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + os().SetCurrentInstruction(kVqrshl, kFpNeon); + os() << ToCString(kVqrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + os().SetCurrentInstruction(kVqrshl, kFpNeon); + os() << ToCString(kVqrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqrshrn, kFpNeon); + os() << ToCString(kVqrshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqrshrun, kFpNeon); + os() << ToCString(kVqrshrun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVqshl, kFpNeon); + os() << ToCString(kVqshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshl, kFpNeon); + os() << ToCString(kVqshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVqshlu, kFpNeon); + os() << ToCString(kVqshlu) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshlu, kFpNeon); + os() << ToCString(kVqshlu) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshrn, kFpNeon); + os() << ToCString(kVqshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + 
os().SetCurrentInstruction(kVqshrun, kFpNeon); + os() << ToCString(kVqshrun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqsub, kFpNeon); + os() << ToCString(kVqsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqsub, kFpNeon); + os() << ToCString(kVqsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVraddhn, kFpNeon); + os() << ToCString(kVraddhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vrecpe(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrecpe, kFpNeon); + os() << ToCString(kVrecpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrecpe(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrecpe, kFpNeon); + os() << ToCString(kVrecpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrecps, kFpNeon); + os() << ToCString(kVrecps) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrecps, kFpNeon); + os() << ToCString(kVrecps) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrev16(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev16, kFpNeon); + os() << ToCString(kVrev16) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev16(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev16, kFpNeon); + os() << ToCString(kVrev16) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev32(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev32, kFpNeon); + os() << ToCString(kVrev32) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev32(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev32, kFpNeon); + os() << ToCString(kVrev32) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev64(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev64, kFpNeon); + os() << ToCString(kVrev64) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void 
Disassembler::vrev64(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev64, kFpNeon); + os() << ToCString(kVrev64) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrhadd, kFpNeon); + os() << ToCString(kVrhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrhadd, kFpNeon); + os() << ToCString(kVrhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrinta(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrinta(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrinta(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintr(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintr, kFpNeon); + os() << ToCString(kVrintr) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintr, kFpNeon); + os() 
<< ToCString(kVrintr) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintx(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintx(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintx(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); + os() << ToCString(kVrintz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintz(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); + os() << ToCString(kVrintz) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintz(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); + os() << ToCString(kVrintz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + os().SetCurrentInstruction(kVrshl, kFpNeon); + os() << ToCString(kVrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + os().SetCurrentInstruction(kVrshl, kFpNeon); + os() << ToCString(kVrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVrshr, kFpNeon); + os() << ToCString(kVrshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrshr, kFpNeon); + os() << ToCString(kVrshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrshrn, kFpNeon); + os() << ToCString(kVrshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vrsqrte(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrsqrte, kFpNeon); + os() << ToCString(kVrsqrte) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrsqrte(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrsqrte, kFpNeon); + os() 
<< ToCString(kVrsqrte) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrsqrts, kFpNeon); + os() << ToCString(kVrsqrts) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrsqrts, kFpNeon); + os() << ToCString(kVrsqrts) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVrsra, kFpNeon); + os() << ToCString(kVrsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrsra, kFpNeon); + os() << ToCString(kVrsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrsubhn, kFpNeon); + os() << ToCString(kVrsubhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vseleq(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVseleq, kFpNeon); + os() << ToCString(kVseleq) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vseleq(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVseleq, kFpNeon); + os() << ToCString(kVseleq) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselge(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselge, kFpNeon); + os() << ToCString(kVselge) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselge(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselge, kFpNeon); + os() << ToCString(kVselge) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselgt(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselgt, kFpNeon); + os() << ToCString(kVselgt) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselgt(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselgt, kFpNeon); + os() << ToCString(kVselgt) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselvs(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselvs, kFpNeon); + os() << ToCString(kVselvs) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselvs(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselvs, kFpNeon); + os() << ToCString(kVselvs) << dt << " " << rd << ", " << rn << ", " << rm; +} + 
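+// Printing convention used throughout these NEON helpers: when the +// destination register is the same as the first source register and +// use_short_hand_form_ is set, the destination is omitted, matching the +// assembler's two-operand shorthand. Illustrative output (assumed names): +// vsub.i16 d0, d0, d1 prints as "vsub.i16 d0, d1", while +// vsub.i16 d2, d0, d1 prints in full as "vsub.i16 d2, d0, d1". +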
+void Disassembler::vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshl, kFpNeon); + os() << ToCString(kVshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshl, kFpNeon); + os() << ToCString(kVshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshll, kFpNeon); + os() << ToCString(kVshll) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshr, kFpNeon); + os() << ToCString(kVshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshr, kFpNeon); + os() << ToCString(kVshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshrn, kFpNeon); + os() << ToCString(kVshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsli, kFpNeon); + os() << ToCString(kVsli) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsli, kFpNeon); + os() << ToCString(kVsli) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsqrt(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVsqrt, kFpNeon); + os() << ToCString(kVsqrt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vsqrt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVsqrt, kFpNeon); + os() << ToCString(kVsqrt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsra, kFpNeon); + os() << ToCString(kVsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", 
" << operand; +} + +void Disassembler::vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsra, kFpNeon); + os() << ToCString(kVsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsri, kFpNeon); + os() << ToCString(kVsri) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsri, kFpNeon); + os() << ToCString(kVsri) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst1, kFpNeon); + os() << ToCString(kVst1) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst1Location, operand); +} + +void Disassembler::vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst2, kFpNeon); + os() << ToCString(kVst2) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst2Location, operand); +} + +void Disassembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst3, kFpNeon); + os() << ToCString(kVst3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst3Location, operand); +} + +void Disassembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + os().SetCurrentInstruction(kVst3, kFpNeon); + os() << ToCString(kVst3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintMemOperand(kVst3Location, operand); +} + +void Disassembler::vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst4, kFpNeon); + os() << ToCString(kVst4) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst4Location, operand); +} + +void Disassembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstmdb, + 
kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVstr, kFpNeon); + os() << ToCString(kVstr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintMemOperand(kStoreDoublePrecisionLocation, operand); +} + +void Disassembler::vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVstr, kFpNeon); + os() << ToCString(kVstr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintMemOperand(kStoreSinglePrecisionLocation, operand); +} + +void Disassembler::vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVsubhn, kFpNeon); + os() << ToCString(kVsubhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsubl, kFpNeon); + os() << ToCString(kVsubl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsubw, kFpNeon); + os() << 
ToCString(kVsubw) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vswp(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVswp, kFpNeon); + os() << ToCString(kVswp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vswp(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVswp, kFpNeon); + os() << ToCString(kVswp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + os().SetCurrentInstruction(kVtbl, kFpNeon); + os() << ToCString(kVtbl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << nreglist << ", " << rm; +} + +void Disassembler::vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + os().SetCurrentInstruction(kVtbx, kFpNeon); + os() << ToCString(kVtbx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << nreglist << ", " << rm; +} + +void Disassembler::vtrn(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVtrn, kFpNeon); + os() << ToCString(kVtrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vtrn(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVtrn, kFpNeon); + os() << ToCString(kVtrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVtst, kFpNeon); + os() << ToCString(kVtst) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVtst, kFpNeon); + os() << ToCString(kVtst) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vuzp(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVuzp, kFpNeon); + os() << ToCString(kVuzp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vuzp(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVuzp, kFpNeon); + os() << ToCString(kVuzp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vzip(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVzip, kFpNeon); + os() << ToCString(kVzip) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vzip(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVzip, kFpNeon); + os() << ToCString(kVzip) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::yield(Condition cond, EncodingSize size) { + os().SetCurrentInstruction(kYield, kNoAttribute); + os() << ToCString(kYield) << ConditionPrinter(it_block_, 
cond) << size; +} + +// Returns the size in bytes (2 or 4) of the T32 instruction whose first +// halfword is in the top 16 bits of 'instr': first halfwords starting +// 0b11101, 0b11110 or 0b11111 begin a 32-bit encoding; everything else +// (including 0b11100, the 16-bit B) is a 16-bit instruction. +int Disassembler::T32Size(uint32_t instr) { + if ((instr & 0xe0000000) == 0xe0000000) { + switch (instr & 0x08000000) { + case 0x00000000: + if ((instr & 0x10000000) == 0x10000000) return 4; + return 2; + case 0x08000000: + return 4; + default: + return 2; + } + } + return 2; +} + +void Disassembler::DecodeT32(uint32_t instr) { + T32CodeAddressIncrementer incrementer(instr, &code_address_); + ITBlockScope it_scope(&it_block_); + + switch (instr & 0xe0000000) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x18000000) { + case 0x18000000: { + // 0x18000000 + switch (instr & 0x06000000) { + case 0x00000000: { + // 0x18000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + unsigned rm = (instr >> 22) & 0x7; + if (InITBlock()) { + // ADD{<q>} <Rd>, <Rn>, <Rm> ; T1 + add(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADDS{<q>} {<Rd>}, <Rn>, <Rm> ; T1 + adds(Condition::None(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } + break; + } + case 0x02000000: { + // 0x1a000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + unsigned rm = (instr >> 22) & 0x7; + if (InITBlock()) { + // SUB{<q>} <Rd>, <Rn>, <Rm> ; T1 + sub(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SUBS{<q>} {<Rd>}, <Rn>, <Rm> ; T1 + subs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } + break; + } + case 0x04000000: { + // 0x1c000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + uint32_t imm = (instr >> 22) & 0x7; + if (InITBlock()) { + // ADD{<q>} <Rd>, <Rn>, #<imm3> ; T1 + add(CurrentCond(), Narrow, Register(rd), Register(rn), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADDS{<q>} <Rd>, <Rn>, #<imm3> ; T1 + adds(Condition::None(), + Narrow, + Register(rd), + Register(rn), + imm); + } + break; + } + case 0x06000000: { + // 0x1e000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + uint32_t imm = (instr >> 22) & 0x7; + if (InITBlock()) { + // SUB{<q>} <Rd>, <Rn>, #<imm3> ; T1 + sub(CurrentCond(), Narrow, Register(rd), Register(rn), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SUBS{<q>} <Rd>, <Rn>, #<imm3> ; T1 + subs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + imm); + } + break; + } + } + break; + } + default: { + if (((instr & 0x18000000) == 0x18000000)) { + UnallocatedT32(instr); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x2)) && + InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // ASR{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + asr(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x2)) && + !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // ASRS{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + asrs(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0x07c00000) != 0x00000000) && InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + // LSL{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + lsl(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + }
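+ // Decoding note: for these narrow encodings the printed form depends on + // the IT state. Inside an IT block the instruction leaves the flags alone + // and takes the block's condition (ADD, SUB, ASR, LSL, ...; see + // CurrentCond()); outside an IT block the same encoding sets the flags + // and is printed as the S-form with no condition (ADDS, SUBS, ASRS, ...).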
+ if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0x07c00000) != 0x00000000) && !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + // LSLS{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + lsls(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x1)) && + InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // LSR{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + lsr(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x1)) && + !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // LSRS{<q>} {<Rd>}, <Rm>, #<imm> ; T2 + lsrs(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + ImmediateShiftOperand shift_operand((instr >> 27) & 0x3, + (instr >> 22) & 0x1f); + if (InITBlock()) { + // MOV{<q>} <Rd>, <Rm> {, <shift> #<amount> } ; T2 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rd>, <Rm> {, <shift> #<amount> } ; T2 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + } + break; + } + } + break; + } + case 0x20000000: { + // 0x20000000 + switch (instr & 0x18000000) { + case 0x00000000: { + // 0x20000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock()) { + // MOV{<q>} <Rd>, #<imm8> ; T1 + mov(CurrentCond(), Narrow, Register(rd), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rd>, #<imm8> ; T1 + movs(Condition::None(), Narrow, Register(rd), imm); + } + break; + } + case 0x08000000: { + // 0x28000000 + unsigned rn = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + // CMP{<c>}{<q>} <Rn>, #<imm8> ; T1 + cmp(CurrentCond(), Narrow, Register(rn), imm); + break; + } + case 0x10000000: { + // 0x30000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock() && ((imm <= 7))) { + // ADD{<q>} <Rdn>, #<imm8> ; T2 + add(CurrentCond(), Register(rd), imm); + } else if (InITBlock() && ((imm > 7))) { + // ADD{<q>} {<Rdn>}, <Rdn>, #<imm8> ; T2 + add(CurrentCond(), Narrow, Register(rd), Register(rd), imm); + } else if (OutsideITBlock() && ((imm <= 7))) { + // ADDS{<q>} <Rdn>, #<imm8> ; T2 + adds(Register(rd), imm); + } else if (OutsideITBlock() && ((imm > 7))) { + // ADDS{<q>} {<Rdn>}, <Rdn>, #<imm8> ; T2 + adds(Condition::None(), Narrow, Register(rd), Register(rd), imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x18000000: { + // 0x38000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock() && ((imm <= 7))) { + // SUB{<q>} <Rdn>, #<imm8> ; T2 + sub(CurrentCond(), Register(rd), imm); + } else if (InITBlock() && ((imm > 7))) { + // SUB{<q>} {<Rdn>}, <Rdn>, #<imm8> ; T2 + sub(CurrentCond(), Narrow, Register(rd), Register(rd), imm); + } else if (OutsideITBlock() && ((imm <= 7))) { + // SUBS{<q>} <Rdn>, #<imm8> ; T2 + subs(Register(rd), imm); + } else if (OutsideITBlock() && ((imm > 7))) { + // SUBS{<q>} {<Rdn>}, <Rdn>, #<imm8> ; T2 + subs(Condition::None(), Narrow, Register(rd), Register(rd), imm); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x40000000: { + // 0x40000000 + switch (instr & 0x18000000) { + case 0x00000000: { + // 0x40000000 + switch (instr & 0x07000000) {
+ case 0x00000000: { + // 0x40000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x40000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // AND{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + and_(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ANDS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + ands(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00400000: { + // 0x40400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // EOR{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + eor(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // EORS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + eors(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00800000: { + // 0x40800000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSL{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + lsl(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSLS{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + lsls(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{<q>} <Rdm>, <Rdm>, LSL <Rs> ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), LSL, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rdm>, <Rdm>, LSL <Rs> ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), LSL, Register(rs))); + } + break; + } + case 0x00c00000: { + // 0x40c00000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSR{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + lsr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSRS{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + lsrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{<q>} <Rdm>, <Rdm>, LSR <Rs> ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), LSR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rdm>, <Rdm>, LSR <Rs> ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), LSR, Register(rs))); + } + break; + } + } + break; + }
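+ // NB: the MOV/MOVS fallback at the end of each shift-by-register case + // here is dead code as generated: the two guarded blocks above it return + // for both InITBlock() outcomes, so the fallback can never be reached.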
+ case 0x01000000: { + // 0x41000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x41000000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // ASR{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + asr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // ASRS{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + asrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{<q>} <Rdm>, <Rdm>, ASR <Rs> ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), ASR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rdm>, <Rdm>, ASR <Rs> ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), ASR, Register(rs))); + } + break; + } + case 0x00400000: { + // 0x41400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // ADC{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + adc(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADCS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + adcs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00800000: { + // 0x41800000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // SBC{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + sbc(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SBCS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + sbcs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00c00000: { + // 0x41c00000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // ROR{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + ror(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // RORS{<q>} {<Rdm>}, <Rdm>, <Rs> ; T1 + rors(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{<q>} <Rdm>, <Rdm>, ROR <Rs> ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), ROR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{<q>} <Rdm>, <Rdm>, ROR <Rs> ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), ROR, Register(rs))); + } + break; + } + } + break; + } + case 0x02000000: { + // 0x42000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x42000000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // TST{<c>}{<q>} <Rn>, <Rm> ; T1 + tst(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x00400000: { + // 0x42400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + if (InITBlock()) { + // RSB{<q>} {<Rd>}, <Rn>, #0 ; T1 + rsb(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + UINT32_C(0)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // RSBS{<q>} {<Rd>}, <Rn>, #0 ; T1 + rsbs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + UINT32_C(0)); + } + break; + } + case 0x00800000: { + // 0x42800000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // CMP{<c>}{<q>} <Rn>, <Rm> ; T1 + cmp(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x00c00000: { + // 0x42c00000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // CMN{<c>}{<q>} <Rn>, <Rm> ; T1 + cmn(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + } + break; + } + case 0x03000000: { + // 0x43000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x43000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // ORR{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + orr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ORRS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + orrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + }
+ case 0x00400000: { + // 0x43400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + if (InITBlock()) { + // MUL{<q>} <Rdm>, <Rn>, {<Rdm>} ; T1 + mul(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rd)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MULS{<q>} <Rdm>, <Rn>, {<Rdm>} ; T1 + muls(Condition::None(), + Register(rd), + Register(rn), + Register(rd)); + } + break; + } + case 0x00800000: { + // 0x43800000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // BIC{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + bic(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // BICS{<q>} {<Rdn>}, <Rdn>, <Rm> ; T1 + bics(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00c00000: { + // 0x43c00000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // MVN{<q>} <Rd>, <Rm> ; T1 + mvn(CurrentCond(), Narrow, Register(rd), Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MVNS{<q>} <Rd>, <Rm> ; T1 + mvns(Condition::None(), Narrow, Register(rd), Register(rm)); + } + break; + } + } + break; + } + case 0x04000000: { + // 0x44000000 + switch (instr & 0x00780000) { + case 0x00680000: { + // 0x44680000 + unsigned rd = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + // ADD{<c>}{<q>} {<Rdm>}, SP, <Rdm> ; T1 + add(CurrentCond(), Narrow, Register(rd), sp, Register(rd)); + break; + } + default: { + switch (instr & 0x00870000) { + case 0x00850000: { + // 0x44850000 + if (((instr & 0x780000) == 0x680000)) { + UnallocatedT32(instr); + return; + } + unsigned rm = (instr >> 19) & 0xf; + // ADD{<c>}{<q>} {SP}, SP, <Rm> ; T2 + add(CurrentCond(), Narrow, sp, sp, Register(rm)); + break; + } + default: { + if (((instr & 0x780000) == 0x680000) || + ((instr & 0x870000) == 0x850000)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + if (InITBlock()) { + // ADD{<q>} <Rdn>, <Rm> ; T2 + add(CurrentCond(), Register(rd), Register(rm)); + } else { + // ADD{<c>}{<q>} {<Rdn>}, <Rdn>, <Rm> ; T2 + add(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + } + break; + } + } + break; + } + case 0x05000000: { + // 0x45000000 + unsigned rn = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + // CMP{<c>}{<q>} <Rn>, <Rm> ; T2 + cmp(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x06000000: { + // 0x46000000 + unsigned rd = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + // MOV{<c>}{<q>} <Rd>, <Rm> ; T1 + mov(CurrentCond(), Narrow, Register(rd), Register(rm)); + break; + } + case 0x07000000: { + // 0x47000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x47000000 + unsigned rm = (instr >> 19) & 0xf; + // BX{<c>}{<q>} <Rm> ; T1 + bx(CurrentCond(), Register(rm)); + if (((instr & 0xff870000) != 0x47000000)) { + UnpredictableT32(instr); + } + break; + } + case 0x00800000: { + // 0x47800000 + unsigned rm = (instr >> 19) & 0xf; + // BLX{<c>}{<q>} <Rm> ; T1 + blx(CurrentCond(), Register(rm)); + if (((instr & 0xff870000) != 0x47800000)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x08000000: { + // 0x48000000 + unsigned rt = (instr >> 24) & 0x7; + int32_t imm = ((instr >> 16) & 0xff) << 2; + Location location(imm, kT32PcDelta); + // LDR{<c>}{<q>} <Rt>, <label> ; T1
+ // VST4{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
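+ // Register-list spacing in these multi-structure stores: kSingle lists + // use consecutive D registers (e.g. {d0,d1,d2,d3}), kDouble lists use + // every second register (e.g. {d0,d2,d4,d6}); hence + // last = first + (length - 1) * (spacing == kSingle ? 1 : 2).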
+ case 0x00000100: { + // 0xf900010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000200: { + // 0xf900020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000300: { + // 0xf900030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000400: { + // 0xf900040d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000500: { + // 0xf900050d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000600: { + // 0xf900060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000700: { + // 0xf900070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000800: { + // 0xf900080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
+ case 0x00000900: { + // 0xf900090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + }
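+ // The 0xf900xx0d decodes above correspond, per the AArch32 VSTn + // addressing forms, to Rm == 0b1101 (post-index write-back by transfer + // size, printed as [<Rn>{:<align>}]!). The branch below handles the + // matching Rm == 0b1111 encodings: the same stores with plain offset + // addressing ([<Rn>{:<align>}]) and no write-back.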
<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf900000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf900000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; T1 NOLINT(whitespace/line_length)
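// The enclosing switches on (instr & 0x0000000d) and (instr & 0x00000002)
// classify the Rm field (bits 3:0) into the three Advanced SIMD
// structure load/store addressing modes: Rm == 0b1101 selects
// write-back by the transfer size ("[<Rn>{:<align>}]!"), Rm == 0b1111
// selects a plain offset ("[<Rn>{:<align>}]"), and any other Rm is a
// post-indexed register ("[<Rn>{:<align>}], <Rm>"). A sketch of the
// same classification (hypothetical names, not vixl's API):
enum NeonAddrMode { kOffsetMode, kPostIndexImmMode, kPostIndexRegMode };
static NeonAddrMode ClassifyNeonAddrMode(unsigned instr) {
  unsigned rm = instr & 0xf;
  if (rm == 0xd) return kPostIndexImmMode;  // "...]!" with write-back
  if (rm == 0xf) return kOffsetMode;        // "...]" without write-back
  return kPostIndexRegMode;                 // "...], <Rm>"
}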
+ vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf900010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{<c>}{<q>}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf900020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
<dt> <list>, [<Rn>{:<align>}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf900030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; T1 NOLINT(whitespace/line_length)
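// VST2/VLD2 lists may be single-spaced ({<Dd>, <Dd+1>}) or
// double-spaced ({<Dd>, <Dd+2>}), which is why the decode above
// computes last = first + (length - 1) * stride with a stride of 1 or
// 2. A standalone sketch (hypothetical helper, illustration only):
static unsigned LastListRegister(unsigned first, unsigned length,
                                 bool double_spaced) {
  unsigned stride = double_spaced ? 2 : 1;
  // e.g. first == 1, length == 2, double-spaced: {d1, d3} -> returns 3.
  return first + (length - 1) * stride;
}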
+ vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf900040d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{<c>}{<q>}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf900050d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf900060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf900070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf900080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf900090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf9000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
<dt> <list>, [<Rn>{:<align>}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf9000000 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; T1 NOLINT(whitespace/line_length)
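// ExtractDRegister(instr, 22, 12) reassembles the 5-bit D-register
// index from its split D:Vd encoding, with the single D bit at
// position 22 and the 4-bit Vd field at bits 15:12. A sketch of the
// equivalent computation (illustrative only, not vixl's
// implementation):
static unsigned DRegisterIndex(unsigned instr) {
  unsigned d = (instr >> 22) & 0x1;   // high bit of the register number
  unsigned vd = (instr >> 12) & 0xf;  // low four bits
  return (d << 4) | vd;               // yields 0..31, i.e. d0..d31
}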
+ vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf9000100 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{<c>}{<q>}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf9000200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf9000300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf9000400 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf9000500 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf9000600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf9000700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf9000800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf9000900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9000a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
<dt> <list>, [<Rn>{:<align>}], <Rm> ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01200000: { + // 0xf9200000 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf920000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf920000d + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf920000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length)
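// The 0xf9000000 block above handles the stores (VST1-VST4) and this
// 0xf9200000 block the loads (VLD1-VLD4); the two opcodes differ only
// in bit 21, the L (load) bit of the Advanced SIMD multiple-structure
// encodings. A sketch of that discrimination (hypothetical helper,
// not part of the patch):
static bool IsNeonStructureLoad(unsigned instr) {
  return ((instr >> 21) & 0x1) != 0;  // L == 1 -> VLDn, L == 0 -> VSTn
}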
+ vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf920010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{<c>}{<q>}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf920020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf920030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 NOLINT(whitespace/line_length)
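// Dt_size_7_Decode maps the size field (bits 7:6) to the element type:
// 0b00 -> 8-bit, 0b01 -> 16-bit, 0b10 -> 32-bit, with 0b11 rejected as
// invalid for the 2/3/4-structure forms (the Dt_size_6 variant used by
// VST1/VLD1 additionally accepts 0b11 as 64-bit). A sketch of the
// narrower mapping (hypothetical helper; vixl itself returns a
// DataType):
static unsigned ElementBitsOrZero(unsigned instr) {
  switch ((instr >> 6) & 0x3) {
    case 0: return 8;
    case 1: return 16;
    case 2: return 32;
    default: return 0;  // size == 0b11 is unallocated for VLDn/VSTn, n > 1
  }
}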
+ vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf920040d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{<c>}{<q>}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf920050d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf920060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf920070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf920080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf920090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf920000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf920000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf920010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf920020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf920030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf920040d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf920050d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf920060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf920070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf920080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf920090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf9200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf9200000 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf9200100 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf9200200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf9200300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf9200400 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf9200500 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf9200600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf9200700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf9200800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf9200900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9200a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01800000: { + // 0xf9800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf9800000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800c00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
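// Structurally, this whole generated file is a tree of `switch (instr & MASK)`
// tests: each level pins down a few opcode bits, then dispatches into a
// narrower sub-table (here, `instr & 0x00000300` separates the single-element
// VST1/VST2/VST3/VST4 forms). A miniature of the same pattern with an
// explicit mask/value table (illustrative only, not VIXL's internals):
#include <cstdint>
#include <cstdio>
struct DecodeEntry {
  uint32_t mask;
  uint32_t value;
  const char* what;
};
static const DecodeEntry kLevel[] = {
    {0x00000300, 0x00000000, "VST1 (single element)"},
    {0x00000300, 0x00000100, "VST2 (single element)"},
    {0x00000300, 0x00000200, "VST3 (single element)"},
    {0x00000300, 0x00000300, "VST4 (single element)"},
};
static const char* Classify(uint32_t instr) {
  for (const DecodeEntry& e : kLevel) {
    if ((instr & e.mask) == e.value) return e.what;
  }
  return "unallocated";
}
int main() {
  std::printf("%s\n", Classify(0xf9800100));  // prints "VST2 (single element)"
  return 0;
}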
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf9800100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800d00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
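// Every post-indexed NEON transfer in this section reuses one encoding trick:
// the Rm field selects the addressing flavour. Rm = 0b1101 (SP) means
// "writeback by the transfer size" and is printed as [<Rn>{:<align>}]!;
// Rm = 0b1111 (PC) means "no writeback", printed as [<Rn>{:<align>}]; any
// other value is a true register post-index, [<Rn>{:<align>}], <Rm>. That is
// what the `(instr & 0xd) == 0xd` test followed by bit 1 distinguishes above.
// Sketch (hypothetical enum, not VIXL's types):
#include <cstdint>
enum NeonAddrMode { kWritebackBang, kPlainOffset, kRegisterPostIndex };
inline NeonAddrMode DecodeNeonAddrMode(uint32_t instr) {
  unsigned rm = instr & 0xf;
  if (rm == 0xd) return kWritebackBang;  // [<Rn>]!
  if (rm == 0xf) return kPlainOffset;    // [<Rn>]
  return kRegisterPostIndex;             // [<Rn>], <Rm>
}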
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf9800200 + switch (instr & 0x00000c30) { + case 0x00000010: { + // 0xf9800210 + UnallocatedT32(instr); + break; + } + case 0x00000030: { + // 0xf9800230 + UnallocatedT32(instr); + break; + } + case 0x00000410: { + // 0xf9800610 + UnallocatedT32(instr); + break; + } + case 0x00000430: { + // 0xf9800630 + UnallocatedT32(instr); + break; + } + case 0x00000810: { + // 0xf9800a10 + UnallocatedT32(instr); + break; + } + case 0x00000820: { + // 0xf9800a20 + UnallocatedT32(instr); + break; + } + case 0x00000830: { + // 0xf9800a30 + UnallocatedT32(instr); + break; + } + case 0x00000c00: { + // 0xf9800e00 + UnallocatedT32(instr); + break; + } + case 0x00000c10: { + // 0xf9800e10 + UnallocatedT32(instr); + break; + } + case 0x00000c20: { + // 0xf9800e20 + UnallocatedT32(instr); + break; + } + case 0x00000c30: { + // 0xf9800e30 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980020d + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf980020f + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [] ; T1 + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VST3{}{}.
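// Note the asymmetry above: the single-lane VST3 forms build a plain
// MemOperand, while the single-lane VST1/VST2/VST4 forms build an
// AlignedMemOperand. A 3-element structure touches 3, 6 or 12 bytes, never a
// power of two, so the lane forms of VST3/VLD3 have no alignment field and
// ":<align>" is not part of their syntax (the multi-structure VLD3 decoded
// earlier still allows an alignment hint). Hedged sketch of the lane-form
// rule, stated from the architecture rather than from code in this diff:
inline bool LaneAlignQualifierAllowed(int elements_per_structure) {
  return elements_per_structure != 3;
}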
, [], # ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf9800300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800f00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01a00000: { + // 0xf9a00000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf9a00000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00c00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00c0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00c0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00c0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
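// The kAllLanes forms decoded above (VLD1 "to all lanes") read a single
// element and replicate it into every lane of each listed register; bit 5
// only selects a one- or two-register list. A conceptual sketch of the
// replicate semantics for 8-bit elements, assuming the usual ARM pseudocode
// in which the address does not advance between registers (this is not
// VIXL's simulator code):
#include <cstdint>
#include <cstring>
inline void Vld1AllLanes8(uint8_t d[][8], unsigned num_regs,
                          const uint8_t* mem) {
  for (unsigned r = 0; r < num_regs; ++r) {
    std::memset(d[r], mem[0], 8);  // broadcast one byte to all 8 lanes
  }
}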
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf9a00100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00d00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00d0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00d0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00d0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf9a00200 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00e00 + switch (instr & 0x00000010) { + case 0x00000000: { + // 0xf9a00e00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00e0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00e0d + DataType dt = Dt_size_7_Decode( + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister( + first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00e0f + DataType dt = Dt_size_7_Decode( + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister( + first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0020d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0020f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; T1 + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf9a00300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00f00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00f0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00f0d + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00f0f + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x10100000: { + // 0xf8100000 + switch (instr & 0x01400000) { + case 0x00000000: { + // 0xf8100000 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xf81f0000 + switch (instr & 0x0000f000) { + case 0x0000f000: { + // 0xf81ff000 + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xfff; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kT32PcDelta); + // PLD{}{}
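// The PLD (literal) decode above folds the U bit into a signed immediate and
// keeps a separate minus_zero flag so that "#-0" can be reproduced exactly.
// kT32PcDelta reflects T32 PC-relative addressing: the base is the
// instruction address plus 4, aligned down to a word. A sketch of the target
// computation under those assumptions:
#include <cstdint>
inline uint32_t T32LiteralTarget(uint32_t instr_addr, int32_t imm) {
  uint32_t base = (instr_addr + 4) & ~UINT32_C(3);  // Align(PC, 4)
  return base + static_cast<uint32_t>(imm);         // imm is already signed
}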
, [{, #{+/-}}] ; T1 + vstr(CurrentCond(), + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x00200000: { + // 0xed200a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd))) { + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VPUSH{}{}{.} ; T2 + vpush(CurrentCond(), + kDataTypeValueNone, + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + return; + } + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VSTMDB{}{}{.} !, ; T2 + vstmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00200100: { + // 0xed200b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0xed200b00 + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd))) { + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VPUSH{}{}{.} ; T1 + vpush(CurrentCond(), + kDataTypeValueNone, + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + return; + } + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VSTMDB{}{}{.} !, ; T1 + vstmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + break; + } + case 0x00000001: { + // 0xed200b01 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FSTMDBX{}{} !, ; T1 + fstmdbx(CurrentCond(), + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01000e00: { + // 0xed000e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0xed005e00 + UnimplementedT32_32("STC", instr); + break; + } + case 0x00205000: { + // 0xed205e00 + UnimplementedT32_32("STC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x01100a00: { + // 0xed100a00 + switch (instr & 0x00200100) { + case 0x00000000: { + // 0xed100a00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xed1f0a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xff; + imm <<= 2; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kT32PcDelta); + // VLDR{}{}{.32} ,
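// Two details of the VSTMDB/VPUSH decode above are worth pulling out: the
// alias (VSTMDB with Rn == SP and writeback disassembles as VPUSH), and the
// UnpredictableT32 guards on the register list. An empty list, or one that
// runs past the last S/D register, is architecturally unpredictable, and
// D-register lists are additionally capped at 16 entries. Sketch with
// hypothetical helper names:
#include <cstdint>
inline bool IsVpushAlias(unsigned rn, bool writeback) {
  return writeback && (rn == 13);  // Rn is SP
}
inline bool SRegListUnpredictable(unsigned first, unsigned len) {
  const unsigned kNumS = 32;  // kNumberOfSRegisters
  return (len == 0) || (first + len > kNumS);
}
inline bool DRegListUnpredictable(unsigned first, unsigned len,
                                  unsigned max_d /* 16 or 32 */) {
  return (len == 0) || (len > 16) || (first + len > max_d);
}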
,
, [{, #{+/-}}] ; T1 NOLINT(whitespace/line_length) + vldr(CurrentCond(), + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + } + break; + } + case 0x00200000: { + // 0xed300a00 + if ((instr & 0x00800000) == 0x00000000) { + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VLDMDB{}{}{.} !, ; T2 + vldmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00200100: { + // 0xed300b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0xed300b00 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VLDMDB{}{}{.} !, ; T1 + vldmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + break; + } + case 0x00000001: { + // 0xed300b01 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FLDMDBX{}{} !, ; T1 + fldmdbx(CurrentCond(), + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01100e00: { + // 0xed100e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0xed105e00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xed1f5e00 + UnimplementedT32_32("LDC", instr); + break; + } + default: { + if (((instr & 0xf0000) == 0xf0000)) { + UnallocatedT32(instr); + return; + } + UnimplementedT32_32("LDC", instr); + break; + } + } + break; + } + case 0x00205000: { + // 0xed305e00 + if (((instr & 0xf0000) == 0xf0000)) { + UnallocatedT32(instr); + return; + } + UnimplementedT32_32("LDC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x06000000: { + // 0xee000000 + switch (instr & 0x01000010) { + case 0x00000000: { + // 0xee000000 + switch (instr & 0x10b00f40) { + case 0x00000a00: { + // 0xee000a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; T2 + vmla(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000a40: { + // 0xee000a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; T2 + vmls(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000b00: { + // 0xee000b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F64
<Dd>, <Dn>, <Dm> ; T2 + vmla(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b40: { + // 0xee000b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0);
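// ExtractSRegister and ExtractDRegister, used throughout this VFP block,
// reassemble a register number from a split field: for S registers the
// four-bit part is the high bits and the lone D/N/M bit is the low bit
// (Sd = Vd:D); for D registers it is the reverse (Dd = D:Vd). A sketch under
// that assumption (these are not VIXL's exact signatures):
#include <cstdint>
inline unsigned SRegOf(uint32_t instr, int single_bit, int four_bits) {
  return (((instr >> four_bits) & 0xf) << 1) | ((instr >> single_bit) & 1);
}
inline unsigned DRegOf(uint32_t instr, int single_bit, int four_bits) {
  return (((instr >> single_bit) & 1) << 4) | ((instr >> four_bits) & 0xf);
}
// e.g. SRegOf(instr, 22, 12) mirrors the ExtractSRegister(instr, 22, 12)
// calls in the surrounding decode.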
+ // VMLS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; T2 + vmls(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100a00: { + // 0xee100a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLS{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; T1 + vnmls(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00100a40: { + // 0xee100a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLA{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; T1 + vnmla(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00100b00: { + // 0xee100b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLS{<c>}{<q>}.F64
<Dd>, <Dn>, <Dm> ; T1 + vnmls(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100b40: { + // 0xee100b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLA{<c>}{<q>}.F64
<Dd>, <Dn>, <Dm> ; T1 + vnmla(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200a00: { + // 0xee200a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMUL{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T2 + vmul(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200a40: { + // 0xee200a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMUL{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T1 + vnmul(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200b00: { + // 0xee200b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{<c>}{<q>}.F64 {<Dd>
}, <Dn>, <Dm> ; T2 + vmul(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200b40: { + // 0xee200b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMUL{<c>}{<q>}.F64 {<Dd>
}, <Dn>, <Dm> ; T1 + vnmul(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300a00: { + // 0xee300a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VADD{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T2 + vadd(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00300a40: { + // 0xee300a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSUB{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T2 + vsub(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00300b00: { + // 0xee300b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{<c>}{<q>}.F64 {<Dd>
}, <Dn>, <Dm> ; T2 + vadd(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300b40: { + // 0xee300b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{<c>}{<q>}.F64 {<Dd>
}, <Dn>, <Dm> ; T2 + vsub(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800a00: { + // 0xee800a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VDIV{<c>}{<q>}.F32 {<Sd>}, <Sn>, <Sm> ; T1 + vdiv(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800b00: { + // 0xee800b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDIV{<c>}{<q>}.F64 {<Dd>
}, , ; T1 + vdiv(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00900a00: { + // 0xee900a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMS{}{}.F32 , , ; T1 + vfnms(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00900a40: { + // 0xee900a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMA{}{}.F32 , , ; T1 + vfnma(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00900b00: { + // 0xee900b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMS{}{}.F64
<Dd>, <Dn>, <Dm> ; T1
+          vfnms(CurrentCond(), F64, DRegister(rd), DRegister(rn), DRegister(rm));
+          break;
+        }
+        case 0x00900b40: {
+          // 0xee900b40
+          unsigned rd = ExtractDRegister(instr, 22, 12);
+          unsigned rn = ExtractDRegister(instr, 7, 16);
+          unsigned rm = ExtractDRegister(instr, 5, 0);
+          // VFNMA{<c>}{<q>}.F64
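+          // A rough reminder of the semantics being distinguished here (per
+          // the usual ARM definitions; the signs are easy to mix up):
+          //   VFMA:  d =  d + (n * m)
+          //   VFMS:  d =  d - (n * m)
+          //   VFNMS: d = -d + (n * m)
+          //   VFNMA: d = -d - (n * m)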
, , ; T1 + vfnma(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00a00: { + // 0xeea00a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; T2 + vfma(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00a40: { + // 0xeea00a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; T2 + vfms(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00b00: { + // 0xeea00b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F64
, , ; T2 + vfma(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00b40: { + // 0xeea00b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F64
, , ; T2 + vfms(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00b00a00: { + // 0xeeb00a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F32 , # ; T2 + vmov(CurrentCond(), F32, SRegister(rd), imm); + if (((instr & 0xffb00ff0) != 0xeeb00a00)) { + UnpredictableT32(instr); + } + break; + } + case 0x00b00a40: { + // 0xeeb00a40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0xeeb00a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb00a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMOV{}{}.F32 , ; T2 + vmov(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb00ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VABS{}{}.F32 , ; T2 + vabs(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb10a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNEG{}{}.F32 , ; T2 + vneg(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb10ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSQRT{}{}.F32 , ; T1 + vsqrt(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0xeeb20a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb20a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F32.F16 , ; T1 + vcvtb(CurrentCond(), + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb20ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F32.F16 , ; T1 + vcvtt(CurrentCond(), + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb30a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F16.F32 , ; T1 + vcvtb(CurrentCond(), + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb30ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F16.F32 , ; T1 + vcvtt(CurrentCond(), + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0xeeb40a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb40a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMP{}{}.F32 , ; T1 + vcmp(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb40ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMPE{}{}.F32 , ; T1 + vcmpe(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb50a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMP{}{}.F32 , #0.0 ; T2 + vcmp(CurrentCond(), F32, SRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50a40)) { + UnpredictableT32(instr); + } + break; + } + case 0x00010080: { + // 0xeeb50ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMPE{}{}.F32 
, #0.0 ; T2 + vcmpe(CurrentCond(), F32, SRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50ac0)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0xeeb60a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb60a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTR{}{}.F32 , ; T1 + vrintr(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb60ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTZ{}{}.F32 , ; T1 + vrintz(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb70a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTX{}{}.F32 , ; T1 + vrintx(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb70ac0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.F32
<Dd>, <Sm> ; T1
+              vcvt(CurrentCond(), F64, F32, DRegister(rd), SRegister(rm));
+              break;
+            }
+          }
+          break;
+        }
+        case 0x00080000: {
+          // 0xeeb80a40
+          if ((instr & 0x00010000) == 0x00000000) {
+            DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+            if (dt.Is(kDataTypeValueInvalid)) {
+              UnallocatedT32(instr);
+              return;
+            }
+            unsigned rd = ExtractSRegister(instr, 22, 12);
+            unsigned rm = ExtractSRegister(instr, 5, 0);
+            // VCVT{<c>}{<q>}.F32.
<dt> <Sd>, <Sm> ; T1
+            vcvt(CurrentCond(), F32, dt, SRegister(rd), SRegister(rm));
+          } else {
+            UnallocatedT32(instr);
+          }
+          break;
+        }
+        case 0x000a0000: {
+          // 0xeeba0a40
+          DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) |
+                                         ((instr >> 15) & 0x2));
+          if (dt.Is(kDataTypeValueInvalid)) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rd = ExtractSRegister(instr, 22, 12);
+          unsigned offset = 32;
+          if (dt.Is(S16) || dt.Is(U16)) {
+            offset = 16;
+          }
+          uint32_t fbits = offset - (((instr >> 5) & 0x1) | ((instr << 1) & 0x1e));
+          // VCVT{<c>}{<q>}.F32.
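+          // The fraction width is encoded indirectly: a 5-bit count rebuilt
+          // from instr<5> (low bit) and instr<3:0> (upper bits) is subtracted
+          // from the operand width. A small worked example, assuming a
+          // 32-bit operand:
+          //   encoded = ((instr >> 5) & 0x1) | ((instr << 1) & 0x1e);
+          //   fbits   = 32 - encoded;  // encoded 16 => 16 fraction bits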
, , # ; T1 + vcvt(CurrentCond(), + F32, + dt, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0xeebc0a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeebc0a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.U32.F32 , ; T1 + vcvtr(CurrentCond(), + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeebc0ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.U32.F32 , ; T1 + vcvt(CurrentCond(), + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeebd0a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.S32.F32 , ; T1 + vcvtr(CurrentCond(), + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeebd0ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.S32.F32 , ; T1 + vcvt(CurrentCond(), + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0xeebe0a40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F32 , , # ; T1 + vcvt(CurrentCond(), + dt, + F32, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + } + break; + } + case 0x00b00b00: { + // 0xeeb00b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F64
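+          // ImmediateVFP::Decode expands the 8-bit immediate rebuilt above
+          // (low nibble from instr<3:0>, high nibble from instr<19:16>) into
+          // a full float. A rough sketch of the F32 case, with imm8 =
+          // abcdefgh:
+          //   sign     = a
+          //   exponent = NOT(b):b:b:b:b:b:c:d   (8 bits, biased)
+          //   fraction = efgh << 19
+          // so only a small family of values (magnitudes 0.125 to 31.0)
+          // is encodable.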
, # ; T2 + vmov(CurrentCond(), F64, DRegister(rd), imm); + if (((instr & 0xffb00ff0) != 0xeeb00b00)) { + UnpredictableT32(instr); + } + break; + } + case 0x00b00b40: { + // 0xeeb00b40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0xeeb00b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb00b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMOV{}{}.F64
<Dd>, <Dm> ; T2
+                vmov(CurrentCond(), F64, DRegister(rd), DRegister(rm));
+                break;
+              }
+              case 0x00000080: {
+                // 0xeeb00bc0
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VABS{<c>}{<q>}.F64
<Dd>, <Dm> ; T2
+                vabs(CurrentCond(), F64, DRegister(rd), DRegister(rm));
+                break;
+              }
+              case 0x00010000: {
+                // 0xeeb10b40
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VNEG{<c>}{<q>}.F64
<Dd>, <Dm> ; T2
+                vneg(CurrentCond(), F64, DRegister(rd), DRegister(rm));
+                break;
+              }
+              case 0x00010080: {
+                // 0xeeb10bc0
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VSQRT{<c>}{<q>}.F64
, ; T1 + vsqrt(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0xeeb20b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb20b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F64.F16
, ; T1 + vcvtb(CurrentCond(), + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb20bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F64.F16
, ; T1 + vcvtt(CurrentCond(), + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb30b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTB{}{}.F16.F64 , ; T1 + vcvtb(CurrentCond(), + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb30bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTT{}{}.F16.F64 , ; T1 + vcvtt(CurrentCond(), + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0xeeb40b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb40b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMP{}{}.F64
, ; T1 + vcmp(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb40bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMPE{}{}.F64
, ; T1 + vcmpe(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb50b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMP{}{}.F64
<Dd>, #0.0 ; T2
+                vcmp(CurrentCond(), F64, DRegister(rd), 0.0);
+                if (((instr & 0xffbf0fff) != 0xeeb50b40)) {
+                  UnpredictableT32(instr);
+                }
+                break;
+              }
+              case 0x00010080: {
+                // 0xeeb50bc0
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                // VCMPE{<c>}{<q>}.F64
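+                // These trailing comparisons mask the operand fields back in
+                // and check the remaining should-be-constant bits against the
+                // canonical encoding; a mismatch is UNPREDICTABLE rather than
+                // UNDEFINED, so the instruction is still disassembled first
+                // and only then flagged via UnpredictableT32().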
, #0.0 ; T2 + vcmpe(CurrentCond(), F64, DRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50bc0)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0xeeb60b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb60b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTR{}{}.F64
, ; T1 + vrintr(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb60bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}{}.F64
, ; T1 + vrintz(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb70b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}{}.F64
, ; T1 + vrintx(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb70bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F64 , ; T1 + vcvt(CurrentCond(), + F32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0xeeb80b40 + if ((instr & 0x00010000) == 0x00000000) { + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.
, ; T1 + vcvt(CurrentCond(), + F64, + dt, + DRegister(rd), + SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xeeba0b40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F64.
, , # ; T1 + vcvt(CurrentCond(), + F64, + dt, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0xeebc0b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeebc0b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.U32.F64 , ; T1 + vcvtr(CurrentCond(), + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeebc0bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.U32.F64 , ; T1 + vcvt(CurrentCond(), + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeebd0b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.S32.F64 , ; T1 + vcvtr(CurrentCond(), + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeebd0bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.S32.F64 , ; T1 + vcvt(CurrentCond(), + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0xeebe0b40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
<dt>.F64 <Ddm>, <Ddm>, #<fbits> ; T1
+          vcvt(CurrentCond(), dt, F64, DRegister(rd), DRegister(rd), fbits);
+          break;
+        }
+      }
+      break;
+    }
+    case 0x10000a00: {
+      // 0xfe000a00
+      unsigned rd = ExtractSRegister(instr, 22, 12);
+      unsigned rn = ExtractSRegister(instr, 7, 16);
+      unsigned rm = ExtractSRegister(instr, 5, 0);
+      // VSELEQ.F32 <Sd>, <Sn>, <Sm> ; T1
+      vseleq(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+      if (InITBlock()) {
+        UnpredictableT32(instr);
+      }
+      break;
+    }
+    case 0x10000b00: {
+      // 0xfe000b00
+      unsigned rd = ExtractDRegister(instr, 22, 12);
+      unsigned rn = ExtractDRegister(instr, 7, 16);
+      unsigned rm = ExtractDRegister(instr, 5, 0);
+      // VSELEQ.F64
<Dd>, <Dn>, <Dm> ; T1
+      vseleq(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+      if (InITBlock()) {
+        UnpredictableT32(instr);
+      }
+      break;
+    }
+    case 0x10100a00: {
+      // 0xfe100a00
+      unsigned rd = ExtractSRegister(instr, 22, 12);
+      unsigned rn = ExtractSRegister(instr, 7, 16);
+      unsigned rm = ExtractSRegister(instr, 5, 0);
+      // VSELVS.F32 <Sd>, <Sn>, <Sm> ; T1
+      vselvs(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+      if (InITBlock()) {
+        UnpredictableT32(instr);
+      }
+      break;
+    }
+    case 0x10100b00: {
+      // 0xfe100b00
+      unsigned rd = ExtractDRegister(instr, 22, 12);
+      unsigned rn = ExtractDRegister(instr, 7, 16);
+      unsigned rm = ExtractDRegister(instr, 5, 0);
+      // VSELVS.F64
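+    // The VSEL*, VMAXNM/VMINNM, VRINT? and VCVT? encodings in this 0xfe...
+    // block have no condition field (the condition, if any, is baked into
+    // the opcode), and executing them inside an IT block is UNPREDICTABLE;
+    // hence the InITBlock() checks in place of a CurrentCond() argument.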
, , ; T1 + vselvs(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200a00: { + // 0xfe200a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGE.F32 , , ; T1 + vselge(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200b00: { + // 0xfe200b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGE.F64
, , ; T1 + vselge(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10300a00: { + // 0xfe300a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGT.F32 , , ; T1 + vselgt(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10300b00: { + // 0xfe300b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGT.F64
, , ; T1 + vselgt(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800a00: { + // 0xfe800a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; T2 + vmaxnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800a40: { + // 0xfe800a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; T2 + vminnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800b00: { + // 0xfe800b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F64
, , ; T2 + vmaxnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800b40: { + // 0xfe800b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F64
, , ; T2 + vminnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10b00a40: { + // 0xfeb00a40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTA{}.F32 , ; T1 + vrinta(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00090000: { + // 0xfeb90a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTN{}.F32 , ; T1 + vrintn(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTP{}.F32 , ; T1 + vrintp(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTM{}.F32 , ; T1 + vrintm(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTA{}.
<dt>.F32 <Sd>, <Sm> ; T1
+              vcvta(dt, F32, SRegister(rd), SRegister(rm));
+              break;
+            }
+            case 0x000d0000: {
+              // 0xfebd0a40
+              DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+              if (dt.Is(kDataTypeValueInvalid)) {
+                UnallocatedT32(instr);
+                return;
+              }
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VCVTN{<q>}.
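+              // Dt_op_2_Decode maps the single op bit (instr<7>) to the
+              // destination integer type of these round-to-integer
+              // conversions, presumably U32 for 0 and S32 for 1, so each of
+              // VCVTA/N/P/M covers a signed and an unsigned variant from one
+              // case.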
.F32 , ; T1 + vcvtn(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; T1 + vcvtp(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; T1 + vcvtm(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x10b00b40: { + // 0xfeb00b40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTA{}.F64
<Dd>, <Dm> ; T1
+                vrinta(F64, DRegister(rd), DRegister(rm));
+              } else {
+                UnallocatedT32(instr);
+              }
+              break;
+            }
+            case 0x00090000: {
+              // 0xfeb90b40
+              if ((instr & 0x00000080) == 0x00000000) {
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VRINTN{<q>}.F64
, ; T1 + vrintn(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTP{}.F64
, ; T1 + vrintp(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTM{}.F64
, ; T1 + vrintm(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F64 , ; T1 + vcvta(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000d0000: { + // 0xfebd0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F64 , ; T1 + vcvtn(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F64 , ; T1 + vcvtp(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F64 , ; T1 + vcvtm(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000010: { + // 0xee000010 + switch (instr & 0x10100e00) { + case 0x00000a00: { + // 0xee000a10 + switch (instr & 0x00800100) { + case 0x00000000: { + // 0xee000a10 + if ((instr & 0x00600000) == 0x00000000) { + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{} , ; T1 + vmov(CurrentCond(), SRegister(rn), Register(rt)); + if (((instr & 0xfff00f7f) != 0xee000a10)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xee000b10 + unsigned lane; + DataType dt = + Dt_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{}{.} , ; T1 + vmov(CurrentCond(), + dt, + DRegisterLane(rd, lane), + Register(rt)); + if (((instr & 0xff900f1f) != 0xee000b10)) { + UnpredictableT32(instr); + } + break; + } + case 0x00800000: { + // 0xee800a10 + if ((instr & 0x00600000) == 0x00600000) { + unsigned spec_reg = (instr >> 16) & 0xf; + unsigned rt = (instr >> 12) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x8: { + // VMSR{}{} , ; T1 + vmsr(CurrentCond(), + SpecialFPRegister(spec_reg), + Register(rt)); + if (((instr & 0xfff00fff) != 0xeee00a10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00800100: { + // 0xee800b10 + switch (instr & 0x00200040) { + case 0x00000000: { + // 0xee800b10 + DataType dt = Dt_B_E_1_Decode( + ((instr >> 5) & 0x1) | ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
<dt> <Dd>, <Rt> ; T1
+                  vdup(CurrentCond(), dt, DRegister(rd), Register(rt));
+                  if (((instr & 0xffb00f5f) != 0xee800b10)) {
+                    UnpredictableT32(instr);
+                  }
+                  break;
+                }
+                case 0x00200000: {
+                  // 0xeea00b10
+                  DataType dt = Dt_B_E_1_Decode(
+                      ((instr >> 5) & 0x1) | ((instr >> 21) & 0x2));
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  if (((instr >> 16) & 1) != 0) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractQRegister(instr, 7, 16);
+                  unsigned rt = (instr >> 12) & 0xf;
+                  // VDUP{<c>}{<q>}.
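+                  // VDUP's lane size comes from the B and E bits (instr<22>
+                  // and instr<5>), combined as b:e before decoding; the
+                  // expected mapping is 00 -> 32-bit, 01 -> 16-bit, and
+                  // 10 -> 8-bit lanes, with 11 left unallocated (exact table
+                  // per Dt_B_E_1_Decode).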
, ; T1 + vdup(CurrentCond(), + dt, + QRegister(rd), + Register(rt)); + if (((instr & 0xffb00f5f) != 0xeea00b10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000e00: { + // 0xee000e10 + UnimplementedT32_32("MCR", instr); + break; + } + case 0x00100a00: { + // 0xee100a10 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xee100a10 + switch (instr & 0x00e00000) { + case 0x00000000: { + // 0xee100a10 + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractSRegister(instr, 7, 16); + // VMOV{}{} , ; T1 + vmov(CurrentCond(), Register(rt), SRegister(rn)); + if (((instr & 0xfff00f7f) != 0xee100a10)) { + UnpredictableT32(instr); + } + break; + } + case 0x00e00000: { + // 0xeef00a10 + unsigned rt = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 16) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x5: + case 0x6: + case 0x7: + case 0x8: { + // VMRS{}{} , ; T1 + vmrs(CurrentCond(), + RegisterOrAPSR_nzcv(rt), + SpecialFPRegister(spec_reg)); + if (((instr & 0xfff00fff) != 0xeef00a10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000100: { + // 0xee100b10 + unsigned lane; + DataType dt = + Dt_U_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & + 0xc) | + ((instr >> 19) & + 0x10), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; T1 + vmov(CurrentCond(), + dt, + Register(rt), + DRegisterLane(rn, lane)); + if (((instr & 0xff100f1f) != 0xee100b10)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00100e00: { + // 0xee100e10 + UnimplementedT32_32("MRC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x01000000: { + // 0xef000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0xef000000 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xef000000 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHADD{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
+          vhadd(CurrentCond(), dt, DRegister(rd), DRegister(rn), DRegister(rm));
+          break;
+        }
+        case 0x00000040: {
+          // 0xef000040
+          DataType dt = Dt_U_size_1_Decode(
+              ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4));
+          if (dt.Is(kDataTypeValueInvalid)) {
+            UnallocatedT32(instr);
+            return;
+          }
+          if (((instr >> 12) & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rd = ExtractQRegister(instr, 22, 12);
+          if (((instr >> 16) & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rn = ExtractQRegister(instr, 7, 16);
+          if ((instr & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rm = ExtractQRegister(instr, 5, 0);
+          // VHADD{<c>}{<q>}.
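+          // Q registers alias even/odd D-register pairs, so the low bit of
+          // each 5-bit register field must be zero in a Q-form encoding;
+          // that is what the (bit & 1) guards above reject before
+          // ExtractQRegister halves the index, roughly:
+          //   unsigned q_reg = ExtractDRegister(instr, hi, lo) >> 1;  // sketch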
{}, , ; T1 + vhadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xef000100 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRHADD{}{}.
{
}, , ; T1 + vrhadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xef000140 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRHADD{}{}.
{}, , ; T1 + vrhadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000200: { + // 0xef000200 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHSUB{}{}.
{
}, , ; T1 + vhsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000240: { + // 0xef000240 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHSUB{}{}.
{}, , ; T1 + vhsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000300 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
{
}, , ; T1 + vcgt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000340: { + // 0xef000340 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , ; T1 + vcgt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000400 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VSHL{}{}.
<dt> {<Dd>}, <Dm>, <Dn> ; T1
+          vshl(CurrentCond(), dt, DRegister(rd), DRegister(rm), DRegister(rn));
+          break;
+        }
+        case 0x00000440: {
+          // 0xef000440
+          DataType dt = Dt_U_size_3_Decode(
+              ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4));
+          if (dt.Is(kDataTypeValueInvalid)) {
+            UnallocatedT32(instr);
+            return;
+          }
+          if (((instr >> 12) & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rd = ExtractQRegister(instr, 22, 12);
+          if ((instr & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rm = ExtractQRegister(instr, 5, 0);
+          if (((instr >> 16) & 1) != 0) {
+            UnallocatedT32(instr);
+            return;
+          }
+          unsigned rn = ExtractQRegister(instr, 7, 16);
+          // VSHL{<c>}{<q>}.
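+          // Unlike its neighbours, VSHL (register) shifts <Dm> by the
+          // per-lane amounts held in <Dn>, so rm is deliberately passed as
+          // the data operand and rn as the shift operand; the swapped
+          // extraction order above mirrors that.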
{}, , ; T1 + vshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000500 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VRSHL{}{}.
{
}, , ; T1 + vrshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000540: { + // 0xef000540 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VRSHL{}{}.
{}, , ; T1 + vrshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000600 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.
{
}, , ; T1 + vmax(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000640: { + // 0xef000640 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.
{}, , ; T1 + vmax(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000700 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.
{
}, , ; T1 + vabd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000740: { + // 0xef000740 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.
{}, , ; T1 + vabd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000800 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000800 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.
<dt> {<Dd>}, <Dn>, <Dm> ; T1
+              vadd(CurrentCond(), dt, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x10000000: {
+              // 0xff000800
+              DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3);
+              if (dt.Is(kDataTypeValueInvalid)) {
+                UnallocatedT32(instr);
+                return;
+              }
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VSUB{<c>}{<q>}.
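+              // For this VADD/VSUB pair the only difference between the
+              // 0xef... and 0xff... encodings is instr<28> (the U bit of the
+              // T32 Advanced SIMD space), which the inner switch on
+              // 0x10000000 uses to pick the operation; the operand decoding
+              // is identical.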
{
}, , ; T1 + vsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xef000840 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000840 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.
{}, , ; T1 + vadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000840 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.
{}, , ; T1 + vsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000900 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000900 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000900 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000940: { + // 0xef000940 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000940 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000940 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000a00: { + // 0xef000a00 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.
{
}, , ; T1 + vpmax(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xef000b00 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000b00 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULH{}{}.
{
}, , ; T1 + vqdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000b00 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQRDMULH{}{}.
{
}, , ; T1 + vqrdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000b40: { + // 0xef000b40 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000b40 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQDMULH{}{}.
{}, , ; T1 + vqdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000b40 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQRDMULH{}{}.
{}, , ; T1 + vqrdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000c40: { + // 0xef000c40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000c40 + UnimplementedT32_32("SHA1C", instr); + break; + } + case 0x00100000: { + // 0xef100c40 + UnimplementedT32_32("SHA1P", instr); + break; + } + case 0x00200000: { + // 0xef200c40 + UnimplementedT32_32("SHA1M", instr); + break; + } + case 0x00300000: { + // 0xef300c40 + UnimplementedT32_32("SHA1SU0", instr); + break; + } + case 0x10000000: { + // 0xff000c40 + UnimplementedT32_32("SHA256H", instr); + break; + } + case 0x10100000: { + // 0xff100c40 + UnimplementedT32_32("SHA256H2", instr); + break; + } + case 0x10200000: { + // 0xff200c40 + UnimplementedT32_32("SHA256SU1", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xef000d00 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F32 {
}, , ; T1 + vadd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F32 {
}, , ; T1 + vsub(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.F32 {
}, , ; T1 + vpadd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.F32 {
}, , ; T1 + vabd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xef000d40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; T1 + vadd(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; T1 + vsub(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.F32 {}, , ; T1 + vabd(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e00 + switch (instr & 0x10200000) { + case 0x00000000: { + // 0xef000e00 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , ; T2 + vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.F32 {
}, , ; T2 + vcge(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.F32 {
}, , ; T2 + vcgt(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xef000e40 + switch (instr & 0x10200000) { + case 0x00000000: { + // 0xef000e40 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{<Qd>}, <Qn>, <Qm> ; T2 + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; T2 + vcge(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; T2 + vcgt(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f00 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{<c>}{<q>}.F32
{<Dd>}, <Dn>, <Dm> ; T1 + vmax(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{<c>}{<q>}.F32
{<Dd>}, <Dn>, <Dm> ; T1 + vmin(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{<c>}{<q>}.F32
{<Dd>}, <Dn>, <Dm> ; T1 + vpmax(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{<c>}{<q>}.F32
{<Dd>}, <Dn>, <Dm> ; T1 + vpmin(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xef000f40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; T1 + vmax(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{<c>}{<q>}.F32 {<Qd>}, <Qn>, <Qm> ; T1 + vmin(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xef800000 + switch (instr & 0x00300000) { + case 0x00300000: { + // 0xefb00000 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb00000 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xefb00000 + if (((instr & 0x800) == 0x800)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{<c>}{<q>}.8
{<Dd>}, <Dn>, <Dm>, #<imm> ; T1 NOLINT(whitespace/line_length) + vext(CurrentCond(), + Untyped8, + DRegister(rd), + DRegister(rn), + DRegister(rm), + imm); + break; + } + case 0x00000040: { + // 0xefb00040 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{<c>}{<q>}.8 {<Qd>}, <Qn>, <Qm>, #<imm> ; T1 NOLINT(whitespace/line_length) + vext(CurrentCond(), + Untyped8, + QRegister(rd), + QRegister(rn), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x10000000: { + // 0xffb00000 + switch (instr & 0x00000800) { + case 0x00000000: { + // 0xffb00000 + switch (instr & 0x00030200) { + case 0x00000000: { + // 0xffb00000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xffb00000 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV64{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vrev64(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00040 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV64{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vrev64(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb00080 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV32{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vrev32(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xffb000c0 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV32{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vrev32(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb00100 + DataType dt = Dt_size_1_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV16{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vrev16(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb00140 + DataType dt = Dt_size_1_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV16{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vrev16(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb00400 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLS{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 + vcls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb00440 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLS{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 + vcls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000480: { + // 0xffb00480 + DataType dt = Dt_size_4_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLZ{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 + vclz(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xffb004c0 + DataType dt = Dt_size_4_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLZ{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 + vclz(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb00500 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCNT{<c>}{<q>}.8
<Dd>, <Dm> ; T1 + vcnt(CurrentCond(), + Untyped8, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000540: { + // 0xffb00540 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCNT{<c>}{<q>}.8 <Qd>, <Qm> ; T1 + vcnt(CurrentCond(), + Untyped8, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000580: { + // 0xffb00580 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMVN{<c>}{<q>}{.<dt>}
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length)
+ vmvn(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000005c0: { + // 0xffb005c0 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VMVN{<c>}{<q>}{.<dt>}
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000200: { + // 0xffb00200 + switch (instr & 0x00000540) { + case 0x00000000: { + // 0xffb00200 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VPADDL{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vpaddl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00240 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VPADDL{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vpaddl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb00300 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00300 + UnimplementedT32_32("AESE", + instr); + break; + } + case 0x00000080: { + // 0xffb00380 + UnimplementedT32_32("AESMC", + instr); + break; + } + } + break; + } + case 0x00000140: { + // 0xffb00340 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00340 + UnimplementedT32_32("AESD", + instr); + break; + } + case 0x00000080: { + // 0xffb003c0 + UnimplementedT32_32("AESIMC", + instr); + break; + } + } + break; + } + case 0x00000400: { + // 0xffb00600 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VPADAL{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vpadal(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb00640 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VPADAL{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vpadal(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb00700 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00700 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VQABS{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vqabs(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb00780 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VQNEG{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length) + vqneg(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000540: { + // 0xffb00740 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00740 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQABS{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vqabs(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb007c0 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQNEG{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vqneg(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00010000: { + // 0xffb10000 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xffb10000 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCGT{<c>}{<q>}.<dt>
{<Dd>}, <Dm>, #0 ; T1 NOLINT(whitespace/line_length)
+ vcgt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xffb10040 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCGT{<c>}{<q>}.<dt>
{<Qd>}, <Qm>, #0 ; T1 NOLINT(whitespace/line_length) + vcgt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000080: { + // 0xffb10080 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCGE{<c>}{<q>}.<dt>
{<Dd>}, <Dm>, #0 ; T1 NOLINT(whitespace/line_length)
+ vcge(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xffb100c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCGE{<c>}{<q>}.<dt>
{<Qd>}, <Qm>, #0 ; T1 NOLINT(whitespace/line_length) + vcge(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000100: { + // 0xffb10100 + DataType dt = Dt_F_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCEQ{<c>}{<q>}.<dt>
{<Dd>}, <Dm>, #0 ; T1 NOLINT(whitespace/line_length)
+ vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000140: { + // 0xffb10140 + DataType dt = Dt_F_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCEQ{<c>}{<q>}.<dt>
{<Qd>}, <Qm>, #0 ; T1 NOLINT(whitespace/line_length) + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000180: { + // 0xffb10180 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLE{<c>}{<q>}.<dt>
{<Dd>}, <Dm>, #0 ; T1 NOLINT(whitespace/line_length)
+ vcle(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000001c0: { + // 0xffb101c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLE{<c>}{<q>}.<dt>
{<Qd>}, <Qm>, #0 ; T1 NOLINT(whitespace/line_length) + vcle(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + } + break; + } + case 0x00010200: { + // 0xffb10200 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xffb10200 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLT{<c>}{<q>}.<dt>
{<Dd>}, <Dm>, #0 ; T1 NOLINT(whitespace/line_length)
+ vclt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xffb10240 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLT{<c>}{<q>}.<dt>
{<Qd>}, <Qm>, #0 ; T1 NOLINT(whitespace/line_length) + vclt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xffb102c0 + if ((instr & 0x000c0400) == + 0x00080000) { + UnimplementedT32_32("SHA1H", instr); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xffb10300 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VABS{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 + vabs(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb10340 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VABS{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 + vabs(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000180: { + // 0xffb10380 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VNEG{<c>}{<q>}.<dt>
<Dd>, <Dm> ; T1 + vneg(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xffb103c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VNEG{<c>}{<q>}.<dt>
<Qd>, <Qm> ; T1 + vneg(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00020000: { + // 0xffb20000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xffb20000 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VSWP{<c>}{<q>}{.<dt>}
<Dd>, <Dm> ; T1 NOLINT(whitespace/line_length)
+ vswp(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000040: { + // 0xffb20040 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VSWP{<c>}{<q>}{.<dt>}
<Qd>, <Qm> ; T1 NOLINT(whitespace/line_length) + vswp(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000080: { + // 0xffb20080 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTRN{}{}.
, ; T1 + vtrn(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xffb200c0 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VTRN{}{}.
, ; T1 + vtrn(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb20100 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VUZP{}{}.
, ; T1 + vuzp(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb20140 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VUZP{}{}.
, ; T1 + vuzp(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000180: { + // 0xffb20180 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VZIP{}{}.
, ; T1 + vzip(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xffb201c0 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VZIP{}{}.
, ; T1 + vzip(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb20400 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTN{}.
, ; T1 + vrintn(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb20440 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTN{}.
, ; T1 + vrintn(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000480: { + // 0xffb20480 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTX{}.
, ; T1 + vrintx(Condition::None(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xffb204c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTX{}.
, ; T1 + vrintx(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb20500 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTA{}.
, ; T1 + vrinta(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000540: { + // 0xffb20540 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTA{}.
, ; T1 + vrinta(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000580: { + // 0xffb20580 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTZ{}.
, ; T1 + vrintz(Condition::None(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000005c0: { + // 0xffb205c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTZ{}.
, ; T1 + vrintz(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00020200: { + // 0xffb20200 + switch (instr & 0x00000580) { + case 0x00000000: { + // 0xffb20200 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20200 + DataType dt = Dt_size_3_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VMOVN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb20240 + DataType dt = Dt_size_14_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQMOVUN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqmovun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000080: { + // 0xffb20280 + DataType dt = Dt_op_size_3_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 4) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQMOVN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqmovn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb20300 + if ((instr & 0x00000040) == + 0x00000000) { + DataType dt = Dt_size_17_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm = dt.GetSize(); + // VSHLL{}{}. , , # ; T2 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000180: { + // 0xffb20380 + switch (instr & 0x000c0040) { + case 0x00080000: { + // 0xffba0380 + UnimplementedT32_32("SHA1SU1", + instr); + break; + } + case 0x00080040: { + // 0xffba03c0 + UnimplementedT32_32("SHA256SU0", + instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xffb20600 + if ((instr & 0x000c0040) == + 0x00040000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVT{}{}.F16.F32
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + F16, + F32, + DRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000480: { + // 0xffb20680 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20680 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTM{}.
, ; T1 NOLINT(whitespace/line_length) + vrintm(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb206c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTM{}.
, ; T1 NOLINT(whitespace/line_length) + vrintm(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000500: { + // 0xffb20700 + if ((instr & 0x000c0040) == + 0x00040000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F16 , ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + F32, + F16, + QRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000580: { + // 0xffb20780 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20780 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTP{}.
, ; T1 NOLINT(whitespace/line_length) + vrintp(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb207c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTP{}.
, ; T1 NOLINT(whitespace/line_length) + vrintp(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030000: { + // 0xffb30000 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xffb30000 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xffbb0000 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvta(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080100: { + // 0xffbb0100 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtn(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000040: { + // 0xffb30040 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xffbb0040 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvta(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00080100: { + // 0xffbb0140 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtn(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xffb30400 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb30400 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRECPE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrecpe(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb30480 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrsqrte(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xffb30440 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb30440 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRECPE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrecpe(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb304c0 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrsqrte(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030200: { + // 0xffb30200 + switch (instr & 0x000c0440) { + case 0x00080000: { + // 0xffbb0200 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xffbb0200 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtp(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xffbb0300 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtm(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080040: { + // 0xffbb0240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xffbb0240 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtp(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffbb0340 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtm(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00080400: { + // 0xffbb0600 + DataType dt1 = Dt_op_1_Decode1( + (instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_1_Decode2( + (instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080440: { + // 0xffbb0640 + DataType dt1 = Dt_op_1_Decode1( + (instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_1_Decode2( + (instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000800: { + // 0xffb00800 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xffb00800 + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned first = + ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTBL{}{}.8
, , ; T1 NOLINT(whitespace/line_length) + vtbl(CurrentCond(), + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00840 + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned first = + ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTBX{}{}.8
, , ; T1 NOLINT(whitespace/line_length) + vtbx(CurrentCond(), + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb00c00 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & + 0xf, + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; T1 NOLINT(whitespace/line_length) + vdup(CurrentCond(), + dt, + DRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000440: { + // 0xffb00c40 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & + 0xf, + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; T1 NOLINT(whitespace/line_length) + vdup(CurrentCond(), + dt, + QRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000c40) { + case 0x00000000: { + // 0xef800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800000 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDL{}{}.
, , ; T1 + vaddl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800100 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDW{}{}.
{}, , ; T1 NOLINT(whitespace/line_length) + vaddw(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xef800200 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBL{}{}.
, , ; T1 + vsubl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef800300 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBW{}{}.
{}, , ; T1 NOLINT(whitespace/line_length) + vsubw(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000040: { + // 0xef800040 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800040 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLA{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLA{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800240 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLAL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xef800340 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = (instr & 0xf) | + ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLAL{}{}.
, , [] ; T2 NOLINT(whitespace/line_length) + vqdmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800400 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800400 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VADDHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vaddhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRADDHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vraddhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800500 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABAL{}{}.
, , ; T1 + vabal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xef800600 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VSUBHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vsubhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRSUBHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vrsubhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000300: { + // 0xef800700 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABDL{}{}.
, , ; T1 + vabdl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xef800440 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800440 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLS{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLS{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800640 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800640 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLSL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xef800740 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = (instr & 0xf) | + ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLSL{}{}.
, , [] ; T2 NOLINT(whitespace/line_length) + vqdmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800800 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800800 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLAL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800900 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLAL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000200: { + // 0xef800a00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLSL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef800b00 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLSL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x00000840: { + // 0xef800840 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800840 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
+                        // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm>[<x>] ; T1 NOLINT(whitespace/line_length)
}, , [] ; T1 NOLINT(whitespace/line_length) + vmul(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x10000000: { + // 0xff800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{}, , [] ; T1 NOLINT(whitespace/line_length) + vmul(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(vm), + index); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800a40 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800a40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_size_2_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMULL{}{}.
, , [] ; T1 NOLINT(whitespace/line_length) + vmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x00000100: { + // 0xef800b40 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULL{}{}.
, , ; T2 NOLINT(whitespace/line_length) + vqdmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000c00: { + // 0xef800c00 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800c00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_op_U_size_1_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4) | + ((instr >> 6) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMULL{}{}.
, , ; T1 + vmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800d00 + if ((instr & 0x10000200) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x00000c40: { + // 0xef800c40 + switch (instr & 0x10000300) { + case 0x00000000: { + // 0xef800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULH{}{}.
+                  // VQDMULH{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm[x]> ; T2 NOLINT(whitespace/line_length)
+                  vqdmulh(CurrentCond(),
+                          dt,
+                          DRegister(rd),
+                          DRegister(rn),
+                          DRegisterLane(rm, lane));
+                  break;
+                }
+                case 0x00000100: {
+                  // 0xef800d40
+                  if (((instr & 0x300000) == 0x300000)) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  DataType dt =
+                      Dt_size_13_Decode((instr >> 20) & 0x3);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rn = ExtractDRegister(instr, 7, 16);
+                  int lane;
+                  unsigned rm = ExtractDRegisterAndLane(instr,
+                                                        dt,
+                                                        5,
+                                                        0,
+                                                        &lane);
+                  // VQRDMULH{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm[x]> ; T2 NOLINT(whitespace/line_length)
}, , ; T2 NOLINT(whitespace/line_length) + vqrdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULH{}{}.
{}, , ; T2 NOLINT(whitespace/line_length) + vqdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000100: { + // 0xff800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQRDMULH{}{}.
{}, , ; T2 NOLINT(whitespace/line_length) + vqrdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01000010: { + // 0xef000010 + switch (instr & 0x00800040) { + case 0x00000000: { + // 0xef000010 + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xef000010 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQADD{}{}.
+              // VQADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+              vqadd(CurrentCond(),
+                    dt,
+                    DRegister(rd),
+                    DRegister(rn),
+                    DRegister(rm));
+              break;
+            }
+            case 0x00000100: {
+              // 0xef000110
+              switch (instr & 0x10300000) {
+                case 0x00000000: {
+                  // 0xef000110
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rn = ExtractDRegister(instr, 7, 16);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VAND{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
+                  vand(CurrentCond(),
+                       kDataTypeValueNone,
+                       DRegister(rd),
+                       DRegister(rn),
+                       DRegister(rm));
+                  break;
+                }
+                case 0x00100000: {
+                  // 0xef100110
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rn = ExtractDRegister(instr, 7, 16);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VBIC{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
+                  vbic(CurrentCond(),
+                       kDataTypeValueNone,
+                       DRegister(rd),
+                       DRegister(rn),
+                       DRegister(rm));
+                  break;
+                }
+                case 0x00200000: {
+                  // 0xef200110
+                  if (((instr & 0x00000040) == 0x00000000) &&
+                      ((((Uint32((instr >> 7)) & Uint32(0x1)) << 4) |
+                        (Uint32((instr >> 16)) & Uint32(0xf))) ==
+                       (((Uint32((instr >> 5)) & Uint32(0x1)) << 4) |
+                        (Uint32(instr) & Uint32(0xf))))) {
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 7, 16);
+                    // VMOV{<c>}{<q>}{.<dt>} <Dd>, <Dm> ; T1
+                    vmov(CurrentCond(),
+                         kDataTypeValueNone,
+                         DRegister(rd),
+                         DRegister(rm));
+                    return;
+                  }
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rn = ExtractDRegister(instr, 7, 16);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VORR{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
+                  vorr(CurrentCond(),
+                       kDataTypeValueNone,
+                       DRegister(rd),
+                       DRegister(rn),
+                       DRegister(rm));
+                  break;
+                }
+                case 0x00300000: {
+                  // 0xef300110
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rn = ExtractDRegister(instr, 7, 16);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VORN{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vorn(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VEOR{}{}{.
+                  // VEOR{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + veor(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10100000: { + // 0xff100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBSL{}{}{.
+                  // VBSL{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vbsl(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIT{}{}{.
+                  // VBIT{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vbit(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10300000: { + // 0xff300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIF{}{}{.
+                  // VBIF{<c>}{<q>}{.<dt>} {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vbif(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef000210 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQSUB{}{}.
+              // VQSUB{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vqsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000310 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
+              // VCGE{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vcge(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000410 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQSHL{}{}.
+              // VQSHL{<c>}{<q>}.<dt> {<Dd>}, <Dm>, <Dn> ; T1
}, , ; T1 + vqshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000510 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQRSHL{}{}.
+              // VQRSHL{<c>}{<q>}.<dt> {<Dd>}, <Dm>, <Dn> ; T1
}, , ; T1 + vqrshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000610 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.
+              // VMIN{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vmin(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000710 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABA{}{}.
, , ; T1 + vaba(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000810 + DataType dt = + Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTST{}{}.
+                // VTST{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vtst(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000810 + DataType dt = + Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
+                // VCEQ{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000910 + DataType dt = Dt_op_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.
+              // VMUL{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vmul(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000a00: { + // 0xef000a10 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.
+              // VPMIN{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
}, , ; T1 + vpmin(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xef000b10 + if ((instr & 0x10000000) == 0x00000000) { + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.
+              // VPADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
+              vpadd(CurrentCond(),
+                    dt,
+                    DRegister(rd),
+                    DRegister(rn),
+                    DRegister(rm));
+            } else {
+              UnallocatedT32(instr);
+            }
+            break;
+          }
+          case 0x00000c00: {
+            // 0xef000c10
+            switch (instr & 0x10300000) {
+              case 0x00000000: {
+                // 0xef000c10
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rn = ExtractDRegister(instr, 7, 16);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VFMA{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
+                vfma(CurrentCond(),
+                     F32,
+                     DRegister(rd),
+                     DRegister(rn),
+                     DRegister(rm));
+                break;
+              }
+              case 0x00200000: {
+                // 0xef200c10
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rn = ExtractDRegister(instr, 7, 16);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VFMS{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
+                vfms(CurrentCond(),
+                     F32,
+                     DRegister(rd),
+                     DRegister(rn),
+                     DRegister(rm));
+                break;
+              }
+              default:
+                UnallocatedT32(instr);
+                break;
+            }
+            break;
+          }
+          case 0x00000d00: {
+            // 0xef000d10
+            switch (instr & 0x10300000) {
+              case 0x00000000: {
+                // 0xef000d10
+                unsigned rd = ExtractDRegister(instr, 22, 12);
+                unsigned rn = ExtractDRegister(instr, 7, 16);
+                unsigned rm = ExtractDRegister(instr, 5, 0);
+                // VMLA{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
, , ; T1 + vmla(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F32
, , ; T1 + vmls(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F32 {
}, , ; T1 + vmul(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e10 + switch (instr & 0x10300000) { + case 0x10000000: { + // 0xff000e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGE{}{}.F32 {
}, , ; T1 + vacge(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGT{}{}.F32 {
}, , ; T1 + vacgt(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f10 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPS{}{}.F32 {
}, , ; T1 + vrecps(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {
}, , ; T1 + vrsqrts(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F32
, , ; T1 + vmaxnm(F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F32
, , ; T1 + vminnm(F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000040: { + // 0xef000050 + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xef000050 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQADD{}{}.
{}, , ; T1 + vqadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xef000150 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VAND{}{}{.
} {}, , ; T1 + vand(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00100000: { + // 0xef100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIC{}{}{.
} {}, , ; T1 + vbic(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200150 + if (((instr & 0x00000040) == 0x00000040) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) + << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) + << 4) | + (Uint32(instr) & Uint32(0xf))))) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; T1 + vmov(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORR{}{}{.
} {}, , ; T1 + vorr(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00300000: { + // 0xef300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORN{}{}{.
} {}, , ; T1 + vorn(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VEOR{}{}{.
} {}, , ; T1 + veor(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10100000: { + // 0xff100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBSL{}{}{.
} {}, , ; T1 + vbsl(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIT{}{}{.
} {}, , ; T1 + vbit(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10300000: { + // 0xff300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIF{}{}{.
} {}, , ; T1 + vbif(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef000250 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQSUB{}{}.
{}, , ; T1 + vqsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000350 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , ; T1 + vcge(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000450 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQSHL{}{}.
{}, , ; T1 + vqshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000550 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQRSHL{}{}.
{}, , ; T1 + vqrshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000650 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.
{}, , ; T1 + vmin(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000750 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABA{}{}.
, , ; T1 + vaba(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000850 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000850 + DataType dt = + Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTST{}{}.
{}, , ; T1 + vtst(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000850 + DataType dt = + Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; T1 + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000950 + DataType dt = Dt_op_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.
{}, , ; T1 + vmul(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000c00: { + // 0xef000c50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; T1 + vfma(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; T1 + vfms(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xef000d50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; T1 + vmla(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; T1 + vmls(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; T1 + vmul(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e50 + switch (instr & 0x10300000) { + case 0x10000000: { + // 0xff000e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGE{}{}.F32 {}, , ; T1 + vacge(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 
0xff200e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGT{}{}.F32 {}, , ; T1 + vacgt(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPS{}{}.F32 {}, , ; T1 + vrecps(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {}, , ; T1 + vrsqrts(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; T1 + vmaxnm(F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; T1 + vminnm(F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xef800010 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800030 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + 
+                            UnallocatedT32(instr);
+                            return;
+                          }
+                          unsigned rd =
+                              ExtractDRegister(instr, 22, 12);
+                          DOperand imm =
+                              ImmediateVmvn::DecodeImmediate(
+                                  cmode,
+                                  (instr & 0xf) |
+                                      ((instr >> 12) & 0x70) |
+                                      ((instr >> 21) & 0x80));
+                          // VMVN{<c>}{<q>}.<dt> <Dd>, #<imm> ; T1 NOLINT(whitespace/line_length)
+                          vmvn(CurrentCond(),
+                               dt,
+                               DRegister(rd),
+                               imm);
+                          break;
+                        }
+                        default: {
+                          if (((instr & 0x920) == 0x100) ||
+                              ((instr & 0x520) == 0x100) ||
+                              ((instr & 0x820) == 0x20) ||
+                              ((instr & 0x420) == 0x20) ||
+                              ((instr & 0x220) == 0x20) ||
+                              ((instr & 0x120) == 0x120)) {
+                            UnallocatedT32(instr);
+                            return;
+                          }
+                          unsigned cmode =
+                              ((instr >> 8) & 0xf) |
+                              ((instr >> 1) & 0x10);
+                          DataType dt =
+                              ImmediateVmov::DecodeDt(cmode);
+                          if (dt.Is(kDataTypeValueInvalid)) {
+                            UnallocatedT32(instr);
+                            return;
+                          }
+                          unsigned rd =
+                              ExtractDRegister(instr, 22, 12);
+                          DOperand imm =
+                              ImmediateVmov::DecodeImmediate(
+                                  cmode,
+                                  (instr & 0xf) |
+                                      ((instr >> 12) & 0x70) |
+                                      ((instr >> 21) & 0x80));
+                          // VMOV{<c>}{<q>}.<dt> <Dd>, #<imm> ; T1 NOLINT(whitespace/line_length)
+                          vmov(CurrentCond(),
+                               dt,
+                               DRegister(rd),
+                               imm);
+                          break;
+                        }
+                      }
+                      break;
+                    }
+                  }
+                  break;
+                }
+                case 0x00000100: {
+                  // 0xef800110
+                  switch (instr & 0x00000020) {
+                    case 0x00000000: {
+                      // 0xef800110
+                      if (((instr & 0x100) == 0x0) ||
+                          ((instr & 0xc00) == 0xc00)) {
+                        UnallocatedT32(instr);
+                        return;
+                      }
+                      unsigned cmode = (instr >> 8) & 0xf;
+                      DataType dt =
+                          ImmediateVorr::DecodeDt(cmode);
+                      if (dt.Is(kDataTypeValueInvalid)) {
+                        UnallocatedT32(instr);
+                        return;
+                      }
+                      unsigned rd =
+                          ExtractDRegister(instr, 22, 12);
+                      DOperand imm =
+                          ImmediateVorr::DecodeImmediate(
+                              cmode,
+                              (instr & 0xf) |
+                                  ((instr >> 12) & 0x70) |
+                                  ((instr >> 21) & 0x80));
+                      // VORR{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; T1 NOLINT(whitespace/line_length)
+                      vorr(CurrentCond(),
+                           dt,
+                           DRegister(rd),
+                           DRegister(rd),
+                           imm);
+                      break;
+                    }
+                    case 0x00000020: {
+                      // 0xef800130
+                      if (((instr & 0x100) == 0x0) ||
+                          ((instr & 0xc00) == 0xc00)) {
+                        UnallocatedT32(instr);
+                        return;
+                      }
+                      unsigned cmode = (instr >> 8) & 0xf;
+                      DataType dt =
+                          ImmediateVbic::DecodeDt(cmode);
+                      if (dt.Is(kDataTypeValueInvalid)) {
+                        UnallocatedT32(instr);
+                        return;
+                      }
+                      unsigned rd =
+                          ExtractDRegister(instr, 22, 12);
+                      DOperand imm =
+                          ImmediateVbic::DecodeImmediate(
+                              cmode,
+                              (instr & 0xf) |
+                                  ((instr >> 12) & 0x70) |
+                                  ((instr >> 21) & 0x80));
+                      // VBIC{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; T1 NOLINT(whitespace/line_length)
+                      vbic(CurrentCond(),
+                           dt,
+                           DRegister(rd),
+                           DRegister(rd),
+                           imm);
+                      break;
+                    }
+                  }
+                  break;
+                }
+              }
+              break;
+            }
+            default: {
+              switch (instr & 0x00000300) {
+                case 0x00000000: {
+                  // 0xef800010
+                  if (((instr & 0x380080) == 0x0)) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  DataType dt =
+                      Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) |
+                                             ((instr >> 4) & 0x8),
+                                         (instr >> 28) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedT32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractDRegister(instr, 22, 12);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  uint32_t imm6 = (instr >> 16) & 0x3f;
+                  uint32_t imm =
+                      (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - imm6;
+                  // VSHR{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; T1 NOLINT(whitespace/line_length)
}, , # ; T1 NOLINT(whitespace/line_length) + vshr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000100: { + // 0xef800110 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vsra(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800210 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vrshr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800310 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vrsra(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800410 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800410 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800410 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800430 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800510 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800510 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800530 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800410 + if ((instr & 0x10000000) == 0x10000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
+                  // VSRI{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; T1 NOLINT(whitespace/line_length)
+                  vsri(CurrentCond(),
+                       dt,
+                       DRegister(rd),
+                       DRegister(rm),
+                       imm);
+                } else {
+                  UnallocatedT32(instr);
+                }
+                break;
+              }
+              case 0x00000100: {
+                // 0xef800510
+                switch (instr & 0x10000000) {
+                  case 0x00000000: {
+                    // 0xef800510
+                    if (((instr & 0x380080) == 0x0)) {
+                      UnallocatedT32(instr);
+                      return;
+                    }
+                    DataType dt = Dt_L_imm6_3_Decode(
+                        ((instr >> 19) & 0x7) | ((instr >> 4) & 0x8));
+                    if (dt.Is(kDataTypeValueInvalid)) {
+                      UnallocatedT32(instr);
+                      return;
+                    }
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 5, 0);
+                    uint32_t imm6 = (instr >> 16) & 0x3f;
+                    uint32_t imm =
+                        imm6 - (dt.IsSize(64) ? 0 : dt.GetSize());
+                    // VSHL{<c>}{<q>}.I<size> {<Dd>}, <Dm>, #<imm> ; T1 NOLINT(whitespace/line_length)
}, , # ; T1 NOLINT(whitespace/line_length) + vshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
+                    // VSLI{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; T1 NOLINT(whitespace/line_length)
}, , # ; T1 NOLINT(whitespace/line_length) + vsli(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800610 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vqshlu(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800710 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vqshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800810 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800830 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800910 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800910 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800930 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00180000: { + // 0xef980810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef980810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef980910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
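// [Editor's note - illustrative sketch, not part of the upstream patch.]
// The narrowing right shifts (VSHRN, VQSHRN, VQSHRUN) store their shift
// amount biased against the source element size: imm6 = esize - shift, where
// esize (16, 32 or 64) is implied by the position of imm6's leading set bit
// (which is what the Dt_imm6_*_Decode helpers recover). The decoder inverts
// the bias with imm = dt.GetSize() - imm6, as seen above. Standalone check:

#include <cassert>
#include <cstdint>

inline uint32_t DecodeNarrowingShift(uint32_t imm6, uint32_t esize) {
  uint32_t shift = esize - imm6;             // e.g. esize 16, imm6 9 -> #7
  assert(shift >= 1 && shift <= esize / 2);  // VSHRN.I16 allows #1..#8
  return shift;
}

// [End editor's note.]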
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00280000: { + // 0xefa80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefa80810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefa80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
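// [Editor's note - illustrative sketch, not part of the upstream patch.]
// VSHLL biases its shift the other way round: imm6 = esize + shift, hence
// imm = imm6 - dt.GetSize() above, with 0 < shift < esize. The exclusions
// (instr & 0x3f0000) == 0x80000 / 0x100000 / 0x200000 reject imm6 values of
// 8, 16 and 32 - exactly a shift of zero - because that bit pattern is the
// VMOVL encoding, decoded elsewhere in this switch.

#include <cstdint>

inline bool IsVmovlNotVshll(uint32_t instr) {
  uint32_t imm6 = (instr >> 16) & 0x3f;
  return imm6 == 8 || imm6 == 16 || imm6 == 32;  // widening shift of 0
}

// [End editor's note.]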
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffa80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefa80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefa80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00300000: { + // 0xefb00810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefb00810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffb00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefb00910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefb00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00380000: { + // 0xefb80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefb80810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffb80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefb80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefb80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800a10 + switch (instr & 0x00070000) { + case 0x00000000: { + // 0xef800a10 + switch (instr & 0x003f0000) { + case 0x00080000: { + // 0xef880a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
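// [Editor's note - illustrative sketch, not part of the upstream patch.]
// For VMOVL, imm3H (instr[21:19]) must be exactly 0b001, 0b010 or 0b100,
// selecting a source element size of 8, 16 or 32; U (instr[28], folded in
// via ((instr >> 25) & 0x8)) picks signed versus unsigned. The excluded
// masks above (0x180000, 0x280000, 0x300000, 0x380000) are precisely the
// imm3H patterns with more than one bit set. A sketch of the size mapping
// performed inside Dt_U_imm3H_1_Decode (the helper name is hypothetical):

#include <cstdint>

inline uint32_t VmovlSourceSize(uint32_t imm3h) {
  // 0b001 -> 8, 0b010 -> 16, 0b100 -> 32; anything else is unallocated.
  return (imm3h == 1) ? 8 : (imm3h == 2) ? 16 : (imm3h == 4) ? 32 : 0;
}

// [End editor's note.]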
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00090000: { + // 0xef890a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000a0000: { + // 0xef8a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000b0000: { + // 0xef8b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000c0000: { + // 0xef8c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000d0000: { + // 0xef8d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000e0000: { + // 0xef8e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000f0000: { + // 0xef8f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00100000: { + // 0xef900a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00110000: { + // 0xef910a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00120000: { + // 0xef920a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00130000: { + // 0xef930a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00140000: { + // 0xef940a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00150000: { + // 0xef950a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00160000: { + // 0xef960a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00170000: { + // 0xef970a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00180000: { + // 0xef980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00190000: { + // 0xef990a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001a0000: { + // 0xef9a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001b0000: { + // 0xef9b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001c0000: { + // 0xef9c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001d0000: { + // 0xef9d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001e0000: { + // 0xef9e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001f0000: { + // 0xef9f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00200000: { + // 0xefa00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00210000: { + // 0xefa10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00220000: { + // 0xefa20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00230000: { + // 0xefa30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00240000: { + // 0xefa40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
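// [Editor's note - illustrative sketch, not part of the upstream patch.]
// This stretch of the generated decoder enumerates every imm6 value from
// 0x09 through 0x3f as its own case, yet each body - apart from the VMOVL
// cases at imm6 = 8, 16 and 32 - performs the identical VSHLL decode. A
// hand-written equivalent would collapse the whole table to one test:

#include <cstdint>

inline bool DecodesAsVshll(uint32_t instr) {
  uint32_t imm6 = (instr >> 16) & 0x3f;
  if ((imm6 & 0x38) == 0) return false;          // no size bit set: unallocated
  return imm6 != 8 && imm6 != 16 && imm6 != 32;  // those encode VMOVL
}

// [End editor's note.]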
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00250000: { + // 0xefa50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00260000: { + // 0xefa60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00270000: { + // 0xefa70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00280000: { + // 0xefa80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00290000: { + // 0xefa90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002a0000: { + // 0xefaa0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002b0000: { + // 0xefab0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002c0000: { + // 0xefac0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002d0000: { + // 0xefad0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002e0000: { + // 0xefae0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002f0000: { + // 0xefaf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00300000: { + // 0xefb00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00310000: { + // 0xefb10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00320000: { + // 0xefb20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00330000: { + // 0xefb30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00340000: { + // 0xefb40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00350000: { + // 0xefb50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00360000: { + // 0xefb60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00370000: { + // 0xefb70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00380000: { + // 0xefb80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00390000: { + // 0xefb90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003a0000: { + // 0xefba0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003b0000: { + // 0xefbb0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003c0000: { + // 0xefbc0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003d0000: { + // 0xefbd0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003e0000: { + // 0xefbe0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003f0000: { + // 0xefbf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xef800c10 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xef800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000300: { + // 0xef800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800c30 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000200: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000220: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000400: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000420: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000600: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000620: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000800: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000820: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000a00: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000a20: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000c00: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000c20: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000d00: { + // 0xef800d10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000d20: { + // 0xef800d30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000e00: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000e20: { + // 0xef800e30 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000f00: { + // 0xef800f10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
<dt> <Dd>, #<imm> ; T1 NOLINT(whitespace/line_length)
+ vmov(CurrentCond(),
+ dt,
+ DRegister(rd),
+ imm);
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ UnallocatedT32(instr);
+ break;
+ }
+ break;
+ }
+ default: {
+ if ((instr & 0x00000200) == 0x00000200) {
+ if (((instr & 0x200000) == 0x0)) {
+ UnallocatedT32(instr);
+ return;
+ }
+ DataType dt1 = Dt_op_U_1_Decode1(
+ ((instr >> 28) & 0x1) |
+ ((instr >> 7) & 0x2));
+ if (dt1.Is(kDataTypeValueInvalid)) {
+ UnallocatedT32(instr);
+ return;
+ }
+ DataType dt2 = Dt_op_U_1_Decode2(
+ ((instr >> 28) & 0x1) |
+ ((instr >> 7) & 0x2));
+ if (dt2.Is(kDataTypeValueInvalid)) {
+ UnallocatedT32(instr);
+ return;
+ }
+ unsigned rd =
+ ExtractDRegister(instr, 22, 12);
+ unsigned rm = ExtractDRegister(instr, 5, 0);
+ uint32_t fbits =
+ 64 - ((instr >> 16) & 0x3f);
+ // VCVT{<c>}{<q>}.<dt1>.<dt2> <Dd>
, , # ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + DRegister(rd), + DRegister(rm), + fbits); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00800040: { + // 0xef800050 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800070 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800150 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800150 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800170 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800050 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSHR{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vshr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000100: { + // 0xef800150 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vsra(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800250 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vrshr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800350 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. 
{}, , # ; T1 NOLINT(whitespace/line_length) + vrsra(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800450 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800450 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800450 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800470 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800550 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800550 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800570 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800450 + if ((instr & 0x10000000) == 0x10000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vsri(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xef800550 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_3_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {}, , # ; T1 NOLINT(whitespace/line_length) + vshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vsli(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800650 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vqshlu(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800750 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vqshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800850 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800870 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800950 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800950 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800970 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VRSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vrshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqrshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800950 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqrshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xef800c50 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xef800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000300: { + // 0xef800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800c70 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000200: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000220: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000400: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000420: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000600: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000620: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000800: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000820: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000a00: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000a20: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000c00: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000c20: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000d00: { + // 0xef800d50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000d20: { + // 0xef800d70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000e00: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000e20: { + // 0xef800e70 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000f00: { + // 0xef800f50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t fbits = + 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
<dt1>.<dt2> <Qd>
, , # ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + QRegister(rd), + QRegister(rm), + fbits); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } +} // NOLINT(readability/fn_size) + +void Disassembler::DecodeA32(uint32_t instr) { + A32CodeAddressIncrementer incrementer(&code_address_); + if ((instr & 0xf0000000) == 0xf0000000) { + switch (instr & 0x0e000000) { + case 0x00000000: { + // 0xf0000000 + switch (instr & 0x01f10020) { + case 0x01000000: { + // 0xf1000000 + switch (instr & 0x000e0000) { + case 0x00020000: { + // 0xf1020000 + if ((instr & 0x000001c0) == 0x00000000) { + UnimplementedA32("CPS", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00080000: { + // 0xf1080000 + if ((instr & 0x0000001f) == 0x00000000) { + UnimplementedA32("CPSIE", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0xf10a0000 + UnimplementedA32("CPSIE", instr); + break; + } + case 0x000c0000: { + // 0xf10c0000 + if ((instr & 0x0000001f) == 0x00000000) { + UnimplementedA32("CPSID", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000e0000: { + // 0xf10e0000 + UnimplementedA32("CPSID", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01010000: { + // 0xf1010000 + if ((instr & 0x000000d0) == 0x00000000) { + UnimplementedA32("SETEND", instr); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x02000000: { + // 0xf2000000 + switch (instr & 0x00800010) { + case 0x00000000: { + // 0xf2000000 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xf2000000 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHADD{}{}.
<dt> {<Dd>
}, , ; A1 + vhadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf2000040 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHADD{}{}.
{}, , ; A1 + vhadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2000100 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRHADD{}{}.
<dt> {<Dd>
}, , ; A1 + vrhadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf2000140 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRHADD{}{}.
{}, , ; A1 + vrhadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2000200 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHSUB{}{}.
<dt> {<Dd>
}, , ; A1 + vhsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000240: { + // 0xf2000240 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHSUB{}{}.
{}, , ; A1 + vhsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2000300 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
<dt> {<Dd>
}, , ; A1 + vcgt(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000340: { + // 0xf2000340 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , ; A1 + vcgt(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf2000400 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VSHL{}{}.
<dt> {<Dd>
}, , ; A1 + vshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000440: { + // 0xf2000440 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VSHL{}{}.
{}, , ; A1 + vshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000500: { + // 0xf2000500 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VRSHL{}{}.
<dt> {<Dd>
}, , ; A1 + vrshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000540: { + // 0xf2000540 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VRSHL{}{}.
{}, , ; A1 + vrshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000600: { + // 0xf2000600 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.
<dt> {<Dd>
}, , ; A1 + vmax(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000640: { + // 0xf2000640 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.
{}, , ; A1 + vmax(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000700: { + // 0xf2000700 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.
<dt> {<Dd>
}, , ; A1 + vabd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000740: { + // 0xf2000740 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.
{}, , ; A1 + vabd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000800: { + // 0xf2000800 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000800 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.
<dt> {<Dd>
}, , ; A1 + vadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000800 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.
<dt> {<Dd>
}, , ; A1 + vsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xf2000840 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000840 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.
{}, , ; A1 + vadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000840 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.
{}, , ; A1 + vsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xf2000900 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000900 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.
, , ; A1 + vmla(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000900 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.
, , ; A1 + vmls(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000940: { + // 0xf2000940 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000940 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}. , , ; A1 + vmla(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000940 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}. , , ; A1 + vmls(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000a00: { + // 0xf2000a00 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.
<dt> {<Dd>
}, , ; A1 + vpmax(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xf2000b00 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000b00 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULH{}{}.
<dt> {<Dd>
}, , ; A1 + vqdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000b00 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQRDMULH{}{}.
<dt> {<Dd>
}, , ; A1 + vqrdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000b40: { + // 0xf2000b40 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000b40 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQDMULH{}{}.
{}, , ; A1 + vqdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000b40 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQRDMULH{}{}.
{}, , ; A1 + vqrdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000c40: { + // 0xf2000c40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c40 + UnimplementedA32("SHA1C", instr); + break; + } + case 0x00100000: { + // 0xf2100c40 + UnimplementedA32("SHA1P", instr); + break; + } + case 0x00200000: { + // 0xf2200c40 + UnimplementedA32("SHA1M", instr); + break; + } + case 0x00300000: { + // 0xf2300c40 + UnimplementedA32("SHA1SU0", instr); + break; + } + case 0x01000000: { + // 0xf3000c40 + UnimplementedA32("SHA256H", instr); + break; + } + case 0x01100000: { + // 0xf3100c40 + UnimplementedA32("SHA256H2", instr); + break; + } + case 0x01200000: { + // 0xf3200c40 + UnimplementedA32("SHA256SU1", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xf2000d00 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F32 {
}, , ; A1 + vadd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F32 {
}, , ; A1 + vsub(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.F32 {
}, , ; A1 + vpadd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.F32 {
}, , ; A1 + vabd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xf2000d40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; A1 + vadd(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; A1 + vsub(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.F32 {}, , ; A1 + vabd(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xf2000e00 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0xf2000e00 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
<dt> {<Dd>
}, , ; A2 + vceq(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.F32 {
}, , ; A2 + vcge(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x01200000: { + // 0xf3200e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.F32 {
}, , ; A2 + vcgt(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xf2000e40 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0xf2000e40 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; A2 + vceq(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.F32 {}, , ; A2 + vcge(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x01200000: { + // 0xf3200e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.F32 {}, , ; A2 + vcgt(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xf2000f00 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.F32 {
}, , ; A1 + vmax(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.F32 {
}, , ; A1 + vmin(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.F32 {
}, , ; A1 + vpmax(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.F32 {
}, , ; A1 + vpmin(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xf2000f40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.F32 {}, , ; A1 + vmax(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.F32 {}, , ; A1 + vmin(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000010: { + // 0xf2000010 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xf2000010 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQADD{}{}.
<dt> {<Dd>
}, , ; A1 + vqadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf2000050 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQADD{}{}.
{}, , ; A1 + vqadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2000110 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VAND{}{}{.
<dt>} {<Dd>
}, , ; A1 + vand(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100000: { + // 0xf2100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIC{}{}{.
<dt>} {<Dd>
}, , ; A1 + vbic(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200110 + if (((instr & 0x00000040) == 0x00000000) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) << 4) | + (Uint32(instr) & Uint32(0xf))))) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
<dt>} <Dd>
, ; A1 + vmov(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORR{}{}{.
<dt>} {<Dd>
}, , ; A1 + vorr(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300000: { + // 0xf2300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORN{}{}{.
<dt>} {<Dd>
}, , ; A1 + vorn(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VEOR{}{}{.
<dt>} {<Dd>
}, , ; A1 + veor(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01100000: { + // 0xf3100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBSL{}{}{.
<dt>} {<Dd>
}, , ; A1 + vbsl(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIT{}{}{.
<dt>} {<Dd>
}, , ; A1 + vbit(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01300000: { + // 0xf3300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIF{}{}{.
<dt>} {<Dd>
}, , ; A1 + vbif(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000140: { + // 0xf2000150 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VAND{}{}{.
} {}, , ; A1 + vand(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00100000: { + // 0xf2100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIC{}{}{.
} {}, , ; A1 + vbic(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200150 + if (((instr & 0x00000040) == 0x00000040) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) << 4) | + (Uint32(instr) & Uint32(0xf))))) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; A1 + vmov(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORR{}{}{.
} {}, , ; A1 + vorr(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00300000: { + // 0xf2300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORN{}{}{.
} {}, , ; A1 + vorn(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VEOR{}{}{.
} {}, , ; A1 + veor(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01100000: { + // 0xf3100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBSL{}{}{.
} {}, , ; A1 + vbsl(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIT{}{}{.
} {}, , ; A1 + vbit(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01300000: { + // 0xf3300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIF{}{}{.
} {}, , ; A1 + vbif(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2000210 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQSUB{}{}.
<dt> {<Dd>
}, , ; A1 + vqsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000240: { + // 0xf2000250 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQSUB{}{}.
{}, , ; A1 + vqsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2000310 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
<dt> {<Dd>
}, , ; A1 + vcge(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000340: { + // 0xf2000350 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , ; A1 + vcge(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf2000410 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQSHL{}{}.
<dt> {<Dd>
}, , ; A1 + vqshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000440: { + // 0xf2000450 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQSHL{}{}.
{}, , ; A1 + vqshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000500: { + // 0xf2000510 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQRSHL{}{}.
<dt> {<Dd>
}, , ; A1 + vqrshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000540: { + // 0xf2000550 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQRSHL{}{}.
{}, , ; A1 + vqrshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000600: { + // 0xf2000610 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.
<dt> {<Dd>
}, , ; A1 + vmin(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000640: { + // 0xf2000650 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.
{}, , ; A1 + vmin(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000700: { + // 0xf2000710 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABA{}{}.
, , ; A1 + vaba(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000740: { + // 0xf2000750 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABA{}{}.
, , ; A1 + vaba(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000800: { + // 0xf2000810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000810 + DataType dt = Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTST{}{}.
<dt> {<Dd>
}, , ; A1 + vtst(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000810 + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
<dt> {<Dd>
}, , ; A1 + vceq(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xf2000850 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000850 + DataType dt = Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTST{}{}.
{}, , ; A1 + vtst(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000850 + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; A1 + vceq(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xf2000910 + DataType dt = Dt_op_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.
<dt> {<Dd>
}, , ; A1 + vmul(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000940: { + // 0xf2000950 + DataType dt = Dt_op_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.
{}, , ; A1 + vmul(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000a00: { + // 0xf2000a10 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.
<dt> {<Dd>
}, , ; A1 + vpmin(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xf2000b10 + if ((instr & 0x01000000) == 0x00000000) { + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.
<dt> {<Dd>
}, , ; A1 + vpadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000c00: { + // 0xf2000c10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F32
, , ; A1 + vfma(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F32
, , ; A1 + vfms(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c40: { + // 0xf2000c50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; A1 + vfma(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; A1 + vfms(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xf2000d10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F32
, , ; A1 + vmla(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F32
, , ; A1 + vmls(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F32 {
}, , ; A1 + vmul(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xf2000d50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; A1 + vmla(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; A1 + vmls(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; A1 + vmul(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xf2000e10 + switch (instr & 0x01300000) { + case 0x01000000: { + // 0xf3000e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGE{}{}.F32 {
}, , ; A1 + vacge(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGT{}{}.F32 {
}, , ; A1 + vacgt(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xf2000e50 + switch (instr & 0x01300000) { + case 0x01000000: { + // 0xf3000e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGE{}{}.F32 {}, , ; A1 + vacge(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGT{}{}.F32 {}, , ; A1 + vacgt(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xf2000f10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPS{}{}.F32 {
}, , ; A1 + vrecps(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {
}, , ; A1 + vrsqrts(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F32
, , ; A1 + vmaxnm(F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F32
, , ; A1 + vminnm(F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xf2000f50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPS{}{}.F32 {}, , ; A1 + vrecps(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {}, , ; A1 + vrsqrts(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; A1 + vmaxnm(F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; A1 + vminnm(F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xf2800000 + switch (instr & 0x00300000) { + case 0x00300000: { + // 0xf2b00000 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b00000 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf2b00000 + if (((instr & 0x800) == 0x800)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {
}, , , # ; A1 + vext(al, + Untyped8, + DRegister(rd), + DRegister(rn), + DRegister(rm), + imm); + break; + } + case 0x00000040: { + // 0xf2b00040 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {}, , , # ; A1 + vext(al, + Untyped8, + QRegister(rd), + QRegister(rn), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x01000000: { + // 0xf3b00000 + switch (instr & 0x00000800) { + case 0x00000000: { + // 0xf3b00000 + switch (instr & 0x00030200) { + case 0x00000000: { + // 0xf3b00000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xf3b00000 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV64{}{}.
, ; A1 + vrev64(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00040 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV64{}{}.
, ; A1 + vrev64(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b00080 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV32{}{}.
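+ // Recurring pattern: a Dt_*_Decode helper maps the size field (bits
+ // 19:18 here) to an element DataType, and encodings the table does not
+ // allocate come back as kDataTypeValueInvalid, rejected through
+ // UnallocatedA32 before any register field is read.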
, ; A1 + vrev32(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xf3b000c0 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV32{}{}.
, ; A1 + vrev32(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b00100 + DataType dt = + Dt_size_1_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV16{}{}.
, ; A1 + vrev16(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b00140 + DataType dt = + Dt_size_1_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV16{}{}.
, ; A1 + vrev16(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b00400 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLS{}{}.
, ; A1 + vcls(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b00440 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLS{}{}.
, ; A1 + vcls(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000480: { + // 0xf3b00480 + DataType dt = + Dt_size_4_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLZ{}{}.
, ; A1 + vclz(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xf3b004c0 + DataType dt = + Dt_size_4_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLZ{}{}.
, ; A1 + vclz(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b00500 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCNT{}{}.8
, ; A1 + vcnt(al, + Untyped8, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000540: { + // 0xf3b00540 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCNT{}{}.8 , ; A1 + vcnt(al, + Untyped8, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000580: { + // 0xf3b00580 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMVN{}{}{.
<dt>} <Dd>
, ; A1 + vmvn(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000005c0: { + // 0xf3b005c0 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMVN{}{}{.
} , ; A1 + vmvn(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000200: { + // 0xf3b00200 + switch (instr & 0x00000540) { + case 0x00000000: { + // 0xf3b00200 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADDL{}{}.
, ; A1 + vpaddl(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00240 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VPADDL{}{}.
, ; A1 + vpaddl(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b00300 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00300 + UnimplementedA32("AESE", instr); + break; + } + case 0x00000080: { + // 0xf3b00380 + UnimplementedA32("AESMC", instr); + break; + } + } + break; + } + case 0x00000140: { + // 0xf3b00340 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00340 + UnimplementedA32("AESD", instr); + break; + } + case 0x00000080: { + // 0xf3b003c0 + UnimplementedA32("AESIMC", instr); + break; + } + } + break; + } + case 0x00000400: { + // 0xf3b00600 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADAL{}{}.
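+ // The v8 Crypto Extension encodings (AESE/AESD/AESMC/AESIMC above, and
+ // the SHA* cases further down) are recognised but handed to
+ // UnimplementedA32 with the mnemonic string instead of being decoded
+ // into operands.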
, ; A1 + vpadal(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b00640 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VPADAL{}{}.
, ; A1 + vpadal(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b00700 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00700 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQABS{}{}.
, ; A1 + vqabs(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b00780 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQNEG{}{}.
, ; A1 + vqneg(al, dt, DRegister(rd), DRegister(rm)); + break; + } + } + break; + } + case 0x00000540: { + // 0xf3b00740 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00740 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQABS{}{}.
, ; A1 + vqabs(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b007c0 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQNEG{}{}.
, ; A1 + vqneg(al, dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00010000: { + // 0xf3b10000 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xf3b10000 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
<dt> {<Dd>
}, , #0 ; A1 + vcgt(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xf3b10040 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
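+ // The compare-with-zero group (VCGT/VCGE/VCEQ/VCLE/VCLT ... #0) shares
+ // one shape: Dt_F_size_1_Decode receives the 2-bit size with the F bit
+ // (bit 10, via (instr >> 8) & 0x4) spliced on top, choosing integer or
+ // F32 element types, and the zero operand is the literal UINT32_C(0).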
{}, , #0 ; A1 + vcgt(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000080: { + // 0xf3b10080 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
<dt> {<Dd>
}, , #0 ; A1 + vcge(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xf3b100c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , #0 ; A1 + vcge(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000100: { + // 0xf3b10100 + DataType dt = + Dt_F_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
<dt> {<Dd>
}, , #0 ; A1 + vceq(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000140: { + // 0xf3b10140 + DataType dt = + Dt_F_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , #0 ; A1 + vceq(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000180: { + // 0xf3b10180 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLE{}{}.
<dt> {<Dd>
}, , #0 ; A1 + vcle(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000001c0: { + // 0xf3b101c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLE{}{}.
{}, , #0 ; A1 + vcle(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + } + break; + } + case 0x00010200: { + // 0xf3b10200 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xf3b10200 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLT{}{}.
<dt> {<Dd>
}, , #0 ; A1 + vclt(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xf3b10240 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLT{}{}.
{}, , #0 ; A1 + vclt(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xf3b102c0 + if ((instr & 0x000c0400) == 0x00080000) { + UnimplementedA32("SHA1H", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf3b10300 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABS{}{}.
, ; A1 + vabs(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b10340 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABS{}{}.
, ; A1 + vabs(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000180: { + // 0xf3b10380 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNEG{}{}.
, ; A1 + vneg(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xf3b103c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VNEG{}{}.
, ; A1 + vneg(al, dt, QRegister(rd), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00020000: { + // 0xf3b20000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xf3b20000 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSWP{}{}{.
<dt>} <Dd>
, ; A1 + vswp(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000040: { + // 0xf3b20040 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSWP{}{}{.
} , ; A1 + vswp(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000080: { + // 0xf3b20080 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTRN{}{}.
, ; A1 + vtrn(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xf3b200c0 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTRN{}{}.
, ; A1 + vtrn(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b20100 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VUZP{}{}.
, ; A1 + vuzp(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b20140 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VUZP{}{}.
, ; A1 + vuzp(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000180: { + // 0xf3b20180 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VZIP{}{}.
, ; A1 + vzip(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xf3b201c0 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VZIP{}{}.
, ; A1 + vzip(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b20400 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTN{}.
, ; A1 + vrintn(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b20440 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTN{}.
, ; A1 + vrintn(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000480: { + // 0xf3b20480 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}.
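+ // The VRINT* ops round F32 lanes to integral values in place; the
+ // suffix names the rounding mode: N nearest-ties-to-even, A nearest-
+ // ties-away, P toward +infinity, M toward -infinity, Z toward zero,
+ // and X rounds per the FPSCR mode and signals Inexact.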
, ; A1 + vrintx(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xf3b204c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTX{}.
, ; A1 + vrintx(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b20500 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTA{}.
, ; A1 + vrinta(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000540: { + // 0xf3b20540 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTA{}.
, ; A1 + vrinta(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000580: { + // 0xf3b20580 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}.
, ; A1 + vrintz(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000005c0: { + // 0xf3b205c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTZ{}.
, ; A1 + vrintz(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + case 0x00020200: { + // 0xf3b20200 + switch (instr & 0x00000580) { + case 0x00000000: { + // 0xf3b20200 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20200 + DataType dt = + Dt_size_3_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMOVN{}{}.
, ; A1 + vmovn(al, dt, DRegister(rd), QRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b20240 + DataType dt = + Dt_size_14_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQMOVUN{}{}.
, ; A1 + vqmovun(al, + dt, + DRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000080: { + // 0xf3b20280 + DataType dt = + Dt_op_size_3_Decode(((instr >> 18) & 0x3) | + ((instr >> 4) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQMOVN{}{}.
, ; A1 + vqmovn(al, dt, DRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b20300 + if ((instr & 0x00000040) == 0x00000000) { + DataType dt = + Dt_size_17_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = dt.GetSize(); + // VSHLL{}{}. , , # ; A2 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000180: { + // 0xf3b20380 + switch (instr & 0x000c0040) { + case 0x00080000: { + // 0xf3ba0380 + UnimplementedA32("SHA1SU1", instr); + break; + } + case 0x00080040: { + // 0xf3ba03c0 + UnimplementedA32("SHA256SU0", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xf3b20600 + if ((instr & 0x000c0040) == 0x00040000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVT{}{}.F16.F32
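+ // These two VCVT forms change element width, which is why the operand
+ // kinds differ: .F16.F32 narrows a Q of four F32 lanes into a D of four
+ // F16 lanes, and the .F32.F16 case below widens a D of F16 back into a
+ // Q of F32.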
, ; A1 + vcvt(al, + F16, + F32, + DRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000480: { + // 0xf3b20680 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20680 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTM{}.
, ; A1 + vrintm(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b206c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTM{}.
, ; A1 + vrintm(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + case 0x00000500: { + // 0xf3b20700 + if ((instr & 0x000c0040) == 0x00040000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F16 , ; A1 + vcvt(al, + F32, + F16, + QRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000580: { + // 0xf3b20780 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20780 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTP{}.
, ; A1 + vrintp(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b207c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTP{}.
, ; A1 + vrintp(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030000: { + // 0xf3b30000 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xf3b30000 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xf3bb0000 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTA{}.
<dt>.F32 <Dd>
, ; A1 + vcvta(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080100: { + // 0xf3bb0100 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTN{}.
<dt>.F32 <Dd>
, ; A1 + vcvtn(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000040: { + // 0xf3b30040 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xf3bb0040 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; A1 + vcvta(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00080100: { + // 0xf3bb0140 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; A1 + vcvtn(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xf3b30400 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b30400 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPE{}{}.
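+ // VCVTA/VCVTN/VCVTP/VCVTM convert F32 lanes to 32-bit integers using
+ // the rounding mode named by the suffix; Dt_op_3_Decode on bit 7
+ // presumably selects the signed (S32) or unsigned (U32) result type.
+ // The VRECPE/VRSQRTE cases below are the estimate instructions that
+ // pair with the VRECPS/VRSQRTS Newton-Raphson step ops decoded earlier.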
, ; A1 + vrecpe(al, + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b30480 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; A1 + vrsqrte(al, + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xf3b30440 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b30440 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPE{}{}.
, ; A1 + vrecpe(al, + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b304c0 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; A1 + vrsqrte(al, + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030200: { + // 0xf3b30200 + switch (instr & 0x000c0440) { + case 0x00080000: { + // 0xf3bb0200 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf3bb0200 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTP{}.
<dt>.F32 <Dd>
, ; A1 + vcvtp(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3bb0300 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTM{}.
<dt>.F32 <Dd>
, ; A1 + vcvtm(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080040: { + // 0xf3bb0240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf3bb0240 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; A1 + vcvtp(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3bb0340 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; A1 + vcvtm(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00080400: { + // 0xf3bb0600 + DataType dt1 = + Dt_op_1_Decode1((instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = + Dt_op_1_Decode2((instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.
<dt1>.<dt2> <Dd>
, ; A1 + vcvt(al, + dt1, + dt2, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080440: { + // 0xf3bb0640 + DataType dt1 = + Dt_op_1_Decode1((instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = + Dt_op_1_Decode2((instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVT{}{}.
<dt1>.<dt2> <Qd>
, ; A1 + vcvt(al, + dt1, + dt2, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf3b00800 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xf3b00800 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned first = ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTBL{}{}.8
, , ; A1 + vtbl(al, + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00840 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned first = ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTBX{}{}.8
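+ // VTBL/VTBX take a list of one to four consecutive D registers (length
+ // from bits 9:8, hence the exhaustive 2-bit switch guarded by
+ // VIXL_UNREACHABLE_OR_FALLTHROUGH) and treat <Dm> as a vector of byte
+ // indices. Architecturally, an out-of-range index writes zero under
+ // VTBL but leaves the destination byte unchanged under VTBX.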
, , ; A1 + vtbx(al, + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b00c00 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & 0xf, &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; A1 + vdup(al, + dt, + DRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000440: { + // 0xf3b00c40 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & 0xf, &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
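+ // For VDUP (scalar), Dt_imm4_1_Decode recovers both the element type
+ // and the source lane from the single imm4 field in bits 19:16; in this
+ // encoding the lowest set bit selects the size (xxx1: 8-bit, xx10:
+ // 16-bit, x100: 32-bit) and the bits above it are the lane number,
+ // which is presumably what the helper unpacks into dt and lane.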
, ; A1 + vdup(al, + dt, + QRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000c40) { + case 0x00000000: { + // 0xf2800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800000 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDL{}{}.
, , ; A1 + vaddl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800100 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDW{}{}.
{}, , ; A1 + vaddw(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2800200 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBL{}{}.
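+ // Naming in this block: the "L" forms (VADDL/VSUBL) take two D sources
+ // and produce a double-width Q result, the "W" forms (VADDW/VSUBW) take
+ // a Q accumulator with a D source, and the "HN" forms later narrow a
+ // Q,Q pair back to a D. The (instr & 0x300000) == 0x300000 guard
+ // rejects size == 0b11, which this space does not allocate.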
, , ; A1 + vsubl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2800300 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBW{}{}.
{}, , ; A1 + vsubw(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000040: { + // 0xf2800040 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800040 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLA{}{}.
, , ; A1 NOLINT(whitespace/line_length) + vmla(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLA{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmla(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800240 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLAL{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800340 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLAL{}{}.
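+ // By-scalar forms pack M:Vm into the 5-bit mvm value and split it into
+ // register and lane: 32-bit elements use the low 4 bits as the D
+ // register and the top bit as the lane (shift == 4), while 16-bit
+ // elements may only use D0-D7, leaving 3 register bits and a 2-bit
+ // lane (shift == 3).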
, , [] ; A2 NOLINT(whitespace/line_length) + vqdmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800400 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800400 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADDHN{}{}.
, , ; A1 + vaddhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRADDHN{}{}.
, , ; A1 + vraddhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800500 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABAL{}{}.
, , ; A1 + vabal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2800600 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUBHN{}{}.
, , ; A1 + vsubhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSUBHN{}{}.
, , ; A1 + vrsubhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000300: { + // 0xf2800700 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABDL{}{}.
, , ; A1 + vabdl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xf2800440 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800440 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLS{}{}.
, , ; A1 NOLINT(whitespace/line_length) + vmls(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLS{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmls(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800640 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800640 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLSL{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800740 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLSL{}{}.
, , [] ; A2 NOLINT(whitespace/line_length) + vqdmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800800 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800800 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLAL{}{}. , , ; A1 + vmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800900 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLAL{}{}.
, , ; A1 + vqdmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000200: { + // 0xf2800a00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLSL{}{}. , , ; A1 + vmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2800b00 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLSL{}{}.
, , ; A1 + vqdmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + case 0x00000840: { + // 0xf2800840 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800840 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
<dt> {<Dd>
}, , [] ; A1 NOLINT(whitespace/line_length) + vmul(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x01000000: { + // 0xf3800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{}, , [] ; A1 NOLINT(whitespace/line_length) + vmul(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(vm), + index); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800a40 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800a40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_2_Decode( + ((instr >> 20) & 0x3) | ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMULL{}{}.
, , [] ; A1 NOLINT(whitespace/line_length) + vmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x00000100: { + // 0xf2800b40 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULL{}{}.
, , ; A2 + vqdmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000c00: { + // 0xf2800c00 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800c00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_op_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 22) & 0x4) | + ((instr >> 6) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMULL{}{}.
, , ; A1 + vmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800d00 + if ((instr & 0x01000200) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULL{}{}.
, , ; A1 + vqdmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + case 0x00000c40: { + // 0xf2800c40 + switch (instr & 0x01000300) { + case 0x00000000: { + // 0xf2800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQDMULH{}{}.
<dt> {<Dd>}, <Dn>, <Dm[x]> ; A2 + vqdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQRDMULH{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm[x]> ; A2 + vqrdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQDMULH{<c>}{<q>}.<dt>
{}, , ; A2 + vqdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000100: { + // 0xf3800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQRDMULH{}{}.
{}, , ; A2 + vqrdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + case 0x00800010: { + // 0xf2800010 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800030 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
<dt> <Dd>, #<imm> ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.<dt> <Dd>, #<imm> ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800110 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800110 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800130 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{<c>}{<q>}.<dt> {<Ddn>}, <Ddn>, #<imm> ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800010 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSHR{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vshr(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000100: { + // 0xf2800110 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRA{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vsra(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000200: { + // 0xf2800210 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSHR{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vrshr(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800310 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSRA{<c>}{<q>}.<dt> {<Dd>
}, , # ; A1 NOLINT(whitespace/line_length) + vrsra(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800410 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800410 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800410 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800430 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800510 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800510 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800530 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800410 + if ((instr & 0x01000000) == 0x01000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
<dt> {<Dd>}, <Dm>, #<imm> ; A1 + vsri(al, dt, DRegister(rd), DRegister(rm), imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf2800510 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_3_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{<c>}{<q>}.I<size> {<Dd>}, <Dm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vshl(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x01000000: { + // 0xf3800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_4_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; A1 + vsli(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800610 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{<c>}{<q>}.<dt> {<Dd>}, <Dm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vqshlu(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800710 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{<c>}{<q>}.<dt> {<Dd>
}, , # ; A1 NOLINT(whitespace/line_length) + vqshl(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800810 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800830 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800910 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800910 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xf2800930 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00180000: { + // 0xf2980810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2980810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
<size> <Dd>, <Qm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{<c>}{<q>}.<dt> <Dd>, <Qm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2980910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{<c>}{<q>}.<dt> <Dd>
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00280000: { + // 0xf2a80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2a80810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2a80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3a80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2a80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2a80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00300000: { + // 0xf2b00810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2b00810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3b00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2b00910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2b00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00380000: { + // 0xf2b80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2b80810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3b80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2b80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2b80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2800a10 + switch (instr & 0x00070000) { + case 0x00000000: { + // 0xf2800a10 + switch (instr & 0x003f0000) { + case 0x00080000: { + // 0xf2880a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00090000: { + // 0xf2890a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000a0000: { + // 0xf28a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000b0000: { + // 0xf28b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000c0000: { + // 0xf28c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000d0000: { + // 0xf28d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000e0000: { + // 0xf28e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000f0000: { + // 0xf28f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00100000: { + // 0xf2900a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00110000: { + // 0xf2910a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00120000: { + // 0xf2920a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00130000: { + // 0xf2930a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00140000: { + // 0xf2940a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00150000: { + // 0xf2950a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00160000: { + // 0xf2960a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00170000: { + // 0xf2970a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00180000: { + // 0xf2980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00190000: { + // 0xf2990a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001a0000: { + // 0xf29a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001b0000: { + // 0xf29b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001c0000: { + // 0xf29c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001d0000: { + // 0xf29d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001e0000: { + // 0xf29e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001f0000: { + // 0xf29f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00200000: { + // 0xf2a00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00210000: { + // 0xf2a10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00220000: { + // 0xf2a20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00230000: { + // 0xf2a30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00240000: { + // 0xf2a40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00250000: { + // 0xf2a50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00260000: { + // 0xf2a60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00270000: { + // 0xf2a70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00280000: { + // 0xf2a80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00290000: { + // 0xf2a90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002a0000: { + // 0xf2aa0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002b0000: { + // 0xf2ab0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002c0000: { + // 0xf2ac0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002d0000: { + // 0xf2ad0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002e0000: { + // 0xf2ae0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002f0000: { + // 0xf2af0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00300000: { + // 0xf2b00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00310000: { + // 0xf2b10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00320000: { + // 0xf2b20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00330000: { + // 0xf2b30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00340000: { + // 0xf2b40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00350000: { + // 0xf2b50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00360000: { + // 0xf2b60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00370000: { + // 0xf2b70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00380000: { + // 0xf2b80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00390000: { + // 0xf2b90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003a0000: { + // 0xf2ba0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003b0000: { + // 0xf2bb0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003c0000: { + // 0xf2bc0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003d0000: { + // 0xf2bd0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003e0000: { + // 0xf2be0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003f0000: { + // 0xf2bf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xf2800c10 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
<dt> <Dd>, #<imm> ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000300: { + // 0xf2800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.
<dt> <Dd>, #<imm> ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800c30 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.
<dt> <Dd>, #<imm> ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{<c>}{<q>}.
<dt> <Dd>, #<imm> ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000200: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.
<dt> <Dd>, #<imm> ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000220: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{<c>}{<q>}.
<dt> <Dd>, #<imm> ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000400: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000420: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000600: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000620: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000800: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000820: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000a00: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000a20: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000c00: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000c20: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000d00: { + // 0xf2800d10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000d20: { + // 0xf2800d30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000e00: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000e20: { + // 0xf2800e30 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000f00: { + // 0xf2800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t fbits = 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
<dt>.<dt>
, , # ; A1 NOLINT(whitespace/line_length) + vcvt(al, + dt1, + dt2, + DRegister(rd), + DRegister(rm), + fbits); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000040: { + // 0xf2800050 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800070 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
<dt> <Qd>, #<imm> ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{<c>}{<q>}.
<dt> <Qd>, #<imm> ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800150 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800150 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{<c>}{<q>}.
<dt> {<Qdn>}, <Qdn>, #<imm> ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800170 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{<c>}{<q>}.
<dt> {<Qdn>}, <Qdn>, #<imm> ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800050 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSHR{<c>}{<q>}.<dt> {<Qd>}, <Qm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vshr(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000100: { + // 0xf2800150 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRA{<c>}{<q>}.<dt> {<Qd>}, <Qm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vsra(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000200: { + // 0xf2800250 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSHR{<c>}{<q>}.<dt> {<Qd>}, <Qm>, #<imm> ; A1 NOLINT(whitespace/line_length) + vrshr(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800350 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSRA{<c>}{<q>}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vrsra(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800450 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800450 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800450 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800470 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800550 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800550 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800570 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800450 + if ((instr & 0x01000000) == 0x01000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{}, , # ; A1 + vsri(al, dt, QRegister(rd), QRegister(rm), imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf2800550 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_3_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {}, , # ; A1 NOLINT(whitespace/line_length) + vshl(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x01000000: { + // 0xf3800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_4_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{}, , # ; A1 + vsli(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800650 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {}, , # ; A1 NOLINT(whitespace/line_length) + vqshlu(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800750 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. {}, , # ; A1 NOLINT(whitespace/line_length) + vqshl(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800850 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800870 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800950 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800950 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xf2800970 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VRSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vrshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqrshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800950 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqrshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xf2800c50 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000300: { + // 0xf2800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800c70 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000200: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000220: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000400: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000420: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000600: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000620: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000800: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000820: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000a00: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000a20: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000c00: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000c20: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000d00: { + // 0xf2800d50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000d20: { + // 0xf2800d70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000e00: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000e20: { + // 0xf2800e70 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000f00: { + // 0xf2800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t fbits = 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
<dt>.<dt> <Qd>, <Qm>, #<fbits> ; A1 NOLINT(whitespace/line_length) + vcvt(al, + dt1, + dt2, + QRegister(rd), + QRegister(rm), + fbits); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x04000000: { + // 0xf4000000 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf4000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0xf4000000 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf400000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf400000d + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf400000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{<c>}{<q>}.
<dt> <list>, [<Rn>{:<align>}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf400010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf400020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf400030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf400040d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf400050d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf400060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf400070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf400080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf400090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf400000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf400000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf400010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf400020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf400030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf400040d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf400050d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf400060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf400070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf400080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf400090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf4000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf4000000 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf4000100 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf4000200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf4000300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf4000400 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf4000500 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf4000600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf4000700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf4000800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf4000900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4000a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00800000: { + // 0xf4800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf4800000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800c00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf4800100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800d00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf4800200 + switch (instr & 0x00000c30) { + case 0x00000010: { + // 0xf4800210 + UnallocatedA32(instr); + break; + } + case 0x00000030: { + // 0xf4800230 + UnallocatedA32(instr); + break; + } + case 0x00000410: { + // 0xf4800610 + UnallocatedA32(instr); + break; + } + case 0x00000430: { + // 0xf4800630 + UnallocatedA32(instr); + break; + } + case 0x00000810: { + // 0xf4800a10 + UnallocatedA32(instr); + break; + } + case 0x00000820: { + // 0xf4800a20 + UnallocatedA32(instr); + break; + } + case 0x00000830: { + // 0xf4800a30 + UnallocatedA32(instr); + break; + } + case 0x00000c00: { + // 0xf4800e00 + UnallocatedA32(instr); + break; + } + case 0x00000c10: { + // 0xf4800e10 + UnallocatedA32(instr); + break; + } + case 0x00000c20: { + // 0xf4800e20 + UnallocatedA32(instr); + break; + } + case 0x00000c30: { + // 0xf4800e30 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480020d + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, []! ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf480020f + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
+                        //   <dt> <list>, [<Rn>] ; A1
+                        vst3(al, dt,
+                             NeonRegisterList(DRegister(first), DRegister(last), spacing, lane),
+                             MemOperand(Register(rn), Offset));
+                        break;
+                      }
+                    }
+                    break;
+                  }
+                  default: {
+                    if (((instr & 0xc00) == 0xc00) || ((instr & 0xd) == 0xd) ||
+                        ((instr & 0x810) == 0x10) || ((instr & 0xc30) == 0x810) ||
+                        ((instr & 0xc30) == 0x820) || ((instr & 0xc30) == 0x830)) {
+                      UnallocatedA32(instr);
+                      return;
+                    }
+                    DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3);
+                    if (dt.Is(kDataTypeValueInvalid)) {
+                      UnallocatedA32(instr);
+                      return;
+                    }
+                    DecodeNeon decode_neon = Index_1_Decode((instr >> 4) & 0xf, dt);
+                    if (!decode_neon.IsValid()) {
+                      UnallocatedA32(instr);
+                      return;
+                    }
+                    int lane = decode_neon.GetLane();
+                    SpacingType spacing = decode_neon.GetSpacing();
+                    unsigned first = ExtractDRegister(instr, 22, 12);
+                    unsigned length = 3;
+                    unsigned last = first + (length - 1) * (spacing == kSingle ? 1 : 2);
+                    unsigned rn = (instr >> 16) & 0xf;
+                    Sign sign(plus);
+                    unsigned rm = instr & 0xf;
+                    // VST3{<c>}{<q>}.
, [], # ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf4800300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800f00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
+                    //   <dt> <list>, [<Rn>{:<align>}], <Rm> ; A1 NOLINT(whitespace/line_length)
+                    vst4(al, dt,
+                         NeonRegisterList(DRegister(first), DRegister(last), spacing, lane),
+                         AlignedMemOperand(Register(rn), align, Register(rm), PostIndex));
+                    break;
+                  }
+                }
+                break;
+              }
+            }
+            break;
+          }
+        }
+        break;
+      }
+    }
+    break;
+  }
+  case 0x00100000: {
+    // 0xf4100000
+    switch (instr & 0x00400000) {
+      case 0x00400000: {
+        // 0xf4500000
+        switch (instr & 0x000f0000) {
+          case 0x000f0000: {
+            // 0xf45f0000
+            uint32_t U = (instr >> 23) & 0x1;
+            int32_t imm = instr & 0xfff;
+            if (U == 0) imm = -imm;
+            bool minus_zero = (imm == 0) && (U == 0);
+            Location location(imm, kA32PcDelta);
+            // PLI{<c>}{<q>}
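// The PLI decode above treats the U bit (bit 23) as the sign of the 12-bit
// immediate, giving a PC-relative byte offset for the literal form. A
// minimal sketch of that arithmetic (hypothetical helper, not from the
// VIXL sources):
//
//   int32_t PliLiteralOffset(uint32_t instr) {
//     uint32_t u = (instr >> 23) & 0x1;  // U = 1 adds, U = 0 subtracts
//     int32_t imm = instr & 0xfff;       // imm12, a byte offset
//     return (u == 0) ? -imm : imm;      // e.g. imm12 = 64, U = 0 -> -64
//   }
//
// The kA32PcDelta passed to Location accounts for the A32 PC reading as
// the instruction address plus 8.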
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf420010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf420020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf420030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf420040d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf420050d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf420060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf420070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf420080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf420090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf420000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf420000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf420010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf420020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf420030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf420040d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf420050d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf420060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf420070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf420080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf420090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf4200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf4200000 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf4200100 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf4200200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf4200300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf4200400 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf4200500 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf4200600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf4200700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf4200800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf4200900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4200a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00800000: { + // 0xf4a00000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf4a00000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00c00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00c0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00c0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00c0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf4a00100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00d00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00d0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00d0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00d0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf4a00200 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00e00 + switch (instr & 0x00000010) { + case 0x00000000: { + // 0xf4a00e00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00e0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00e0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00e0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0020d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0020f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf4a00300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00f00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00f0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00f0d + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00f0f + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01100000: { + // 0xf5100000 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xf51f0000 + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xfff; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // PLD{}{}
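// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated decoder). Two computations
// recur in the VLD2/VLD3/VLD4 cases above. First, the last register of a
// NEON list follows from the spacing: single-spaced lists use consecutive
// D registers, double-spaced lists skip every other one. Second, the
// PC-relative PLD case derives a signed offset from a 12-bit field plus the
// U (add/subtract) bit at bit 23, where offset 0 with U == 0 is the distinct
// "minus zero" (#-0) form. Minimal standalone versions, with hypothetical
// Sketch* names:
static unsigned SketchLastListRegister(unsigned first,
                                       unsigned length,
                                       bool double_spaced) {
  // A double-spaced VLD3 starting at d1 touches d1, d3, d5.
  return first + (length - 1) * (double_spaced ? 2 : 1);
}
static int32_t SketchPcRelativeOffset(uint32_t instr, bool* minus_zero) {
  uint32_t u = (instr >> 23) & 0x1;      // U == 1: add; U == 0: subtract.
  int32_t imm = instr & 0xfff;           // 12-bit unsigned offset field.
  if (u == 0) imm = -imm;
  *minus_zero = (imm == 0) && (u == 0);  // #-0 prints differently from #0.
  return imm;
}
// ---------------------------------------------------------------------------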
+              // VSELEQ.F64 <Dd>, <Dn>, <Dm> ; A1
+              vseleq(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x00100a00: {
+              // 0xfe100a00
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rn = ExtractSRegister(instr, 7, 16);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VSELVS.F32 <Sd>, <Sn>, <Sm> ; A1
+              vselvs(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+              break;
+            }
+            case 0x00100b00: {
+              // 0xfe100b00
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VSELVS.F64 <Dd>, <Dn>, <Dm> ; A1
+              vselvs(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x00200a00: {
+              // 0xfe200a00
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rn = ExtractSRegister(instr, 7, 16);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VSELGE.F32 <Sd>, <Sn>, <Sm> ; A1
+              vselge(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+              break;
+            }
+            case 0x00200b00: {
+              // 0xfe200b00
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VSELGE.F64 <Dd>, <Dn>, <Dm> ; A1
+              vselge(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x00300a00: {
+              // 0xfe300a00
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rn = ExtractSRegister(instr, 7, 16);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VSELGT.F32 <Sd>, <Sn>, <Sm> ; A1
+              vselgt(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+              break;
+            }
+            case 0x00300b00: {
+              // 0xfe300b00
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VSELGT.F64 <Dd>, <Dn>, <Dm> ; A1
+              vselgt(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x00800a00: {
+              // 0xfe800a00
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rn = ExtractSRegister(instr, 7, 16);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VMAXNM{<q>}.F32 <Sd>, <Sn>, <Sm> ; A2
+              vmaxnm(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+              break;
+            }
+            case 0x00800a40: {
+              // 0xfe800a40
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rn = ExtractSRegister(instr, 7, 16);
+              unsigned rm = ExtractSRegister(instr, 5, 0);
+              // VMINNM{<q>}.F32 <Sd>, <Sn>, <Sm> ; A2
+              vminnm(F32, SRegister(rd), SRegister(rn), SRegister(rm));
+              break;
+            }
+            case 0x00800b00: {
+              // 0xfe800b00
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VMAXNM{<q>}.F64 <Dd>, <Dn>, <Dm> ; A2
+              vmaxnm(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
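// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated decoder). The
// Extract[SD]Register(instr, single, four) calls in these cases plausibly
// rebuild a 5-bit register index from the split A32 encoding fields: one
// lone bit at position `single` and a 4-bit field at position `four`. For D
// registers the lone bit is the high bit (D:Vd, giving d0-d31); for S
// registers it is the low bit (Vd:D, giving s0-s31). A sketch assuming that
// layout:
static unsigned SketchExtractDRegister(uint32_t instr, int single, int four) {
  return (((instr >> single) & 0x1) << 4) | ((instr >> four) & 0xf);
}
static unsigned SketchExtractSRegister(uint32_t instr, int single, int four) {
  return (((instr >> four) & 0xf) << 1) | ((instr >> single) & 0x1);
}
// Under that reading, ExtractDRegister(instr, 22, 12) combines the D bit
// (bit 22) with the Vd field (bits 15:12).
// ---------------------------------------------------------------------------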
+            case 0x00800b40: {
+              // 0xfe800b40
+              unsigned rd = ExtractDRegister(instr, 22, 12);
+              unsigned rn = ExtractDRegister(instr, 7, 16);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VMINNM{<q>}.F64 <Dd>, <Dn>, <Dm> ; A2
+              vminnm(F64, DRegister(rd), DRegister(rn), DRegister(rm));
+              break;
+            }
+            case 0x00b00a40: {
+              // 0xfeb00a40
+              switch (instr & 0x000f0000) {
+                case 0x00080000: {
+                  // 0xfeb80a40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractSRegister(instr, 22, 12);
+                    unsigned rm = ExtractSRegister(instr, 5, 0);
+                    // VRINTA{<q>}.F32 <Sd>, <Sm> ; A1
+                    vrinta(F32, SRegister(rd), SRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x00090000: {
+                  // 0xfeb90a40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractSRegister(instr, 22, 12);
+                    unsigned rm = ExtractSRegister(instr, 5, 0);
+                    // VRINTN{<q>}.F32 <Sd>, <Sm> ; A1
+                    vrintn(F32, SRegister(rd), SRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x000a0000: {
+                  // 0xfeba0a40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractSRegister(instr, 22, 12);
+                    unsigned rm = ExtractSRegister(instr, 5, 0);
+                    // VRINTP{<q>}.F32 <Sd>, <Sm> ; A1
+                    vrintp(F32, SRegister(rd), SRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x000b0000: {
+                  // 0xfebb0a40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractSRegister(instr, 22, 12);
+                    unsigned rm = ExtractSRegister(instr, 5, 0);
+                    // VRINTM{<q>}.F32 <Sd>, <Sm> ; A1
+                    vrintm(F32, SRegister(rd), SRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x000c0000: {
+                  // 0xfebc0a40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractSRegister(instr, 5, 0);
+                  // VCVTA{<q>}.<dt>.F32 <Sd>, <Sm> ; A1
+                  vcvta(dt, F32, SRegister(rd), SRegister(rm));
+                  break;
+                }
+                case 0x000d0000: {
+                  // 0xfebd0a40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractSRegister(instr, 5, 0);
+                  // VCVTN{<q>}.<dt>.F32 <Sd>, <Sm> ; A1
+                  vcvtn(dt, F32, SRegister(rd), SRegister(rm));
+                  break;
+                }
+                case 0x000e0000: {
+                  // 0xfebe0a40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractSRegister(instr, 5, 0);
+                  // VCVTP{<q>}.<dt>.F32 <Sd>, <Sm> ; A1
+                  vcvtp(dt, F32, SRegister(rd), SRegister(rm));
+                  break;
+                }
+                case 0x000f0000: {
+                  // 0xfebf0a40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractSRegister(instr, 5, 0);
+                  // VCVTM{<q>}.<dt>.F32 <Sd>, <Sm> ; A1
+                  vcvtm(dt, F32, SRegister(rd), SRegister(rm));
+                  break;
+                }
+                default:
+                  UnallocatedA32(instr);
+                  break;
+              }
+              break;
+            }
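// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated decoder). In the block
// above and the F64 block below, rows 0x8-0xb of bits 19:16 select the four
// rounding variants of VRINT and rows 0xc-0xf the matching VCVT converts:
// A = to nearest, ties away; N = to nearest even; P = toward +infinity;
// M = toward -infinity. For the converts, bit 7 feeds Dt_op_2_Decode to pick
// the integer destination type; the S32/U32 pair suggests it distinguishes
// signed from unsigned (an assumption here). The row-to-rounding mapping,
// sketched with hypothetical names:
enum class SketchRounding { kTiesAway, kTiesEven, kPlusInf, kMinusInf };
static SketchRounding SketchRoundingFromRow(uint32_t instr) {
  switch ((instr >> 16) & 0x3) {  // Low two bits of the 0x8-0xf row index.
    case 0x0: return SketchRounding::kTiesAway;  // VRINTA / VCVTA
    case 0x1: return SketchRounding::kTiesEven;  // VRINTN / VCVTN
    case 0x2: return SketchRounding::kPlusInf;   // VRINTP / VCVTP
    default:  return SketchRounding::kMinusInf;  // VRINTM / VCVTM
  }
}
// ---------------------------------------------------------------------------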
+            case 0x00b00b40: {
+              // 0xfeb00b40
+              switch (instr & 0x000f0000) {
+                case 0x00080000: {
+                  // 0xfeb80b40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 5, 0);
+                    // VRINTA{<q>}.F64 <Dd>, <Dm> ; A1
+                    vrinta(F64, DRegister(rd), DRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x00090000: {
+                  // 0xfeb90b40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 5, 0);
+                    // VRINTN{<q>}.F64 <Dd>, <Dm> ; A1
+                    vrintn(F64, DRegister(rd), DRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x000a0000: {
+                  // 0xfeba0b40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 5, 0);
+                    // VRINTP{<q>}.F64 <Dd>, <Dm> ; A1
+                    vrintp(F64, DRegister(rd), DRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
+                case 0x000b0000: {
+                  // 0xfebb0b40
+                  if ((instr & 0x00000080) == 0x00000000) {
+                    unsigned rd = ExtractDRegister(instr, 22, 12);
+                    unsigned rm = ExtractDRegister(instr, 5, 0);
+                    // VRINTM{<q>}.F64 <Dd>, <Dm> ; A1
+                    vrintm(F64, DRegister(rd), DRegister(rm));
+                  } else {
+                    UnallocatedA32(instr);
+                  }
+                  break;
+                }
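// Note: in the four VCVT*.<dt>.F64 rows below, the destination is extracted
// with ExtractSRegister while the source uses ExtractDRegister. That
// asymmetry is deliberate: a float-to-integer convert produces a 32-bit
// result, which lands in an S register even when the operand is double
// precision.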
+                case 0x000c0000: {
+                  // 0xfebc0b40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VCVTA{<q>}.<dt>.F64 <Sd>, <Dm> ; A1
+                  vcvta(dt, F64, SRegister(rd), DRegister(rm));
+                  break;
+                }
+                case 0x000d0000: {
+                  // 0xfebd0b40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VCVTN{<q>}.<dt>.F64 <Sd>, <Dm> ; A1
+                  vcvtn(dt, F64, SRegister(rd), DRegister(rm));
+                  break;
+                }
+                case 0x000e0000: {
+                  // 0xfebe0b40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VCVTP{<q>}.<dt>.F64 <Sd>, <Dm> ; A1
+                  vcvtp(dt, F64, SRegister(rd), DRegister(rm));
+                  break;
+                }
+                case 0x000f0000: {
+                  // 0xfebf0b40
+                  DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+                  if (dt.Is(kDataTypeValueInvalid)) {
+                    UnallocatedA32(instr);
+                    return;
+                  }
+                  unsigned rd = ExtractSRegister(instr, 22, 12);
+                  unsigned rm = ExtractDRegister(instr, 5, 0);
+                  // VCVTM{<q>}.<dt>.F64 <Sd>, <Dm> ; A1
+                  vcvtm(dt, F64, SRegister(rd), DRegister(rm));
+                  break;
+                }
+                default:
+                  UnallocatedA32(instr);
+                  break;
+              }
+              break;
+            }
+            default:
+              UnallocatedA32(instr);
+              break;
+          }
+          break;
+        }
+        default:
+          UnallocatedA32(instr);
+          break;
+      }
+    } else {
+      switch (instr & 0x0e000000) {
+        case 0x00000000: {
+          // 0x00000000
+          switch (instr & 0x00100010) {
+            case 0x00000000: {
+              // 0x00000000
+              switch (instr & 0x01a00000) {
+                case 0x00000000: {
+                  // 0x00000000
+                  switch (instr & 0x00400000) {
+                    case 0x00000000: {
+                      // 0x00000000
+                      switch (instr & 0x00000fe0) {
+                        case 0x00000060: {
+                          // 0x00000060
+                          if (((instr & 0xf0000000) == 0xf0000000)) {
+                            UnallocatedA32(instr);
+                            return;
+                          }
+                          Condition condition((instr >> 28) & 0xf);
+                          unsigned rd = (instr >> 12) & 0xf;
+                          unsigned rn = (instr >> 16) & 0xf;
+                          unsigned rm = instr & 0xf;
+                          // AND{<c>}{<q>} {<Rd>}, <Rn>, <Rm>, RRX ; A1
+                          and_(condition, Best, Register(rd), Register(rn), Operand(Register(rm), RRX));
+                          break;
+                        }
+                        default: {
+                          if (((instr & 0xf0000000) == 0xf0000000) ||
+                              ((instr & 0xfe0) == 0x60)) {
+                            UnallocatedA32(instr);
+                            return;
+                          }
+                          Condition condition((instr >> 28) & 0xf);
+                          unsigned rd = (instr >> 12) & 0xf;
+                          unsigned rn = (instr >> 16) & 0xf;
+                          unsigned rm = instr & 0xf;
+                          ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, (instr >> 7) & 0x1f);
+                          // AND{<c>}{<q>} {<Rd>}, <Rn>, <Rm> {, <shift> #<amount> } ; A1 NOLINT(whitespace/line_length)
+                          and_(condition, Best, Register(rd), Register(rn), Operand(Register(rm), shift_operand.GetType(), shift_operand.GetAmount()));
+                          break;
+                        }
+                      }
+                      break;
+                    }
+                    case 0x00400000: {
+                      // 0x00400000
+                      switch (instr & 0x000f0000) {
+                        case 0x000d0000: {
+                          // 0x004d0000
+                          switch (instr & 0x00000fe0) {
+                            case 0x00000060: {
+                              // 0x004d0060
+                              if (((instr & 0xf0000000) == 0xf0000000)) {
+                                UnallocatedA32(instr);
+                                return;
+                              }
+                              Condition condition((instr >> 28) & 0xf);
+                              unsigned rd = (instr >> 12) & 0xf;
+                              unsigned rm = instr & 0xf;
+                              // SUB{<c>}{<q>} {<Rd>}, SP, <Rm>, RRX ; A1
+                              sub(condition, Best, Register(rd), sp, Operand(Register(rm), RRX));
+                              break;
+                            }
+                            default: {
+                              if (((instr & 0xf0000000) == 0xf0000000) ||
+                                  ((instr & 0xfe0) == 0x60)) {
+                                UnallocatedA32(instr);
+                                return;
+                              }
+                              Condition condition((instr >> 28) & 0xf);
+                              unsigned rd = (instr >> 12) & 0xf;
+                              unsigned rm = instr & 0xf;
+                              ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, (instr >> 7) & 0x1f);
+                              // SUB{<c>}{<q>} {<Rd>}, SP, <Rm> {, <shift> #<amount> } ; A1 NOLINT(whitespace/line_length)
+                              sub(condition, Best, Register(rd), sp, Operand(Register(rm), shift_operand.GetType(), shift_operand.GetAmount()));
+                              break;
+                            }
+                          }
+                          break;
+                        }
+                        default: {
+                          switch (instr & 0x00000fe0) {
+                            case 0x00000060: {
+                              // 0x00400060
+                              if (((instr & 0xf0000000) == 0xf0000000) ||
+                                  ((instr & 0xf0000) == 0xd0000)) {
+                                UnallocatedA32(instr);
+                                return;
+                              }
+                              Condition condition((instr >> 28) & 0xf);
+                              unsigned rd = (instr >> 12) & 0xf;
+                              unsigned rn = (instr >> 16) & 0xf;
+                              unsigned rm = instr & 0xf;
+                              // SUB{<c>}{<q>} {<Rd>}, <Rn>, <Rm>, RRX ; A1
+                              sub(condition, Best, Register(rd), Register(rn), Operand(Register(rm), RRX));
+                              break;
+                            }
+                            default: {
+                              if (((instr & 0xf0000000) == 0xf0000000) ||
+                                  ((instr & 0xf0000) == 0xd0000) ||
+                                  ((instr & 0xfe0) == 0x60)) {
+                                UnallocatedA32(instr);
+                                return;
+                              }
+                              Condition condition((instr >> 28) & 0xf);
+                              unsigned rd = (instr >> 12) & 0xf;
+                              unsigned rn = (instr >> 16) & 0xf;
+                              unsigned rm = instr & 0xf;
+                              ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, (instr >> 7) & 0x1f);
+                              // SUB{<c>}{<q>} {<Rd>}, <Rn>, <Rm> {, <shift> #<amount> } ; A1 NOLINT(whitespace/line_length)
sub(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x00200000: { + // 0x00200000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00200000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00200060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // EOR{}{} {}, , , RRX ; A1 + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // EOR{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x00600000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00600060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // RSB{}{} {}, , , RRX ; A1 + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // RSB{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x00800000: { + // 0x00800000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00800000 + switch (instr & 0x000f0000) { + case 0x000d0000: { + // 0x008d0000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x008d0060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // ADD{}{} {}, SP, , RRX ; A1 + add(condition, + Best, + Register(rd), + sp, + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // ADD{}{} {}, SP, {, # } ; A1 NOLINT(whitespace/line_length) + add(condition, + Best, + Register(rd), + sp, + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + default: { + switch (instr & 
0x00000fe0) { + case 0x00000060: { + // 0x00800060 + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ADD{}{} {}, , , RRX ; A1 + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // ADD{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x00400000: { + // 0x00c00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00c00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // SBC{}{} {}, , , RRX ; A1 + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // SBC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x00a00000: { + // 0x00a00000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00a00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00a00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ADC{}{} {}, , , RRX ; A1 + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // ADC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x00e00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00e00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = 
(instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // RSC{}{} {}, , , RRX ; A1 + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // RSC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x01000000: { + // 0x01000000 + switch (instr & 0x000000e0) { + case 0x00000000: { + // 0x01000000 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0x01000000 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 22) & 0x1; + // MRS{}{} , ; A1 + mrs(condition, Register(rd), SpecialRegister(spec_reg)); + if (((instr & 0xfbf0fff) != 0x10f0000)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01000200 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MRS", instr); + break; + } + } + break; + } + case 0x00000040: { + // 0x01000040 + switch (instr & 0x00400200) { + case 0x00000000: { + // 0x01000040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32B{} , , ; A1 + crc32b(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1000040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01000240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CB{} , , ; A1 + crc32cb(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1000240)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x01400040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32W{} , , ; A1 + crc32w(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1400040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400200: { + // 0x01400240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CW{} , , ; A1 + crc32cw(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1400240)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00000080: { + // 0x01000080 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01000080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned 
ra = (instr >> 12) & 0xf; + // SMLABB{}{} , , , ; A1 + smlabb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x01400080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALBB{}{} , , , ; A1 + smlalbb(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000a0: { + // 0x010000a0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLATB{}{} , , , ; A1 + smlatb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALTB{}{} , , , ; A1 + smlaltb(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000c0: { + // 0x010000c0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLABT{}{} , , , ; A1 + smlabt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALBT{}{} , , , ; A1 + smlalbt(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000e0: { + // 0x010000e0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLATT{}{} , , , ; A1 + smlatt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALTT{}{} , , , ; A1 + smlaltt(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 
0x01200000: { + // 0x01200000 + switch (instr & 0x000000e0) { + case 0x00000000: { + // 0x01200000 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0x01200000 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned spec_reg = + ((instr >> 16) & 0xf) | ((instr >> 18) & 0x10); + unsigned rn = instr & 0xf; + // MSR{}{} , ; A1 + msr(condition, + MaskedSpecialRegister(spec_reg), + Register(rn)); + if (((instr & 0xfb0fff0) != 0x120f000)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01200200 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MSR", instr); + break; + } + } + break; + } + case 0x00000020: { + // 0x01200020 + if ((instr & 0x00400000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr & 0xf; + // BXJ{}{} ; A1 + bxj(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff20)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000040: { + // 0x01200040 + switch (instr & 0x00400200) { + case 0x00000000: { + // 0x01200040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32H{} , , ; A1 + crc32h(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1200040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01200240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CH{} , , ; A1 + crc32ch(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1200240)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000060: { + // 0x01200060 + if ((instr & 0x00400000) == 0x00400000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("ERET", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000080: { + // 0x01200080 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01200080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLAWB{}{} , , , ; A1 + smlawb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x01600080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULBB{}{} {}, , ; A1 + smulbb(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x1600080)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000a0: { + // 0x012000a0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + 
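// Note: this row is SMULWB, a signed multiply of Rn by the bottom halfword
// of Rm keeping the high 32 bits of the 48-bit product. The cond == 0b1111
// test above rejects A32's unconditional encoding space, and the fixed-bits
// check after the call ((instr & 0xff0f0f0) != 0x12000a0) reports stray
// values in should-be-zero fields (such as bits 15:12 here) as
// UnpredictableA32 instead of silently misdecoding them.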
unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULWB{}{} {}, , ; A1 + smulwb(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x12000a0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x016000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULTB{}{} {}, , ; A1 + smultb(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000a0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000c0: { + // 0x012000c0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLAWT{}{} , , , ; A1 + smlawt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x016000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULBT{}{} {}, , ; A1 + smulbt(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000c0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000e0: { + // 0x012000e0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULWT{}{} {}, , ; A1 + smulwt(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x12000e0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x016000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULTT{}{} {}, , ; A1 + smultt(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000e0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x01800000: { + // 0x01800000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01800000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01800060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ORR{}{} {}, , , RRX ; A1 + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + 
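// Note: bits 6:5 select the immediate shift type (LSL/LSR/ASR/ROR) and bits
// 11:7 its amount; ImmediateShiftOperand appears to fold in the A32
// convention that an encoded amount of zero means #32 for LSR/ASR (an
// assumption about the helper). The (instr & 0xfe0) == 0x60 exclusion above
// is what routes the zero-amount ROR pattern to the dedicated RRX case.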
ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // ORR{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x01c00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01c00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // BIC{}{} {}, , , RRX ; A1 + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // BIC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x01a00000: { + // 0x01a00000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01a00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01a00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // RRX{}{} {}, ; A1 + rrx(condition, Register(rd), Register(rm)); + if (((instr & 0xfff0ff0) != 0x1a00060)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // MOV{}{} , , RRX ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), RRX)); + if (((instr & 0xfff0ff0) != 0x1a00060)) { + UnpredictableA32(instr); + } + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x2)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + if (amount == 0) amount = 32; + // ASR{}{} {}, , # ; A1 + asr(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00040)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x0)) && + ((instr & 0xf0000000) != 0xf0000000) && + ((instr & 0x00000f80) != 0x00000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + // LSL{}{} {}, , # ; A1 + lsl(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00000)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x1)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr 
& 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + if (amount == 0) amount = 32; + // LSR{}{} {}, , # ; A1 + lsr(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00020)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x3)) && + ((instr & 0xf0000000) != 0xf0000000) && + ((instr & 0x00000f80) != 0x00000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + // ROR{}{} {}, , # ; A1 + ror(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00060)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // MOV{}{} , {, # } ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + if (((instr & 0xfff0010) != 0x1a00000)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00400000: { + // 0x01e00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01e00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // MVN{}{} , , RRX ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), RRX)); + if (((instr & 0xfff0ff0) != 0x1e00060)) { + UnpredictableA32(instr); + } + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // MVN{}{} , {, # } ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + if (((instr & 0xfff0010) != 0x1e00000)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x00000010: { + // 0x00000010 + switch (instr & 0x00400080) { + case 0x00000000: { + // 0x00000010 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00000010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // AND{}{} {}, , , ; A1 + and_(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00200000: { + // 0x00200010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // EOR{}{} {}, , , ; A1 + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00800000: { + // 0x00800010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition 
condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ADD{}{} {}, , , ; A1 + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00a00000: { + // 0x00a00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ADC{}{} {}, , , ; A1 + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01000000: { + // 0x01000010 + switch (instr & 0x00000060) { + case 0x00000040: { + // 0x01000050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QADD{}{} {}, , ; A1 + qadd(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1000050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01000070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // HLT{} {#} ; A1 + hlt(al, imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200000: { + // 0x01200010 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x01200010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr & 0xf; + // BX{}{} ; A1 + bx(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000020: { + // 0x01200030 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr & 0xf; + // BLX{}{} ; A1 + blx(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff30)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x01200050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QSUB{}{} {}, , ; A1 + qsub(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1200050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01200070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // BKPT{} {#} ; A1 + bkpt(al, imm); + break; + } + } + break; + } + case 0x01800000: { + // 0x01800010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ORR{}{} {}, , , ; A1 + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), 
Register(rs))); + break; + } + case 0x01a00000: { + // 0x01a00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x2)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // ASR{}{} {}, , ; A1 + asr(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00050)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // LSL{}{} {}, , ; A1 + lsl(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00010)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x1)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // LSR{}{} {}, , ; A1 + lsr(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00030)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x3)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // ROR{}{} {}, , ; A1 + ror(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00070)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // MOV{}{} , , ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), shift.GetType(), Register(rs))); + if (((instr & 0xfff0090) != 0x1a00010)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00000080: { + // 0x00000090 + switch (instr & 0x01200060) { + case 0x00000000: { + // 0x00000090 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x00000090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // MUL{}{} , , {} ; A1 + mul(condition, + Best, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800000: { + // 0x00800090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMULL{}{} , , , ; A1 + umull(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x00000020: { + // 0x000000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + 
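// For these extra load/store (register) encodings the base Rn sits in + // bits 19:16, the U bit (23) gives the offset sign, and Rm (bits 3:0) + // supplies the register offset applied after the access (post-index). +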
unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + // STRH{}{} , [], #{+/-} ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xb0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x000000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + // LDRD{}{} , , [], #{+/-} ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xd0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x000000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + // STRD{}{} , , [], #{+/-} ; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xf0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00200000: { + // 0x00200090 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x00200090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // MLA{}{} , , , ; A1 + mla(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00800000: { + // 0x00a00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMLAL{}{} , , , ; A1 + umlal(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x00200020: { + // 0x002000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STRHT", instr); + break; + } + case 0x01000000: { + // 0x01000090 + switch (instr & 0x00800300) { + case 0x00800000: { + // 0x01800090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STL{}{} , [] ; A1 + stl(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x180fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800200: { + // 0x01800290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEX{}{} , , [] ; A1 + stlex(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1800e90)) { + UnpredictableA32(instr); + } + break; + } + case 
0x00800300: { + // 0x01800390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREX{}{} , , [{, #}] ; A1 + strex(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), plus, 0, Offset)); + if (((instr & 0xff00ff0) != 0x1800f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01000020: { + // 0x010000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // STRH{}{} , [, #{+/-}] ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000b0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01000040: { + // 0x010000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // LDRD{}{} , , [, #{+/-}] ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000d0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01000060: { + // 0x010000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? 
minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // STRD{}{} , , [, #{+/-}] ; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000f0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200000: { + // 0x01200090 + switch (instr & 0x00800300) { + case 0x00800200: { + // 0x01a00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXD{}{} , , , [] ; A1 + stlexd(condition, + Register(rd), + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1a00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800300: { + // 0x01a00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXD{}{} , , , [] ; A1 + strexd(condition, + Register(rd), + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1a00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200020: { + // 0x012000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // STRH{}{} , [, #{+/-}]! ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000b0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200040: { + // 0x012000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // LDRD{}{} , , [, #{+/-}]! ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000d0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200060: { + // 0x012000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // STRD{}{} , , [, #{+/-}]! 
; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000f0)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00400000: { + // 0x00400010 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00400010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // SUB{}{} {}, , , ; A1 + sub(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00200000: { + // 0x00600010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // RSB{}{} {}, , , ; A1 + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00800000: { + // 0x00c00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // SBC{}{} {}, , , ; A1 + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00a00000: { + // 0x00e00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // RSC{}{} {}, , , ; A1 + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01000000: { + // 0x01400010 + switch (instr & 0x00000060) { + case 0x00000040: { + // 0x01400050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QDADD{}{} {}, , ; A1 + qdadd(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1400050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01400070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // HVC{} {#} ; A1 + hvc(al, imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200000: { + // 0x01600010 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x01600010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // CLZ{}{} , ; A1 + clz(condition, Register(rd), Register(rm)); + if (((instr & 0xfff0ff0) != 0x16f0f10)) { + 
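// (instr & 0xfff0ff0) clears only cond, Rd and Rm; everything else must + // match the canonical CLZ pattern 0x16f0f10 (Rn and bits 11:8 all ones), + // e.g. 0xe16f0f13 (clz r0, r3) masks to exactly 0x16f0f10. Any other + // value is flagged as an unpredictable encoding: +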
UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x01600050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QDSUB{}{} {}, , ; A1 + qdsub(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1600050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01600070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("SMC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01800000: { + // 0x01c00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // BIC{}{} {}, , , ; A1 + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01a00000: { + // 0x01e00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // MVN{}{} , , ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), shift.GetType(), Register(rs))); + if (((instr & 0xfff0090) != 0x1e00010)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00400080: { + // 0x00400090 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x00400090 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00400090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMAAL{}{} , , , ; A1 + umaal(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x00200000: { + // 0x00600090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // MLS{}{} , , , ; A1 + mls(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00800000: { + // 0x00c00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULL{}{} , , , ; A1 + smull(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x00a00000: { + // 0x00e00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLAL{}{} , , , ; A1 + smlal(condition, + Register(rdlo), + 
Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x01800000: { + // 0x01c00090 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0x01c00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLB{}{} , [] ; A1 + stlb(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x1c0fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01c00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXB{}{} , , [] ; A1 + stlexb(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1c00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000300: { + // 0x01c00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXB{}{} , , [] ; A1 + strexb(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1c00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01a00000: { + // 0x01e00090 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0x01e00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLH{}{} , [] ; A1 + stlh(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x1e0fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01e00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXH{}{} , , [] ; A1 + stlexh(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1e00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000300: { + // 0x01e00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXH{}{} , , [] ; A1 + strexh(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1e00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000020: { + // 0x004000b0 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0x004000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? 
minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [], #{+/-} ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, PostIndex)); + break; + } + case 0x00200000: { + // 0x006000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STRHT", instr); + break; + } + case 0x01000000: { + // 0x014000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [{, #{+/-}}] ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x01200000: { + // 0x016000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [{, #{+/-}}]! ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, PreIndex)); + break; + } + } + break; + } + case 0x00000040: { + // 0x004000d0 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x004f00d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = (instr & 0xf) | ((instr >> 4) & 0xf0); + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // LDRD{}{} , ,
, [{, #{+/-}}] ; A1 + vstr(condition, + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x00200000: { + // 0x0d200a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 16)) & Uint32(0xf)) == Uint32(0xd)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VPUSH{}{}{.} ; A2 + vpush(condition, + kDataTypeValueNone, + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VSTMDB{}{}{.} !, ; A2 + vstmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00200100: { + // 0x0d200b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0x0d200b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VPUSH{}{}{.} ; A1 + vpush(condition, + kDataTypeValueNone, + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VSTMDB{}{}{.} !, ; A1 + vstmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000001: { + // 0x0d200b01 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FSTMDBX{}{} !, ; A1 + fstmdbx(condition, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x01000e00: { + // 0x0d000e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0x0d005e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STC", instr); + break; + } + case 0x00205000: { + // 0x0d205e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01100a00: { + // 0x0d100a00 + 
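// Within the 0x0d100a00 group, bit 21 (W) separates plain VLDR (W == 0) + // from the descending multiples VLDMDB/FLDMDBX (W == 1, which also + // require U, bit 23, to be 0), bit 8 selects single- vs. double-precision + // transfers, and Rn == 0xf marks the PC-relative literal form of VLDR. +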
switch (instr & 0x00200100) { + case 0x00000000: { + // 0x0d100a00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x0d1f0a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xff; + imm <<= 2; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // VLDR{}{}{.32} ,
,
, [{, #{+/-}}] ; A1 + vldr(condition, + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + } + break; + } + case 0x00200000: { + // 0x0d300a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VLDMDB{}{}{.} !, ; A2 + vldmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00200100: { + // 0x0d300b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0x0d300b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VLDMDB{}{}{.} !, ; A1 + vldmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000001: { + // 0x0d300b01 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FLDMDBX{}{} !, ; A1 + fldmdbx(condition, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x01100e00: { + // 0x0d100e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0x0d105e00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x0d1f5e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xf0000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + } + break; + } + case 0x00205000: { + // 0x0d305e00 + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xf0000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x0e000000: { + // 0x0e000000 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0x0e000000 + switch (instr & 0x00100e10) { + case 0x00000a00: { + // 0x0e000a00 + switch (instr & 0x00a00140) { + case 0x00000000: { + // 0x0e000a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; A2 + vmla(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); 
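+ // The sibling cases below follow the same template: bits 23 and 21 pick + // the operation (VMLA/VMLS, VMUL/VNMUL, VDIV or VFMA/VFMS), bit 8 picks + // F32 (0xa00) vs. F64 (0xb00), and bit 6 toggles the negating variant.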
+ break; + } + case 0x00000040: { + // 0x0e000a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; A2 + vmls(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000100: { + // 0x0e000b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F64
, , ; A2 + vmla(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0x0e000b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F64
, , ; A2 + vmls(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0x0e200a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; A2 + vmul(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200040: { + // 0x0e200a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMUL{}{}.F32 {}, , ; A1 + vnmul(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200100: { + // 0x0e200b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F64 {
}, , ; A2 + vmul(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200140: { + // 0x0e200b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMUL{}{}.F64 {
}, , ; A1 + vnmul(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800000: { + // 0x0e800a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VDIV{}{}.F32 {}, , ; A1 + vdiv(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800100: { + // 0x0e800b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDIV{}{}.F64 {
}, , ; A1 + vdiv(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00000: { + // 0x0ea00a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; A2 + vfma(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00040: { + // 0x0ea00a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; A2 + vfms(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00100: { + // 0x0ea00b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F64
, , ; A2 + vfma(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00140: { + // 0x0ea00b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F64
, , ; A2 + vfms(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000a10: { + // 0x0e000a10 + switch (instr & 0x00800100) { + case 0x00000000: { + // 0x0e000a10 + if ((instr & 0x00600000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{} , ; A1 + vmov(condition, SRegister(rn), Register(rt)); + if (((instr & 0xff00f7f) != 0xe000a10)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0x0e000b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned lane; + DataType dt = + Dt_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{}{.} , ; A1 + vmov(condition, dt, DRegisterLane(rd, lane), Register(rt)); + if (((instr & 0xf900f1f) != 0xe000b10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800000: { + // 0x0e800a10 + if ((instr & 0x00600000) == 0x00600000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned spec_reg = (instr >> 16) & 0xf; + unsigned rt = (instr >> 12) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x8: { + // VMSR{}{} , ; A1 + vmsr(condition, + SpecialFPRegister(spec_reg), + Register(rt)); + if (((instr & 0xff00fff) != 0xee00a10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00800100: { + // 0x0e800b10 + switch (instr & 0x00200040) { + case 0x00000000: { + // 0x0e800b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_B_E_1_Decode(((instr >> 5) & 0x1) | + ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; A1 + vdup(condition, dt, DRegister(rd), Register(rt)); + if (((instr & 0xfb00f5f) != 0xe800b10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00200000: { + // 0x0ea00b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_B_E_1_Decode(((instr >> 5) & 0x1) | + ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; A1 + vdup(condition, dt, QRegister(rd), Register(rt)); + if (((instr & 0xfb00f5f) != 0xea00b10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000e10: { + // 0x0e000e10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MCR", instr); + break; + } + case 0x00100a00: { + // 0x0e100a00 + switch (instr & 0x00a00140) { + case 0x00000000: { + // 0x0e100a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLS{}{}.F32 , , ; A1 + vnmls(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000040: { + // 0x0e100a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLA{}{}.F32 , , ; A1 + vnmla(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000100: { + // 0x0e100b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLS{}{}.F64
, , ; A1 + vnmls(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0x0e100b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLA{}{}.F64
, , ; A1 + vnmla(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0x0e300a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; A2 + vadd(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200040: { + // 0x0e300a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; A2 + vsub(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200100: { + // 0x0e300b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F64 {
}, , ; A2 + vadd(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200140: { + // 0x0e300b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F64 {
}, , ; A2 + vsub(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800000: { + // 0x0e900a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMS{}{}.F32 , , ; A1 + vfnms(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800040: { + // 0x0e900a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMA{}{}.F32 , , ; A1 + vfnma(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800100: { + // 0x0e900b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMS{}{}.F64
, , ; A1 + vfnms(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800140: { + // 0x0e900b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMA{}{}.F64
, , ; A1 + vfnma(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00000: { + // 0x0eb00a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F32 , # ; A2 + vmov(condition, F32, SRegister(rd), imm); + if (((instr & 0xfb00ff0) != 0xeb00a00)) { + UnpredictableA32(instr); + } + break; + } + case 0x00a00040: { + // 0x0eb00a40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0x0eb00a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb00a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMOV{}{}.F32 , ; A2 + vmov(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb00ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VABS{}{}.F32 , ; A2 + vabs(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb10a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNEG{}{}.F32 , ; A2 + vneg(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb10ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSQRT{}{}.F32 , ; A1 + vsqrt(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0x0eb20a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb20a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F32.F16 , ; A1 + vcvtb(condition, + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb20ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F32.F16 , ; A1 + vcvtt(condition, + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb30a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F16.F32 , ; A1 + vcvtb(condition, + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb30ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = 
ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F16.F32 , ; A1 + vcvtt(condition, + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0x0eb40a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb40a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMP{}{}.F32 , ; A1 + vcmp(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb40ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMPE{}{}.F32 , ; A1 + vcmpe(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb50a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMP{}{}.F32 , #0.0 ; A2 + vcmp(condition, F32, SRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50a40)) { + UnpredictableA32(instr); + } + break; + } + case 0x00010080: { + // 0x0eb50ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMPE{}{}.F32 , #0.0 ; A2 + vcmpe(condition, F32, SRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50ac0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0x0eb60a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb60a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTR{}{}.F32 , ; A1 + vrintr(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb60ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTZ{}{}.F32 , ; A1 + vrintz(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb70a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTX{}{}.F32 , ; A1 + vrintx(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb70ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.F32
, ; A1 + vcvt(condition, + F64, + F32, + DRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0x0eb80a40 + if ((instr & 0x00010000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F32.
, ; A1 + vcvt(condition, + F32, + dt, + SRegister(rd), + SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0x0eba0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F32.
, , # ; A1 + vcvt(condition, + F32, + dt, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0x0ebc0a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0ebc0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.U32.F32 , ; A1 + vcvtr(condition, + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0ebc0ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.U32.F32 , ; A1 + vcvt(condition, + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0ebd0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.S32.F32 , ; A1 + vcvtr(condition, + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0ebd0ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.S32.F32 , ; A1 + vcvt(condition, + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0x0ebe0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F32 , , # ; A1 + vcvt(condition, + dt, + F32, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + } + break; + } + case 0x00a00100: { + // 0x0eb00b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F64
, # ; A2 + vmov(condition, F64, DRegister(rd), imm); + if (((instr & 0xfb00ff0) != 0xeb00b00)) { + UnpredictableA32(instr); + } + break; + } + case 0x00a00140: { + // 0x0eb00b40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0x0eb00b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb00b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMOV{}{}.F64
, ; A2 + vmov(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb00bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABS{}{}.F64
, ; A2 + vabs(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb10b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNEG{}{}.F64
, ; A2 + vneg(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb10bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSQRT{}{}.F64
, ; A1 + vsqrt(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0x0eb20b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb20b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F64.F16
, ; A1 + vcvtb(condition, + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb20bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F64.F16
, ; A1 + vcvtt(condition, + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb30b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTB{}{}.F16.F64 , ; A1 + vcvtb(condition, + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb30bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTT{}{}.F16.F64 , ; A1 + vcvtt(condition, + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0x0eb40b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb40b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMP{}{}.F64
, ; A1 + vcmp(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb40bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMPE{}{}.F64
, ; A1 + vcmpe(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb50b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMP{}{}.F64
, #0.0 ; A2 + vcmp(condition, F64, DRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50b40)) { + UnpredictableA32(instr); + } + break; + } + case 0x00010080: { + // 0x0eb50bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMPE{}{}.F64
, #0.0 ; A2 + vcmpe(condition, F64, DRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50bc0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0x0eb60b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb60b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTR{}{}.F64
, ; A1 + vrintr(condition, + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb60bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}{}.F64
, ; A1 + vrintz(condition, + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb70b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}{}.F64
<Dd>, <Dm> ; A1
+              vrintx(condition,
+                     F64,
+                     DRegister(rd),
+                     DRegister(rm));
+              break;
+            }
+            case 0x00010080: {
+              // 0x0eb70bc0
+              if (((instr & 0xf0000000) == 0xf0000000)) {
+                UnallocatedA32(instr);
+                return;
+              }
+              Condition condition((instr >> 28) & 0xf);
+              unsigned rd = ExtractSRegister(instr, 22, 12);
+              unsigned rm = ExtractDRegister(instr, 5, 0);
+              // VCVT{<c>}{<q>}.F32.F64 <Sd>, <Dm> ; A1
+              vcvt(condition,
+                   F32,
+                   F64,
+                   SRegister(rd),
+                   DRegister(rm));
+              break;
+            }
+          }
+          break;
+        }
+        case 0x00080000: {
+          // 0x0eb80b40
+          if ((instr & 0x00010000) == 0x00000000) {
+            if (((instr & 0xf0000000) == 0xf0000000)) {
+              UnallocatedA32(instr);
+              return;
+            }
+            Condition condition((instr >> 28) & 0xf);
+            DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1);
+            if (dt.Is(kDataTypeValueInvalid)) {
+              UnallocatedA32(instr);
+              return;
+            }
+            unsigned rd = ExtractDRegister(instr, 22, 12);
+            unsigned rm = ExtractSRegister(instr, 5, 0);
+            // VCVT{<c>}{<q>}.F64.<dt>
, ; A1 + vcvt(condition, + F64, + dt, + DRegister(rd), + SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0x0eba0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F64.
, , # ; A1 + vcvt(condition, + F64, + dt, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0x0ebc0b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0ebc0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.U32.F64 , ; A1 + vcvtr(condition, + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0ebc0bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.U32.F64 , ; A1 + vcvt(condition, + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0ebd0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.S32.F64 , ; A1 + vcvtr(condition, + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0ebd0bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.S32.F64 , ; A1 + vcvt(condition, + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0x0ebe0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F64 , , # ; A1 + vcvt(condition, + dt, + F64, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + } + break; + } + } + break; + } + case 0x00100a10: { + // 0x0e100a10 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0x0e100a10 + switch (instr & 0x00e00000) { + case 0x00000000: { + // 0x0e100a10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractSRegister(instr, 7, 16); + // VMOV{}{} , ; A1 + vmov(condition, Register(rt), SRegister(rn)); + if (((instr & 0xff00f7f) != 0xe100a10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00e00000: { + // 0x0ef00a10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 16) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x5: + case 0x6: + case 0x7: + case 0x8: { + // VMRS{}{} , ; A1 + vmrs(condition, + RegisterOrAPSR_nzcv(rt), + SpecialFPRegister(spec_reg)); + if (((instr & 0xff00fff) != 0xef00a10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000100: { + // 0x0e100b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned lane; + DataType dt = + Dt_U_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc) | + ((instr >> 19) & 0x10), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; A1 + vmov(condition, dt, Register(rt), DRegisterLane(rn, lane)); + if (((instr & 0xf100f1f) != 0xe100b10)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00100e10: { + // 0x0e100e10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MRC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01000000: { + // 0x0f000000 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + uint32_t imm = instr & 0xffffff; + // SVC{}{} {#} ; A1 + svc(condition, imm); + break; + } + } + break; + } + } + } +} // NOLINT(readability/fn_size) +// End of generated code. + +const uint16_t* PrintDisassembler::DecodeT32At( + const uint16_t* instruction_address, const uint16_t* buffer_end) { + uint32_t instruction = *instruction_address++ << 16; + + if (instruction >= kLowestT32_32Opcode) { + if (instruction_address >= buffer_end) { + os() << "?\n"; + return instruction_address; + } + instruction |= *instruction_address++; + } + + DecodeT32(instruction); + return instruction_address; +} + +void PrintDisassembler::DecodeT32(uint32_t instruction) { + PrintCodeAddress(GetCodeAddress()); + if (T32Size(instruction) == 2) { + PrintOpcode16(instruction >> 16); + Disassembler::DecodeT32(instruction); + } else { + PrintOpcode32(instruction); + Disassembler::DecodeT32(instruction); + } + os() << "\n"; +} + + +void PrintDisassembler::DecodeA32(uint32_t instruction) { + PrintCodeAddress(GetCodeAddress()); + PrintOpcode32(instruction); + Disassembler::DecodeA32(instruction); + os() << "\n"; +} + + +void PrintDisassembler::DisassembleA32Buffer(const uint32_t* buffer, + size_t size_in_bytes) { + VIXL_ASSERT(IsAligned(buffer)); + VIXL_ASSERT(IsMultiple(size_in_bytes)); + const uint32_t* const end_buffer = + buffer + (size_in_bytes / sizeof(uint32_t)); + while (buffer < end_buffer) { + DecodeA32(*buffer++); + } +} + + +void PrintDisassembler::DisassembleT32Buffer(const uint16_t* buffer, + size_t size_in_bytes) { + VIXL_ASSERT(IsAligned(buffer)); + VIXL_ASSERT(IsMultiple(size_in_bytes)); + const uint16_t* const end_buffer = + buffer + (size_in_bytes / sizeof(uint16_t)); + while (buffer < end_buffer) { + buffer = DecodeT32At(buffer, end_buffer); + } + VIXL_ASSERT(buffer == end_buffer); +} + +} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch32/instructions-aarch32.cc b/dep/vixl/src/aarch32/instructions-aarch32.cc new file mode 100644 index 000000000..2d1cb9056 --- /dev/null +++ b/dep/vixl/src/aarch32/instructions-aarch32.cc @@ -0,0 +1,742 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "utils-vixl.h"
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+
+namespace vixl {
+namespace aarch32 {
+
+
+bool Shift::IsValidAmount(uint32_t amount) const {
+  switch (GetType()) {
+    case LSL:
+      return amount <= 31;
+    case ROR:
+      return (amount > 0) && (amount <= 31);
+    case LSR:
+    case ASR:
+      return (amount > 0) && (amount <= 32);
+    case RRX:
+      return amount == 0;
+    default:
+      VIXL_UNREACHABLE();
+      return false;
+  }
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Register reg) {
+  switch (reg.GetCode()) {
+    case 12:
+      return os << "ip";
+    case 13:
+      return os << "sp";
+    case 14:
+      return os << "lr";
+    case 15:
+      return os << "pc";
+    default:
+      return os << "r" << reg.GetCode();
+  }
+}
+
+
+SRegister VRegister::S() const {
+  VIXL_ASSERT(GetType() == kSRegister);
+  return SRegister(GetCode());
+}
+
+
+DRegister VRegister::D() const {
+  VIXL_ASSERT(GetType() == kDRegister);
+  return DRegister(GetCode());
+}
+
+
+QRegister VRegister::Q() const {
+  VIXL_ASSERT(GetType() == kQRegister);
+  return QRegister(GetCode());
+}
+
+
+Register RegisterList::GetFirstAvailableRegister() const {
+  for (uint32_t i = 0; i < kNumberOfRegisters; i++) {
+    if (((list_ >> i) & 1) != 0) return Register(i);
+  }
+  return Register();
+}
+
+
+std::ostream& PrintRegisterList(std::ostream& os,  // NOLINT(runtime/references)
+                                uint32_t list) {
+  os << "{";
+  bool first = true;
+  int code = 0;
+  while (list != 0) {
+    if ((list & 1) != 0) {
+      if (first) {
+        first = false;
+      } else {
+        os << ",";
+      }
+      os << Register(code);
+    }
+    list >>= 1;
+    code++;
+  }
+  os << "}";
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, RegisterList registers) {
+  return PrintRegisterList(os, registers.GetList());
+}
+
+
+QRegister VRegisterList::GetFirstAvailableQRegister() const {
+  for (uint32_t i = 0; i < kNumberOfQRegisters; i++) {
+    if (((list_ >> (i * 4)) & 0xf) == 0xf) return QRegister(i);
+  }
+  return QRegister();
+}
+
+
+DRegister VRegisterList::GetFirstAvailableDRegister() const {
+  for (uint32_t i = 0; i < kMaxNumberOfDRegisters; i++) {
+    if (((list_ >> (i * 2)) & 0x3) == 0x3) return DRegister(i);
+  }
+  return DRegister();
+}
+
+
+SRegister VRegisterList::GetFirstAvailableSRegister() const {
+  for (uint32_t i = 0; i < kNumberOfSRegisters; i++) {
+    if (((list_ >> i) & 0x1) != 0) return SRegister(i);
+  }
+  return SRegister();
+}
+
+
+std::ostream& operator<<(std::ostream& os, SRegisterList reglist) {
+  SRegister first = reglist.GetFirstSRegister();
+  SRegister last = reglist.GetLastSRegister();
+  if (first.Is(last))
+    os << "{" << first << "}";
+  else
+    os << "{" << first << "-" << last << "}";
+  return
os; +} + + +std::ostream& operator<<(std::ostream& os, DRegisterList reglist) { + DRegister first = reglist.GetFirstDRegister(); + DRegister last = reglist.GetLastDRegister(); + if (first.Is(last)) + os << "{" << first << "}"; + else + os << "{" << first << "-" << last << "}"; + return os; +} + +std::ostream& operator<<(std::ostream& os, NeonRegisterList nreglist) { + DRegister first = nreglist.GetFirstDRegister(); + int increment = nreglist.IsSingleSpaced() ? 1 : 2; + int count = + nreglist.GetLastDRegister().GetCode() - first.GetCode() + increment; + if (count < 0) count += kMaxNumberOfDRegisters; + os << "{"; + bool first_displayed = false; + for (;;) { + if (first_displayed) { + os << ","; + } else { + first_displayed = true; + } + os << first; + if (nreglist.IsTransferOneLane()) { + os << "[" << nreglist.GetTransferLane() << "]"; + } else if (nreglist.IsTransferAllLanes()) { + os << "[]"; + } + count -= increment; + if (count <= 0) break; + unsigned next = first.GetCode() + increment; + if (next >= kMaxNumberOfDRegisters) next -= kMaxNumberOfDRegisters; + first = DRegister(next); + } + os << "}"; + return os; +} + + +const char* SpecialRegister::GetName() const { + switch (reg_) { + case APSR: + return "APSR"; + case SPSR: + return "SPSR"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* MaskedSpecialRegister::GetName() const { + switch (reg_) { + case APSR_nzcvq: + return "APSR_nzcvq"; + case APSR_g: + return "APSR_g"; + case APSR_nzcvqg: + return "APSR_nzcvqg"; + case CPSR_c: + return "CPSR_c"; + case CPSR_x: + return "CPSR_x"; + case CPSR_xc: + return "CPSR_xc"; + case CPSR_sc: + return "CPSR_sc"; + case CPSR_sx: + return "CPSR_sx"; + case CPSR_sxc: + return "CPSR_sxc"; + case CPSR_fc: + return "CPSR_fc"; + case CPSR_fx: + return "CPSR_fx"; + case CPSR_fxc: + return "CPSR_fxc"; + case CPSR_fsc: + return "CPSR_fsc"; + case CPSR_fsx: + return "CPSR_fsx"; + case CPSR_fsxc: + return "CPSR_fsxc"; + case SPSR_c: + return "SPSR_c"; + case SPSR_x: + return "SPSR_x"; + case SPSR_xc: + return "SPSR_xc"; + case SPSR_s: + return "SPSR_s"; + case SPSR_sc: + return "SPSR_sc"; + case SPSR_sx: + return "SPSR_sx"; + case SPSR_sxc: + return "SPSR_sxc"; + case SPSR_f: + return "SPSR_f"; + case SPSR_fc: + return "SPSR_fc"; + case SPSR_fx: + return "SPSR_fx"; + case SPSR_fxc: + return "SPSR_fxc"; + case SPSR_fs: + return "SPSR_fs"; + case SPSR_fsc: + return "SPSR_fsc"; + case SPSR_fsx: + return "SPSR_fsx"; + case SPSR_fsxc: + return "SPSR_fsxc"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* BankedRegister::GetName() const { + switch (reg_) { + case R8_usr: + return "R8_usr"; + case R9_usr: + return "R9_usr"; + case R10_usr: + return "R10_usr"; + case R11_usr: + return "R11_usr"; + case R12_usr: + return "R12_usr"; + case SP_usr: + return "SP_usr"; + case LR_usr: + return "LR_usr"; + case R8_fiq: + return "R8_fiq"; + case R9_fiq: + return "R9_fiq"; + case R10_fiq: + return "R10_fiq"; + case R11_fiq: + return "R11_fiq"; + case R12_fiq: + return "R12_fiq"; + case SP_fiq: + return "SP_fiq"; + case LR_fiq: + return "LR_fiq"; + case LR_irq: + return "LR_irq"; + case SP_irq: + return "SP_irq"; + case LR_svc: + return "LR_svc"; + case SP_svc: + return "SP_svc"; + case LR_abt: + return "LR_abt"; + case SP_abt: + return "SP_abt"; + case LR_und: + return "LR_und"; + case SP_und: + return "SP_und"; + case LR_mon: + return "LR_mon"; + case SP_mon: + return "SP_mon"; + case ELR_hyp: + return "ELR_hyp"; + case SP_hyp: + return "SP_hyp"; + case SPSR_fiq: + return "SPSR_fiq"; + case SPSR_irq: 
+ return "SPSR_irq"; + case SPSR_svc: + return "SPSR_svc"; + case SPSR_abt: + return "SPSR_abt"; + case SPSR_und: + return "SPSR_und"; + case SPSR_mon: + return "SPSR_mon"; + case SPSR_hyp: + return "SPSR_hyp"; + } + VIXL_UNREACHABLE(); + return "??"; +} + +const char* SpecialFPRegister::GetName() const { + switch (reg_) { + case FPSID: + return "FPSID"; + case FPSCR: + return "FPSCR"; + case MVFR2: + return "MVFR2"; + case MVFR1: + return "MVFR1"; + case MVFR0: + return "MVFR0"; + case FPEXC: + return "FPEXC"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* Condition::GetName() const { + switch (condition_) { + case eq: + return "eq"; + case ne: + return "ne"; + case cs: + return "cs"; + case cc: + return "cc"; + case mi: + return "mi"; + case pl: + return "pl"; + case vs: + return "vs"; + case vc: + return "vc"; + case hi: + return "hi"; + case ls: + return "ls"; + case ge: + return "ge"; + case lt: + return "lt"; + case gt: + return "gt"; + case le: + return "le"; + case al: + return ""; + case Condition::kNone: + return ""; + } + return ""; +} + + +const char* Shift::GetName() const { + switch (shift_) { + case LSL: + return "lsl"; + case LSR: + return "lsr"; + case ASR: + return "asr"; + case ROR: + return "ror"; + case RRX: + return "rrx"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* EncodingSize::GetName() const { + switch (size_) { + case Best: + case Narrow: + return ""; + case Wide: + return ".w"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* DataType::GetName() const { + switch (value_) { + case kDataTypeValueInvalid: + return ".??"; + case kDataTypeValueNone: + return ""; + case S8: + return ".s8"; + case S16: + return ".s16"; + case S32: + return ".s32"; + case S64: + return ".s64"; + case U8: + return ".u8"; + case U16: + return ".u16"; + case U32: + return ".u32"; + case U64: + return ".u64"; + case F16: + return ".f16"; + case F32: + return ".f32"; + case F64: + return ".f64"; + case I8: + return ".i8"; + case I16: + return ".i16"; + case I32: + return ".i32"; + case I64: + return ".i64"; + case P8: + return ".p8"; + case P64: + return ".p64"; + case Untyped8: + return ".8"; + case Untyped16: + return ".16"; + case Untyped32: + return ".32"; + case Untyped64: + return ".64"; + } + VIXL_UNREACHABLE(); + return ".??"; +} + + +const char* MemoryBarrier::GetName() const { + switch (type_) { + case OSHLD: + return "oshld"; + case OSHST: + return "oshst"; + case OSH: + return "osh"; + case NSHLD: + return "nshld"; + case NSHST: + return "nshst"; + case NSH: + return "nsh"; + case ISHLD: + return "ishld"; + case ISHST: + return "ishst"; + case ISH: + return "ish"; + case LD: + return "ld"; + case ST: + return "st"; + case SY: + return "sy"; + } + switch (static_cast(type_)) { + case 0: + return "#0x0"; + case 4: + return "#0x4"; + case 8: + return "#0x8"; + case 0xc: + return "#0xc"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* InterruptFlags::GetName() const { + switch (type_) { + case F: + return "f"; + case I: + return "i"; + case IF: + return "if"; + case A: + return "a"; + case AF: + return "af"; + case AI: + return "ai"; + case AIF: + return "aif"; + } + VIXL_ASSERT(type_ == 0); + return ""; +} + + +const char* Endianness::GetName() const { + switch (type_) { + case LE: + return "le"; + case BE: + return "be"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +// Constructor used for disassembly. 
+ImmediateShiftOperand::ImmediateShiftOperand(int shift_value, int amount_value) + : Shift(shift_value) { + switch (shift_value) { + case LSL: + amount_ = amount_value; + break; + case LSR: + case ASR: + amount_ = (amount_value == 0) ? 32 : amount_value; + break; + case ROR: + amount_ = amount_value; + if (amount_value == 0) SetType(RRX); + break; + default: + VIXL_UNREACHABLE(); + SetType(LSL); + amount_ = 0; + break; + } +} + + +ImmediateT32::ImmediateT32(uint32_t imm) { + // 00000000 00000000 00000000 abcdefgh + if ((imm & ~0xff) == 0) { + SetEncodingValue(imm); + return; + } + if ((imm >> 16) == (imm & 0xffff)) { + if ((imm & 0xff00) == 0) { + // 00000000 abcdefgh 00000000 abcdefgh + SetEncodingValue((imm & 0xff) | (0x1 << 8)); + return; + } + if ((imm & 0xff) == 0) { + // abcdefgh 00000000 abcdefgh 00000000 + SetEncodingValue(((imm >> 8) & 0xff) | (0x2 << 8)); + return; + } + if (((imm >> 8) & 0xff) == (imm & 0xff)) { + // abcdefgh abcdefgh abcdefgh abcdefgh + SetEncodingValue((imm & 0xff) | (0x3 << 8)); + return; + } + } + for (int shift = 0; shift < 24; shift++) { + uint32_t imm8 = imm >> (24 - shift); + uint32_t overflow = imm << (8 + shift); + if ((imm8 <= 0xff) && ((imm8 & 0x80) != 0) && (overflow == 0)) { + SetEncodingValue(((shift + 8) << 7) | (imm8 & 0x7F)); + return; + } + } +} + + +static inline uint32_t ror(uint32_t x, int i) { + VIXL_ASSERT((0 < i) && (i < 32)); + return (x >> i) | (x << (32 - i)); +} + + +bool ImmediateT32::IsImmediateT32(uint32_t imm) { + /* abcdefgh abcdefgh abcdefgh abcdefgh */ + if ((imm ^ ror(imm, 8)) == 0) return true; + /* 00000000 abcdefgh 00000000 abcdefgh */ + /* abcdefgh 00000000 abcdefgh 00000000 */ + if ((imm ^ ror(imm, 16)) == 0 && + (((imm & 0xff00) == 0) || ((imm & 0xff) == 0))) + return true; + /* isolate least-significant set bit */ + uint32_t lsb = imm & -imm; + /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to + * avoid overflow (underflow is always a successful case) */ + return ((imm >> 8) < lsb); +} + + +uint32_t ImmediateT32::Decode(uint32_t value) { + uint32_t base = value & 0xff; + switch (value >> 8) { + case 0: + return base; + case 1: + return base | (base << 16); + case 2: + return (base << 8) | (base << 24); + case 3: + return base | (base << 8) | (base << 16) | (base << 24); + default: + base |= 0x80; + return base << (32 - (value >> 7)); + } +} + + +ImmediateA32::ImmediateA32(uint32_t imm) { + // Deal with rot = 0 first to avoid undefined shift by 32. 
+ if (imm <= 0xff) { + SetEncodingValue(imm); + return; + } + for (int rot = 2; rot < 32; rot += 2) { + uint32_t imm8 = (imm << rot) | (imm >> (32 - rot)); + if (imm8 <= 0xff) { + SetEncodingValue((rot << 7) | imm8); + return; + } + } +} + + +bool ImmediateA32::IsImmediateA32(uint32_t imm) { + /* fast-out */ + if (imm < 256) return true; + /* avoid getting confused by wrapped-around bytes (this transform has no + * effect on pass/fail results) */ + if (imm & 0xff000000) imm = ror(imm, 16); + /* copy odd-numbered set bits into even-numbered bits immediately below, so + * that the least-significant set bit is always an even bit */ + imm = imm | ((imm >> 1) & 0x55555555); + /* isolate least-significant set bit (always even) */ + uint32_t lsb = imm & -imm; + /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to + * avoid overflow (underflow is always a successful case) */ + return ((imm >> 8) < lsb); +} + + +uint32_t ImmediateA32::Decode(uint32_t value) { + int rotation = (value >> 8) * 2; + VIXL_ASSERT(rotation >= 0); + VIXL_ASSERT(rotation <= 30); + value &= 0xff; + if (rotation == 0) return value; + return (value >> rotation) | (value << (32 - rotation)); +} + + +uint32_t TypeEncodingValue(Shift shift) { + return shift.IsRRX() ? kRRXEncodedValue : shift.GetValue(); +} + + +uint32_t AmountEncodingValue(Shift shift, uint32_t amount) { + switch (shift.GetType()) { + case LSL: + case ROR: + return amount; + case LSR: + case ASR: + return amount % 32; + case RRX: + return 0; + } + return 0; +} + +} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch32/location-aarch32.cc b/dep/vixl/src/aarch32/location-aarch32.cc new file mode 100644 index 000000000..d61aafa99 --- /dev/null +++ b/dep/vixl/src/aarch32/location-aarch32.cc @@ -0,0 +1,152 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
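// Aside (editorial sketch, not part of the VIXL sources): the A32
// modified-immediate test in ImmediateA32::IsImmediateA32 above accepts a
// value iff it is an 8-bit immediate rotated right by an even amount. The
// helper below re-derives the same predicate naively, mirroring the
// ImmediateA32 constructor's rotate loop; `IsA32Encodable` is our name,
// introduced only for illustration.
#include <cassert>
#include <cstdint>

static bool IsA32Encodable(uint32_t imm) {
  if (imm <= 0xff) return true;  // rot == 0 handled first, to avoid an
                                 // undefined shift by 32 below.
  for (int rot = 2; rot < 32; rot += 2) {
    // Rotate left by `rot`, i.e. undo a rotate-right of the encoding.
    uint32_t imm8 = (imm << rot) | (imm >> (32 - rot));
    if (imm8 <= 0xff) return true;  // Fits: the encoding is (rot / 2, imm8).
  }
  return false;
}

int main() {
  assert(IsA32Encodable(0x104));   // 0x41 rotated right by 30.
  assert(!IsA32Encodable(0x101));  // Bits 0 and 8: no 8-bit window fits.
  return 0;
}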
+ +#include "location-aarch32.h" + +#include "assembler-aarch32.h" +#include "macro-assembler-aarch32.h" + +namespace vixl { + +namespace aarch32 { + +bool Location::Needs16BitPadding(int32_t location) const { + if (!HasForwardReferences()) return false; + const ForwardRef& last_ref = GetLastForwardReference(); + int32_t min_location_last_ref = last_ref.GetMinLocation(); + VIXL_ASSERT(min_location_last_ref - location <= 2); + return (min_location_last_ref > location); +} + +void Location::ResolveReferences(internal::AssemblerBase* assembler) { + // Iterate over references and call EncodeLocationFor on each of them. + for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) { + const ForwardRef& reference = *it.Current(); + VIXL_ASSERT(reference.LocationIsEncodable(location_)); + int32_t from = reference.GetLocation(); + EncodeLocationFor(assembler, from, reference.op()); + } + forward_.clear(); +} + +static bool Is16BitEncoding(uint16_t instr) { + return instr < (kLowestT32_32Opcode >> 16); +} + +void Location::EncodeLocationFor(internal::AssemblerBase* assembler, + int32_t from, + const Location::EmitOperator* encoder) { + if (encoder->IsUsingT32()) { + uint16_t* instr_ptr = + assembler->GetBuffer()->GetOffsetAddress(from); + if (Is16BitEncoding(instr_ptr[0])) { + // The Encode methods always deals with uint32_t types so we need + // to explicitly cast it. + uint32_t instr = static_cast(instr_ptr[0]); + instr = encoder->Encode(instr, from, this); + // The Encode method should not ever set the top 16 bits. + VIXL_ASSERT((instr & ~0xffff) == 0); + instr_ptr[0] = static_cast(instr); + } else { + uint32_t instr = + instr_ptr[1] | (static_cast(instr_ptr[0]) << 16); + instr = encoder->Encode(instr, from, this); + instr_ptr[0] = static_cast(instr >> 16); + instr_ptr[1] = static_cast(instr); + } + } else { + uint32_t* instr_ptr = + assembler->GetBuffer()->GetOffsetAddress(from); + instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this); + } +} + +void Location::AddForwardRef(int32_t instr_location, + const EmitOperator& op, + const ReferenceInfo* info) { + VIXL_ASSERT(referenced_); + int32_t from = instr_location + (op.IsUsingT32() ? 
kT32PcDelta : kA32PcDelta); + if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) + from = AlignDown(from, 4); + int32_t min_object_location = from + info->min_offset; + int32_t max_object_location = from + info->max_offset; + forward_.insert(ForwardRef(&op, + instr_location, + info->size, + min_object_location, + max_object_location, + info->alignment)); +} + +int Location::GetMaxAlignment() const { + int max_alignment = GetPoolObjectAlignment(); + for (ForwardRefListIterator it(const_cast(this)); !it.Done(); + it.Advance()) { + const ForwardRef& reference = *it.Current(); + if (reference.GetAlignment() > max_alignment) + max_alignment = reference.GetAlignment(); + } + return max_alignment; +} + +int Location::GetMinLocation() const { + int32_t min_location = 0; + for (ForwardRefListIterator it(const_cast(this)); !it.Done(); + it.Advance()) { + const ForwardRef& reference = *it.Current(); + if (reference.GetMinLocation() > min_location) + min_location = reference.GetMinLocation(); + } + return min_location; +} + +void Label::UpdatePoolObject(PoolObject* object) { + VIXL_ASSERT(forward_.size() == 1); + const ForwardRef& reference = forward_.Front(); + object->Update(reference.GetMinLocation(), + reference.GetMaxLocation(), + reference.GetAlignment()); +} + +void Label::EmitPoolObject(MacroAssemblerInterface* masm) { + MacroAssembler* macro_assembler = static_cast(masm); + + // Add a new branch to this label. + macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes); + ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler, + kMaxInstructionSizeInBytes, + ExactAssemblyScope::kMaximumSize); + macro_assembler->b(this); +} + +void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) { + Assembler* assembler = static_cast(masm->AsAssemblerBase()); + + assembler->GetBuffer()->EnsureSpaceFor(GetSize()); + assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize()); +} +} +} diff --git a/dep/vixl/src/aarch32/macro-assembler-aarch32.cc b/dep/vixl/src/aarch32/macro-assembler-aarch32.cc new file mode 100644 index 000000000..56c0ffbdd --- /dev/null +++ b/dep/vixl/src/aarch32/macro-assembler-aarch32.cc @@ -0,0 +1,2312 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "aarch32/macro-assembler-aarch32.h" + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) + +#define CONTEXT_SCOPE \ + ContextScope context(this, __FILE__ ":" TOSTRING(__LINE__)) + +namespace vixl { +namespace aarch32 { + +ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck( + MacroAssembler* masm, size_t size, SizePolicy size_policy) + : ExactAssemblyScope(masm, + size, + size_policy, + ExactAssemblyScope::kIgnorePools) {} + +void UseScratchRegisterScope::Open(MacroAssembler* masm) { + VIXL_ASSERT(masm_ == NULL); + VIXL_ASSERT(masm != NULL); + masm_ = masm; + + old_available_ = masm_->GetScratchRegisterList()->GetList(); + old_available_vfp_ = masm_->GetScratchVRegisterList()->GetList(); + + parent_ = masm->GetCurrentScratchRegisterScope(); + masm->SetCurrentScratchRegisterScope(this); +} + + +void UseScratchRegisterScope::Close() { + if (masm_ != NULL) { + // Ensure that scopes nest perfectly, and do not outlive their parents. + // This is a run-time check because the order of destruction of objects in + // the _same_ scope is implementation-defined, and is likely to change in + // optimised builds. + VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this); + masm_->SetCurrentScratchRegisterScope(parent_); + + masm_->GetScratchRegisterList()->SetList(old_available_); + masm_->GetScratchVRegisterList()->SetList(old_available_vfp_); + + masm_ = NULL; + } +} + + +bool UseScratchRegisterScope::IsAvailable(const Register& reg) const { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + return masm_->GetScratchRegisterList()->Includes(reg); +} + + +bool UseScratchRegisterScope::IsAvailable(const VRegister& reg) const { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + return masm_->GetScratchVRegisterList()->IncludesAllOf(reg); +} + + +Register UseScratchRegisterScope::Acquire() { + VIXL_ASSERT(masm_ != NULL); + Register reg = masm_->GetScratchRegisterList()->GetFirstAvailableRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchRegisterList()->Remove(reg); + return reg; +} + + +VRegister UseScratchRegisterScope::AcquireV(unsigned size_in_bits) { + switch (size_in_bits) { + case kSRegSizeInBits: + return AcquireS(); + case kDRegSizeInBits: + return AcquireD(); + case kQRegSizeInBits: + return AcquireQ(); + default: + VIXL_UNREACHABLE(); + return NoVReg; + } +} + + +QRegister UseScratchRegisterScope::AcquireQ() { + VIXL_ASSERT(masm_ != NULL); + QRegister reg = + masm_->GetScratchVRegisterList()->GetFirstAvailableQRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + +DRegister UseScratchRegisterScope::AcquireD() { + VIXL_ASSERT(masm_ != NULL); + DRegister reg = + masm_->GetScratchVRegisterList()->GetFirstAvailableDRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + +SRegister UseScratchRegisterScope::AcquireS() { + VIXL_ASSERT(masm_ != NULL); + SRegister reg = + 
masm_->GetScratchVRegisterList()->GetFirstAvailableSRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + +void UseScratchRegisterScope::Release(const Register& reg) { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(!masm_->GetScratchRegisterList()->Includes(reg)); + masm_->GetScratchRegisterList()->Combine(reg); +} + + +void UseScratchRegisterScope::Release(const VRegister& reg) { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(!masm_->GetScratchVRegisterList()->IncludesAliasOf(reg)); + masm_->GetScratchVRegisterList()->Combine(reg); +} + + +void UseScratchRegisterScope::Include(const RegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + RegisterList excluded_registers(sp, lr, pc); + uint32_t mask = list.GetList() & ~excluded_registers.GetList(); + RegisterList* available = masm_->GetScratchRegisterList(); + available->SetList(available->GetList() | mask); +} + + +void UseScratchRegisterScope::Include(const VRegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + VRegisterList* available = masm_->GetScratchVRegisterList(); + available->SetList(available->GetList() | list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const RegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + RegisterList* available = masm_->GetScratchRegisterList(); + available->SetList(available->GetList() & ~list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const VRegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + VRegisterList* available = masm_->GetScratchVRegisterList(); + available->SetList(available->GetList() & ~list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const Operand& operand) { + if (operand.IsImmediateShiftedRegister()) { + Exclude(operand.GetBaseRegister()); + } else if (operand.IsRegisterShiftedRegister()) { + Exclude(operand.GetBaseRegister(), operand.GetShiftRegister()); + } else { + VIXL_ASSERT(operand.IsImmediate()); + } +} + + +void UseScratchRegisterScope::ExcludeAll() { + VIXL_ASSERT(masm_ != NULL); + masm_->GetScratchRegisterList()->SetList(0); + masm_->GetScratchVRegisterList()->SetList(0); +} + + +void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) { + // We skip the check when the pools are blocked. 
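// Note (editorial, not part of the patch): MustEmit asks the pool manager
// whether emitting `size` more bytes of code would push any pending literal
// or veneer past the last location at which its PC-relative offset is still
// encodable; if so, the pools are flushed before the instruction goes out.
// From the user's side this is automatic. A rough usage sketch, assuming the
// standard vixl::aarch32 literal API:
//
//   vixl::aarch32::MacroAssembler masm;
//   vixl::aarch32::Literal<uint32_t> lit(0xdeadbeef);
//   masm.Ldr(vixl::aarch32::r0, &lit);  // Records a forward reference.
//   masm.FinalizeCode();                // Emits any still-pending pools.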
+ if (ArePoolsBlocked()) return; + + VIXL_ASSERT(IsUint32(size_arg)); + uint32_t size = static_cast(size_arg); + + if (pool_manager_.MustEmit(GetCursorOffset(), size)) { + int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size); + VIXL_ASSERT(new_pc == GetCursorOffset()); + USE(new_pc); + } +} + + +void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond, + Register tmp, + uint32_t imm) { + if (IsUintN(16, imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mov(cond, tmp, imm & 0xffff); + return; + } + if (IsUsingT32()) { + if (ImmediateT32::IsImmediateT32(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, tmp, ~imm); + return; + } + } else { + if (ImmediateA32::IsImmediateA32(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, tmp, ~imm); + return; + } + } + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, tmp, imm & 0xffff); + movt(cond, tmp, imm >> 16); +} + + +MemOperand MacroAssembler::MemOperandComputationHelper( + Condition cond, + Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(scratch)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(base)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + + // Check for the simple pass-through case. + if ((offset & extra_offset_mask) == offset) return MemOperand(base, offset); + + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + + uint32_t load_store_offset = offset & extra_offset_mask; + uint32_t add_offset = offset & ~extra_offset_mask; + if ((add_offset != 0) && + (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) { + load_store_offset = 0; + add_offset = offset; + } + + if (base.IsPC()) { + // Special handling for PC bases. We must read the PC in the first + // instruction (and only in that instruction), and we must also take care to + // keep the same address calculation as loads and stores. For T32, that + // means using something like ADR, which uses AlignDown(PC, 4). + + // We don't handle positive offsets from PC because the intention is not + // clear; does the user expect the offset from the current + // GetCursorOffset(), or to allow a certain amount of space after the + // instruction? + VIXL_ASSERT((offset & 0x80000000) != 0); + if (IsUsingT32()) { + // T32: make the first instruction "SUB (immediate, from PC)" -- an alias + // of ADR -- to get behaviour like loads and stores. This ADR can handle + // at least as much offset as the load_store_offset so it can replace it. + + uint32_t sub_pc_offset = (-offset) & 0xfff; + load_store_offset = (offset + sub_pc_offset) & extra_offset_mask; + add_offset = (offset + sub_pc_offset) & ~extra_offset_mask; + + ExactAssemblyScope scope(this, k32BitT32InstructionSizeInBytes); + sub(cond, scratch, base, sub_pc_offset); + + if (add_offset == 0) return MemOperand(scratch, load_store_offset); + + // The rest of the offset can be generated in the usual way. + base = scratch; + } + // A32 can use any SUB instruction, so we don't have to do anything special + // here except to ensure that we read the PC first. 
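// Worked example (editorial, not from the source): with the kLdr offset mask
// 0xfff (see GetOffsetMask below) and `offset = -0x12345`, the T32 path
// above computes sub_pc_offset = 0x345, leaving add_offset = -0x12000 and a
// zero load/store offset, so the emitted sequence is roughly
//   sub scratch, pc, #0x345        ; the single PC read
//   sub scratch, scratch, #0x12000
//   ldr rX, [scratch]              ; rX and the load come from the caller
// which totals pc - 0x12345, matching a direct PC-relative load.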
+ } + + add(cond, scratch, base, add_offset); + return MemOperand(scratch, load_store_offset); +} + + +uint32_t MacroAssembler::GetOffsetMask(InstructionType type, + AddrMode addrmode) { + switch (type) { + case kLdr: + case kLdrb: + case kStr: + case kStrb: + if (IsUsingA32() || (addrmode == Offset)) { + return 0xfff; + } else { + return 0xff; + } + case kLdrsb: + case kLdrh: + case kLdrsh: + case kStrh: + if (IsUsingT32() && (addrmode == Offset)) { + return 0xfff; + } else { + return 0xff; + } + case kVldr: + case kVstr: + return 0x3fc; + case kLdrd: + case kStrd: + if (IsUsingA32()) { + return 0xff; + } else { + return 0x3fc; + } + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +HARDFLOAT void PrintfTrampolineRRRR( + const char* format, uint32_t a, uint32_t b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRRD( + const char* format, uint32_t a, uint32_t b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRDR( + const char* format, uint32_t a, uint32_t b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRDD( + const char* format, uint32_t a, uint32_t b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDRR( + const char* format, uint32_t a, double b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDRD( + const char* format, uint32_t a, double b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDDR( + const char* format, uint32_t a, double b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDDD( + const char* format, uint32_t a, double b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRRR( + const char* format, double a, uint32_t b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRRD( + const char* format, double a, uint32_t b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRDR( + const char* format, double a, uint32_t b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRDD( + const char* format, double a, uint32_t b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDRR( + const char* format, double a, double b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDRD( + const char* format, double a, double b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDDR( + const char* format, double a, double b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDDD( + const char* format, double a, double b, double c, double d) { + printf(format, a, b, c, d); +} + + +void MacroAssembler::Printf(const char* format, + CPURegister reg1, + CPURegister reg2, + CPURegister reg3, + CPURegister reg4) { + // Exclude all registers from the available scratch registers, so + // that we are able to use ip below. + // TODO: Refactor this function to use UseScratchRegisterScope + // for temporary registers below. 
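// Note (editorial): Printf is a debugging pseudo-instruction. It preserves
// the caller-saved core and VFP state, marshals up to four register
// arguments into AAPCS argument slots, and calls one of the
// PrintfTrampoline* shims above. A hedged usage sketch (register choice is
// ours; unused argument slots default to no-register):
//
//   masm.Mov(r1, 42);
//   masm.Printf("answer = %d\n", r1);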
+ UseScratchRegisterScope scratch(this); + scratch.ExcludeAll(); + if (generate_simulator_code_) { + PushRegister(reg4); + PushRegister(reg3); + PushRegister(reg2); + PushRegister(reg1); + Push(RegisterList(r0, r1)); + StringLiteral* format_literal = + new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool); + Adr(r0, format_literal); + uint32_t args = (reg4.GetType() << 12) | (reg3.GetType() << 8) | + (reg2.GetType() << 4) | reg1.GetType(); + Mov(r1, args); + Hvc(kPrintfCode); + Pop(RegisterList(r0, r1)); + int size = reg4.GetRegSizeInBytes() + reg3.GetRegSizeInBytes() + + reg2.GetRegSizeInBytes() + reg1.GetRegSizeInBytes(); + Drop(size); + } else { + // Generate on a native platform => 32 bit environment. + // Preserve core registers r0-r3, r12, r14 + const uint32_t saved_registers_mask = + kCallerSavedRegistersMask | (1 << r5.GetCode()); + Push(RegisterList(saved_registers_mask)); + // Push VFP registers. + Vpush(Untyped64, DRegisterList(d0, 8)); + if (Has32DRegs()) Vpush(Untyped64, DRegisterList(d16, 16)); + // Search one register which has been saved and which doesn't need to be + // printed. + RegisterList available_registers(kCallerSavedRegistersMask); + if (reg1.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg1.GetCode())); + } + if (reg2.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg2.GetCode())); + } + if (reg3.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg3.GetCode())); + } + if (reg4.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg4.GetCode())); + } + Register tmp = available_registers.GetFirstAvailableRegister(); + VIXL_ASSERT(tmp.GetType() == CPURegister::kRRegister); + // Push the flags. + Mrs(tmp, APSR); + Push(tmp); + Vmrs(RegisterOrAPSR_nzcv(tmp.GetCode()), FPSCR); + Push(tmp); + // Push the registers to print on the stack. + PushRegister(reg4); + PushRegister(reg3); + PushRegister(reg2); + PushRegister(reg1); + int core_count = 1; + int vfp_count = 0; + uint32_t printf_type = 0; + // Pop the registers to print and store them into r1-r3 and/or d0-d3. + // Reg4 may stay into the stack if all the register to print are core + // registers. + PreparePrintfArgument(reg1, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg2, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg3, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg4, &core_count, &vfp_count, &printf_type); + // Ensure that the stack is aligned on 8 bytes. + And(r5, sp, 0x7); + if (core_count == 5) { + // One 32 bit argument (reg4) has been left on the stack => align the + // stack + // before the argument. + Pop(r0); + Sub(sp, sp, r5); + Push(r0); + } else { + Sub(sp, sp, r5); + } + // Select the right trampoline depending on the arguments. 
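// Note (editorial): how the dispatch below decodes printf_type, as built by
// PreparePrintfArgument: each of the four argument positions contributes one
// bit, set when that argument is floating-point (S registers are widened to
// doubles first). So printf_type == 5 (0b0101) means arguments 1 and 3 are
// doubles, selecting PrintfTrampolineDRDR.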
+    uintptr_t address;
+    switch (printf_type) {
+      case 0:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
+        break;
+      case 1:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRR);
+        break;
+      case 2:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRR);
+        break;
+      case 3:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRR);
+        break;
+      case 4:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDR);
+        break;
+      case 5:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDR);
+        break;
+      case 6:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDR);
+        break;
+      case 7:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDR);
+        break;
+      case 8:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRD);
+        break;
+      case 9:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRD);
+        break;
+      case 10:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRD);
+        break;
+      case 11:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRD);
+        break;
+      case 12:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDD);
+        break;
+      case 13:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDD);
+        break;
+      case 14:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDD);
+        break;
+      case 15:
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDD);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
+        break;
+    }
+    StringLiteral* format_literal =
+        new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
+    Adr(r0, format_literal);
+    Mov(ip, Operand::From(address));
+    Blx(ip);
+    // If register reg4 was left on the stack => skip it.
+    if (core_count == 5) Drop(kRegSizeInBytes);
+    // Restore the stack as it was before alignment.
+    Add(sp, sp, r5);
+    // Restore the flags.
+    Pop(tmp);
+    Vmsr(FPSCR, tmp);
+    Pop(tmp);
+    Msr(APSR_nzcvqg, tmp);
+    // Restore the registers.
+    if (Has32DRegs()) Vpop(Untyped64, DRegisterList(d16, 16));
+    Vpop(Untyped64, DRegisterList(d0, 8));
+    Pop(RegisterList(saved_registers_mask));
+  }
+}
+
+
+void MacroAssembler::PushRegister(CPURegister reg) {
+  switch (reg.GetType()) {
+    case CPURegister::kNoRegister:
+      break;
+    case CPURegister::kRRegister:
+      Push(Register(reg.GetCode()));
+      break;
+    case CPURegister::kSRegister:
+      Vpush(Untyped32, SRegisterList(SRegister(reg.GetCode())));
+      break;
+    case CPURegister::kDRegister:
+      Vpush(Untyped64, DRegisterList(DRegister(reg.GetCode())));
+      break;
+    case CPURegister::kQRegister:
+      VIXL_UNIMPLEMENTED();
+      break;
+  }
+}
+
+
+void MacroAssembler::PreparePrintfArgument(CPURegister reg,
+                                           int* core_count,
+                                           int* vfp_count,
+                                           uint32_t* printf_type) {
+  switch (reg.GetType()) {
+    case CPURegister::kNoRegister:
+      break;
+    case CPURegister::kRRegister:
+      VIXL_ASSERT(*core_count <= 4);
+      if (*core_count < 4) Pop(Register(*core_count));
+      *core_count += 1;
+      break;
+    case CPURegister::kSRegister:
+      VIXL_ASSERT(*vfp_count < 4);
+      *printf_type |= 1 << (*core_count + *vfp_count - 1);
+      Vpop(Untyped32, SRegisterList(SRegister(*vfp_count * 2)));
+      Vcvt(F64, F32, DRegister(*vfp_count), SRegister(*vfp_count * 2));
+      *vfp_count += 1;
+      break;
+    case CPURegister::kDRegister:
+      VIXL_ASSERT(*vfp_count < 4);
+      *printf_type |= 1 << (*core_count + *vfp_count - 1);
+      Vpop(Untyped64, DRegisterList(DRegister(*vfp_count)));
+      *vfp_count += 1;
+      break;
+    case CPURegister::kQRegister:
+      VIXL_UNIMPLEMENTED();
+      break;
+  }
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondROp instruction,
+                              Condition cond,
+                              Register rn,
+                              const Operand& operand) {
+  VIXL_ASSERT((type == kMovt) || (type == kSxtb16) || (type == kTeq) ||
+              (type == kUxtb16));
+
+  if (type == kMovt) {
VIXL_ABORT_WITH_MSG("`Movt` expects a 16-bit immediate.\n"); + } + + // This delegate only supports teq with immediates. + CONTEXT_SCOPE; + if ((type == kTeq) && operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, operand.GetImmediate()); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + teq(cond, rn, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeROp instruction, + Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) || + (type == kMovs) || (type == kMvn) || (type == kMvns) || + (type == kSxtb) || (type == kSxth) || (type == kTst) || + (type == kUxtb) || (type == kUxth)); + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + VIXL_ASSERT((type != kMov) || (type != kMovs)); + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. + VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, + scratch, + operand.GetBaseRegister(), + operand.GetShiftRegister()); + (this->*instruction)(cond, size, rn, scratch); + return; + } + } + if (operand.IsImmediate()) { + uint32_t imm = operand.GetImmediate(); + switch (type) { + case kMov: + case kMovs: + if (!rn.IsPC()) { + // Immediate is too large, but not using PC, so handle with mov{t}. + HandleOutOfBoundsImmediate(cond, rn, imm); + if (type == kMovs) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + tst(cond, rn, rn); + } + return; + } else if (type == kMov) { + VIXL_ASSERT(IsUsingA32() || cond.Is(al)); + // Immediate is too large and using PC, so handle using a temporary + // register. 
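// Note (editorial): why the scratch + bx sequence that follows: an arbitrary
// 32-bit immediate needs a mov/movt pair, and movt cannot write PC, so
// `mov pc, #imm` is not encodable for a general immediate. The value is
// therefore materialised in a scratch register first, and the BX performs
// the PC write as an explicit interworking branch (bit 0 of the target
// selects ARM or Thumb state).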
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(al, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + bx(cond, scratch); + return; + } + break; + case kCmn: + case kCmp: + if (IsUsingA32() || !rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + case kMvn: + case kMvns: + if (!rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + case kTst: + if (IsUsingA32() || !rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + default: // kSxtb, Sxth, Uxtb, Uxth + break; + } + } + Assembler::Delegate(type, instruction, cond, size, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRROp instruction, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + if ((type == kSxtab) || (type == kSxtab16) || (type == kSxtah) || + (type == kUxtab) || (type == kUxtab16) || (type == kUxtah) || + (type == kPkhbt) || (type == kPkhtb)) { + UnimplementedDelegate(type); + return; + } + + // This delegate only handles the following instructions. + VIXL_ASSERT((type == kOrn) || (type == kOrns) || (type == kRsc) || + (type == kRscs)); + CONTEXT_SCOPE; + + // T32 does not support register shifted register operands, emulate it. + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. + VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register rm = operand.GetBaseRegister(); + Register rs = operand.GetShiftRegister(); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. + if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + // TODO: The scope length was measured empirically. We should analyse the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, scratch, rm, rs); + (this->*instruction)(cond, rd, rn, scratch); + return; + } + } + + // T32 does not have a Rsc instruction, negate the lhs input and turn it into + // an Adc. Adc and Rsc are equivalent using a bitwise NOT: + // adc rd, rn, operand <-> rsc rd, NOT(rn), operand + if (IsUsingT32() && ((type == kRsc) || (type == kRscs))) { + // The RegisterShiftRegister case should have been handled above. 
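+    // As a concrete check of the identity (illustrative numbers): with C = 1,
+    // rn = 1 and operand = #5,
+    //   rsc rd, rn, #5   computes 5 - 1 - (1 - C)    = 4
+    //   mvn tmp, rn      gives    ~1 = 0xfffffffe
+    //   adc rd, tmp, #5  computes 0xfffffffe + 5 + C = 4 (mod 2^32)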
+ VIXL_ASSERT(!operand.IsRegisterShiftedRegister()); + UseScratchRegisterScope temps(this); + // Try to use rd as a scratch register. We can do this if it aliases rn + // (because we read it in the first instruction), but not rm. + temps.Include(rd); + temps.Exclude(operand); + Register negated_rn = temps.Acquire(); + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, negated_rn, rn); + } + if (type == kRsc) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + adc(cond, rd, negated_rn, operand); + return; + } + // TODO: We shouldn't have to specify how much space the next instruction + // needs. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + adcs(cond, rd, negated_rn, operand); + return; + } + + if (operand.IsImmediate()) { + // If the immediate can be encoded when inverted, turn Orn into Orr. + // Otherwise rely on HandleOutOfBoundsImmediate to generate a series of + // mov. + int32_t imm = operand.GetSignedImmediate(); + if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + switch (type) { + case kOrn: + orr(cond, rd, rn, ~imm); + return; + case kOrns: + orrs(cond, rd, rn, ~imm); + return; + default: + VIXL_UNREACHABLE(); + break; + } + } + } + + // A32 does not have a Orn instruction, negate the rhs input and turn it into + // a Orr. + if (IsUsingA32() && ((type == kOrn) || (type == kOrns))) { + // TODO: orn r0, r1, imm -> orr r0, r1, neg(imm) if doable + // mvn r0, r2 + // orr r0, r1, r0 + Register scratch; + UseScratchRegisterScope temps(this); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. + if (!rd.Is(rn)) temps.Include(rd); + scratch = temps.Acquire(); + { + // TODO: We shouldn't have to specify how much space the next instruction + // needs. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + mvn(cond, scratch, operand); + } + if (type == kOrns) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + orrs(cond, rd, rn, scratch); + return; + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + orr(cond, rd, rn, scratch); + return; + } + + if (operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. 
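+    // For example (an illustrative expansion), on A32
+    //   Rsc(r0, r1, 0x12345678)
+    // has no modified-immediate encoding and becomes roughly
+    //   movw r0, #0x5678
+    //   movt r0, #0x1234
+    //   rsc r0, r1, r0
+    // with rd (r0) reused as the scratch register because it is not also rn.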
+ if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + int32_t imm = operand.GetSignedImmediate(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rd, rn, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, rd, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeRL instruction, + Condition cond, + EncodingSize size, + Register rd, + Location* location) { + VIXL_ASSERT((type == kLdr) || (type == kAdr)); + + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + + if ((type == kLdr) && location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + temps.Include(rd); + uint32_t mask = GetOffsetMask(type, Offset); + ldr(rd, MemOperandComputationHelper(cond, temps.Acquire(), location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, size, rd, location); +} + + +bool MacroAssembler::GenerateSplitInstruction( + InstructionCondSizeRROp instruction, + Condition cond, + Register rd, + Register rn, + uint32_t imm, + uint32_t mask) { + uint32_t high = imm & ~mask; + if (!IsModifiedImmediate(high) && !rn.IsPC()) return false; + // If high is a modified immediate, we can perform the operation with + // only 2 instructions. + // Else, if rn is PC, we want to avoid moving PC into a temporary. + // Therefore, we also use the pattern even if the second call may + // generate 3 instructions. + uint32_t low = imm & mask; + CodeBufferCheckScope scope(this, + (rn.IsPC() ? 4 : 2) * kMaxInstructionSizeInBytes); + (this->*instruction)(cond, Best, rd, rn, low); + (this->*instruction)(cond, Best, rd, rd, high); + return true; +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeRROp instruction, + Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT( + (type == kAdc) || (type == kAdcs) || (type == kAdd) || (type == kAdds) || + (type == kAnd) || (type == kAnds) || (type == kAsr) || (type == kAsrs) || + (type == kBic) || (type == kBics) || (type == kEor) || (type == kEors) || + (type == kLsl) || (type == kLsls) || (type == kLsr) || (type == kLsrs) || + (type == kOrr) || (type == kOrrs) || (type == kRor) || (type == kRors) || + (type == kRsb) || (type == kRsbs) || (type == kSbc) || (type == kSbcs) || + (type == kSub) || (type == kSubs)); + + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. + VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register rm = operand.GetBaseRegister(); + Register rs = operand.GetShiftRegister(); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. 
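+      // For example (an illustrative expansion), in T32
+      //   Adds(r0, r1, Operand(r2, LSL, r3))
+      // has no single-instruction encoding and becomes
+      //   lsl r0, r2, r3
+      //   adds r0, r1, r0
+      // with rd (r0) doubling as the scratch register since it is not rn.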
+ if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, scratch, rm, rs); + (this->*instruction)(cond, size, rd, rn, scratch); + return; + } + } + if (operand.IsImmediate()) { + int32_t imm = operand.GetSignedImmediate(); + if (ImmediateT32::IsImmediateT32(~imm)) { + if (IsUsingT32()) { + switch (type) { + case kOrr: + orn(cond, rd, rn, ~imm); + return; + case kOrrs: + orns(cond, rd, rn, ~imm); + return; + default: + break; + } + } + } + if (imm < 0) { + InstructionCondSizeRROp asmcb = NULL; + // Add and sub are equivalent using an arithmetic negation: + // add rd, rn, #imm <-> sub rd, rn, - #imm + // Add and sub with carry are equivalent using a bitwise NOT: + // adc rd, rn, #imm <-> sbc rd, rn, NOT #imm + switch (type) { + case kAdd: + asmcb = &Assembler::sub; + imm = -imm; + break; + case kAdds: + asmcb = &Assembler::subs; + imm = -imm; + break; + case kSub: + asmcb = &Assembler::add; + imm = -imm; + break; + case kSubs: + asmcb = &Assembler::adds; + imm = -imm; + break; + case kAdc: + asmcb = &Assembler::sbc; + imm = ~imm; + break; + case kAdcs: + asmcb = &Assembler::sbcs; + imm = ~imm; + break; + case kSbc: + asmcb = &Assembler::adc; + imm = ~imm; + break; + case kSbcs: + asmcb = &Assembler::adcs; + imm = ~imm; + break; + default: + break; + } + if (asmcb != NULL) { + CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes); + (this->*asmcb)(cond, size, rd, rn, Operand(imm)); + return; + } + } + + // When rn is PC, only handle negative offsets. The correct way to handle + // positive offsets isn't clear; does the user want the offset from the + // start of the macro, or from the end (to allow a certain amount of space)? + // When type is Add or Sub, imm is always positive (imm < 0 has just been + // handled and imm == 0 would have been generated without the need of a + // delegate). Therefore, only add to PC is forbidden here. + if ((((type == kAdd) && !rn.IsPC()) || (type == kSub)) && + (IsUsingA32() || (!rd.IsPC() && !rn.IsPC()))) { + VIXL_ASSERT(imm > 0); + // Try to break the constant into two modified immediates. + // For T32 also try to break the constant into one imm12 and one modified + // immediate. Count the trailing zeroes and get the biggest even value. + int trailing_zeroes = CountTrailingZeros(imm) & ~1u; + uint32_t mask = ((trailing_zeroes < 4) && IsUsingT32()) + ? 0xfff + : (0xff << trailing_zeroes); + if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) { + return; + } + InstructionCondSizeRROp asmcb = NULL; + switch (type) { + case kAdd: + asmcb = &Assembler::sub; + break; + case kSub: + asmcb = &Assembler::add; + break; + default: + VIXL_UNREACHABLE(); + } + if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) { + return; + } + } + + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. + if (!rd.Is(rn)) temps.Include(rd); + if (rn.IsPC()) { + // If we're reading the PC, we need to do it in the first instruction, + // otherwise we'll read the wrong value. We rely on this to handle the + // long-range PC-relative MemOperands which can result from user-managed + // literals. + + // Only handle negative offsets. The correct way to handle positive + // offsets isn't clear; does the user want the offset from the start of + // the macro, or from the end (to allow a certain amount of space)? 
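+      // For example (illustrative): Sub(r0, pc, 0x12345) denotes a negative
+      // offset and is handled below as
+      //   mov <scratch>, pc
+      //   sub r0, <scratch>, #0x12345   (split further if needed)
+      // whereas Add(r0, pc, 0x12345) falls through to the base Assembler
+      // delegate instead.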
+      bool offset_is_negative_or_zero = (imm <= 0);
+      switch (type) {
+        case kAdd:
+        case kAdds:
+          offset_is_negative_or_zero = (imm <= 0);
+          break;
+        case kSub:
+        case kSubs:
+          offset_is_negative_or_zero = (imm >= 0);
+          break;
+        case kAdc:
+        case kAdcs:
+          offset_is_negative_or_zero = (imm < 0);
+          break;
+        case kSbc:
+        case kSbcs:
+          offset_is_negative_or_zero = (imm > 0);
+          break;
+        default:
+          break;
+      }
+      if (offset_is_negative_or_zero) {
+        {
+          rn = temps.Acquire();
+          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+          mov(cond, rn, pc);
+        }
+        // Recurse rather than falling through, to try to get the immediate
+        // into a single instruction.
+        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
+        (this->*instruction)(cond, size, rd, rn, operand);
+        return;
+      }
+    } else {
+      Register scratch = temps.Acquire();
+      // TODO: The scope length was measured empirically. We should analyse
+      // the worst-case size and add targeted tests.
+      CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
+      mov(cond, scratch, operand.GetImmediate());
+      (this->*instruction)(cond, size, rd, rn, scratch);
+      return;
+    }
+  }
+  Assembler::Delegate(type, instruction, cond, size, rd, rn, operand);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionRL instruction,
+                              Register rn,
+                              Location* location) {
+  VIXL_ASSERT((type == kCbz) || (type == kCbnz));
+
+  CONTEXT_SCOPE;
+  CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
+  if (IsUsingA32()) {
+    if (type == kCbz) {
+      VIXL_ABORT_WITH_MSG("Cbz is only available for T32.\n");
+    } else {
+      VIXL_ABORT_WITH_MSG("Cbnz is only available for T32.\n");
+    }
+  } else if (rn.IsLow()) {
+    switch (type) {
+      case kCbnz: {
+        Label done;
+        cbz(rn, &done);
+        b(location);
+        Bind(&done);
+        return;
+      }
+      case kCbz: {
+        Label done;
+        cbnz(rn, &done);
+        b(location);
+        Bind(&done);
+        return;
+      }
+      default:
+        break;
+    }
+  }
+  Assembler::Delegate(type, instruction, rn, location);
+}
+
+
+template <typename T>
+static inline bool IsI64BitPattern(T imm) {
+  for (T mask = 0xff << ((sizeof(T) - 1) * 8); mask != 0; mask >>= 8) {
+    if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
+  }
+  return true;
+}
+
+
+template <typename T>
+static inline bool IsI8BitPattern(T imm) {
+  uint8_t imm8 = imm & 0xff;
+  for (unsigned rep = sizeof(T) - 1; rep > 0; rep--) {
+    imm >>= 8;
+    if ((imm & 0xff) != imm8) return false;
+  }
+  return true;
+}
+
+
+static inline bool CanBeInverted(uint32_t imm32) {
+  uint32_t fill8 = 0;
+
+  if ((imm32 & 0xffffff00) == 0xffffff00) {
+    // 11111111 11111111 11111111 abcdefgh
+    return true;
+  }
+  if (((imm32 & 0xff) == 0) || ((imm32 & 0xff) == 0xff)) {
+    fill8 = imm32 & 0xff;
+    imm32 >>= 8;
+    if ((imm32 >> 8) == 0xffff) {
+      // 11111111 11111111 abcdefgh 00000000
+      // or 11111111 11111111 abcdefgh 11111111
+      return true;
+    }
+    if ((imm32 & 0xff) == fill8) {
+      imm32 >>= 8;
+      if ((imm32 >> 8) == 0xff) {
+        // 11111111 abcdefgh 00000000 00000000
+        // or 11111111 abcdefgh 11111111 11111111
+        return true;
+      }
+      if ((fill8 == 0xff) && ((imm32 & 0xff) == 0xff)) {
+        // abcdefgh 11111111 11111111 11111111
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+template <typename RES, typename T>
+static inline RES replicate(T imm) {
+  VIXL_ASSERT((sizeof(RES) > sizeof(T)) &&
+              (((sizeof(RES) / sizeof(T)) * sizeof(T)) == sizeof(RES)));
+  RES res = imm;
+  for (unsigned i = sizeof(RES) / sizeof(T) - 1; i > 0; i--) {
+    res = (res << (sizeof(T) * 8)) | imm;
+  }
+  return res;
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondDtSSop instruction,
+                              Condition cond,
+                              DataType dt,
+                              SRegister rd,
+                              const SOperand& operand) {
+  CONTEXT_SCOPE;
+  if (type == kVmov) {
+    if (operand.IsImmediate() && dt.Is(F32)) {
+      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
+      if (neon_imm.CanConvert<float>()) {
+        // movw ip, imm16
+        // movt ip, imm16
+        // vmov s0, ip
+        UseScratchRegisterScope temps(this);
+        Register scratch = temps.Acquire();
+        float f = neon_imm.GetImmediate<float>();
+        // TODO: The scope length was measured empirically. We should analyse
+        // the worst-case size and add targeted tests.
+        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
+        mov(cond, scratch, FloatToRawbits(f));
+        vmov(cond, rd, scratch);
+        return;
+      }
+    }
+  }
+  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondDtDDop instruction,
+                              Condition cond,
+                              DataType dt,
+                              DRegister rd,
+                              const DOperand& operand) {
+  CONTEXT_SCOPE;
+  if (type == kVmov) {
+    if (operand.IsImmediate()) {
+      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
+      switch (dt.GetValue()) {
+        case I32:
+          if (neon_imm.CanConvert<uint32_t>()) {
+            uint32_t imm = neon_imm.GetImmediate<uint32_t>();
+            // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+            // vmov.i32 d0, 0xff0000ff will translate into
+            // vmov.i64 d0, 0xff0000ffff0000ff
+            if (IsI64BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I64, rd, replicate<uint64_t>(imm));
+              return;
+            }
+            // vmov.i32 d0, 0xffab0000 will translate into
+            // vmvn.i32 d0, 0x0054ffff
+            if (cond.Is(al) && CanBeInverted(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmvn(I32, rd, ~imm);
+              return;
+            }
+          }
+          break;
+        case I16:
+          if (neon_imm.CanConvert<uint16_t>()) {
+            uint16_t imm = neon_imm.GetImmediate<uint16_t>();
+            // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+          }
+          break;
+        case I64:
+          if (neon_imm.CanConvert<uint64_t>()) {
+            uint64_t imm = neon_imm.GetImmediate<uint64_t>();
+            // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+            // mov ip, lo(imm64)
+            // vdup d0, ip
+            // vdup is preferred to 'vmov d0[0]' as d0[1] does not need to be
+            // preserved
+            {
+              UseScratchRegisterScope temps(this);
+              Register scratch = temps.Acquire();
+              {
+                // TODO: The scope length was measured empirically. We should
+                // analyse the worst-case size and add targeted tests.
+                CodeBufferCheckScope scope(this,
+                                           2 * kMaxInstructionSizeInBytes);
+                mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
+              }
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vdup(cond, Untyped32, rd, scratch);
+            }
+            // mov ip, hi(imm64)
+            // vmov d0[1], ip
+            {
+              UseScratchRegisterScope temps(this);
+              Register scratch = temps.Acquire();
+              {
+                // TODO: The scope length was measured empirically. We should
+                // analyse the worst-case size and add targeted tests.
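+                // For example (illustrative):
+                //   Vmov(I64, d0, UINT64_C(0x0000002a00000017))
+                // expands along this lo/hi path to
+                //   mov ip, #0x17
+                //   vdup.32 d0, ip
+                //   mov ip, #0x2a
+                //   vmov.32 d0[1], ip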
+                CodeBufferCheckScope scope(this,
+                                           2 * kMaxInstructionSizeInBytes);
+                mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
+              }
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, Untyped32, DRegisterLane(rd, 1), scratch);
+            }
+            return;
+          }
+          break;
+        default:
+          break;
+      }
+      VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
+      if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
+        // mov ip, imm32
+        // vdup.16 d0, ip
+        UseScratchRegisterScope temps(this);
+        Register scratch = temps.Acquire();
+        {
+          CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
+          mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
+        }
+        DataTypeValue vdup_dt = Untyped32;
+        switch (dt.GetValue()) {
+          case I16:
+            vdup_dt = Untyped16;
+            break;
+          case I32:
+            vdup_dt = Untyped32;
+            break;
+          default:
+            VIXL_UNREACHABLE();
+        }
+        CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+        vdup(cond, vdup_dt, rd, scratch);
+        return;
+      }
+      if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
+        float f = neon_imm.GetImmediate<float>();
+        // Punt to vmov.i32
+        // TODO: The scope length was guessed based on the double case below.
+        // We should analyse the worst-case size and add targeted tests.
+        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
+        vmov(cond, I32, rd, FloatToRawbits(f));
+        return;
+      }
+      if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
+        // Punt to vmov.i64
+        double d = neon_imm.GetImmediate<double>();
+        // TODO: The scope length was measured empirically. We should analyse
+        // the worst-case size and add targeted tests.
+        CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
+        vmov(cond, I64, rd, DoubleToRawbits(d));
+        return;
+      }
+    }
+  }
+  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondDtQQop instruction,
+                              Condition cond,
+                              DataType dt,
+                              QRegister rd,
+                              const QOperand& operand) {
+  CONTEXT_SCOPE;
+  if (type == kVmov) {
+    if (operand.IsImmediate()) {
+      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
+      switch (dt.GetValue()) {
+        case I32:
+          if (neon_imm.CanConvert<uint32_t>()) {
+            uint32_t imm = neon_imm.GetImmediate<uint32_t>();
+            // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+            // vmov.i32 d0, 0xff0000ff will translate into
+            // vmov.i64 d0, 0xff0000ffff0000ff
+            if (IsI64BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I64, rd, replicate<uint64_t>(imm));
+              return;
+            }
+            // vmov.i32 d0, 0xffab0000 will translate into
+            // vmvn.i32 d0, 0x0054ffff
+            if (CanBeInverted(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmvn(cond, I32, rd, ~imm);
+              return;
+            }
+          }
+          break;
+        case I16:
+          if (neon_imm.CanConvert<uint16_t>()) {
+            uint16_t imm = neon_imm.GetImmediate<uint16_t>();
+            // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+          }
+          break;
+        case I64:
+          if (neon_imm.CanConvert<uint64_t>()) {
+            uint64_t imm = neon_imm.GetImmediate<uint64_t>();
+            // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
+            if (IsI8BitPattern(imm)) {
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, I8, rd, imm & 0xff);
+              return;
+            }
+            // mov ip, lo(imm64)
+            // vdup q0, ip
+            // vdup is preferred to 'vmov d0[0]' as d0[1-3] don't need to be
+            // preserved
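+            // For example (illustrative):
+            //   Vmov(I64, q0, UINT64_C(0x12345678deadbeef))
+            // becomes
+            //   mov ip, #0xdeadbeef
+            //   vdup.32 q0, ip
+            //   mov ip, #0x12345678
+            //   vmov.32 d0[1], ip
+            //   vmov.f64 d1, d0
+            // leaving the 64-bit value replicated in both halves of q0.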
+            {
+              UseScratchRegisterScope temps(this);
+              Register scratch = temps.Acquire();
+              {
+                CodeBufferCheckScope scope(this,
+                                           2 * kMaxInstructionSizeInBytes);
+                mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
+              }
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vdup(cond, Untyped32, rd, scratch);
+            }
+            // mov ip, hi(imm64)
+            // vmov.i32 d0[1], ip
+            // vmov d1, d0
+            {
+              UseScratchRegisterScope temps(this);
+              Register scratch = temps.Acquire();
+              {
+                CodeBufferCheckScope scope(this,
+                                           2 * kMaxInstructionSizeInBytes);
+                mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
+              }
+              {
+                CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+                vmov(cond,
+                     Untyped32,
+                     DRegisterLane(rd.GetLowDRegister(), 1),
+                     scratch);
+              }
+              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+              vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
+            }
+            return;
+          }
+          break;
+        default:
+          break;
+      }
+      VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
+      if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
+        // mov ip, imm32
+        // vdup.16 d0, ip
+        UseScratchRegisterScope temps(this);
+        Register scratch = temps.Acquire();
+        {
+          CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
+          mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
+        }
+        DataTypeValue vdup_dt = Untyped32;
+        switch (dt.GetValue()) {
+          case I16:
+            vdup_dt = Untyped16;
+            break;
+          case I32:
+            vdup_dt = Untyped32;
+            break;
+          default:
+            VIXL_UNREACHABLE();
+        }
+        CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
+        vdup(cond, vdup_dt, rd, scratch);
+        return;
+      }
+      if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
+        // Punt to vmov.i32
+        float f = neon_imm.GetImmediate<float>();
+        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
+        vmov(cond, I32, rd, FloatToRawbits(f));
+        return;
+      }
+      if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
+        // Use vmov to create the double in the low D register, then duplicate
+        // it into the high D register.
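+        // For example (illustrative): Vmov(F64, q0, 1.0) becomes
+        //   vmov.f64 d0, #1.0
+        //   vmov.f64 d1, d0
+        // (1.0 is VFP-encodable; other values may need a longer expansion,
+        // hence the generous scope below.)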
+        double d = neon_imm.GetImmediate<double>();
+        CodeBufferCheckScope scope(this, 7 * kMaxInstructionSizeInBytes);
+        vmov(cond, F64, rd.GetLowDRegister(), d);
+        vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
+        return;
+      }
+    }
+  }
+  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondRL instruction,
+                              Condition cond,
+                              Register rt,
+                              Location* location) {
+  VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) ||
+              (type == kLdrsh));
+
+  CONTEXT_SCOPE;
+
+  if (location->IsBound()) {
+    CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
+    UseScratchRegisterScope temps(this);
+    temps.Include(rt);
+    Register scratch = temps.Acquire();
+    uint32_t mask = GetOffsetMask(type, Offset);
+    switch (type) {
+      case kLdrb:
+        ldrb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
+        return;
+      case kLdrh:
+        ldrh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
+        return;
+      case kLdrsb:
+        ldrsb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
+        return;
+      case kLdrsh:
+        ldrsh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
+        return;
+      default:
+        VIXL_UNREACHABLE();
+    }
+    return;
+  }
+
+  Assembler::Delegate(type, instruction, cond, rt, location);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondRRL instruction,
+                              Condition cond,
+                              Register rt,
+                              Register rt2,
+                              Location* location) {
+  VIXL_ASSERT(type == kLdrd);
+
+  CONTEXT_SCOPE;
+
+  if (location->IsBound()) {
+    CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
+    UseScratchRegisterScope temps(this);
+    temps.Include(rt, rt2);
+    Register scratch = temps.Acquire();
+    uint32_t mask = GetOffsetMask(type, Offset);
+    ldrd(rt, rt2, MemOperandComputationHelper(cond, scratch, location, mask));
+    return;
+  }
+
+  Assembler::Delegate(type, instruction, cond, rt, rt2, location);
+}
+
+
+void MacroAssembler::Delegate(InstructionType type,
+                              InstructionCondSizeRMop instruction,
+                              Condition cond,
+                              EncodingSize size,
+                              Register rd,
+                              const MemOperand& operand) {
+  CONTEXT_SCOPE;
+  VIXL_ASSERT(size.IsBest());
+  VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) ||
+              (type == kLdrsb) || (type == kLdrsh) || (type == kStr) ||
+              (type == kStrb) || (type == kStrh));
+  if (operand.IsImmediate()) {
+    const Register& rn = operand.GetBaseRegister();
+    AddrMode addrmode = operand.GetAddrMode();
+    int32_t offset = operand.GetOffsetImmediate();
+    uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
+    // Try to maximize the offset used by the MemOperand (load_store_offset).
+    // Add the part which can't be used by the MemOperand (add_offset).
+    uint32_t load_store_offset = offset & extra_offset_mask;
+    uint32_t add_offset = offset & ~extra_offset_mask;
+    if ((add_offset != 0) &&
+        (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
+      load_store_offset = 0;
+      add_offset = offset;
+    }
+    switch (addrmode) {
+      case PreIndex:
+        // Avoid the unpredictable case 'str r0, [r0, imm]!'
+        if (!rn.Is(rd)) {
+          // Pre-Indexed case:
+          //  ldr r0, [r1, 12345]!
will translate into + // add r1, r1, 12345 + // ldr r0, [r1] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(rn, load_store_offset, PreIndex)); + } + return; + } + break; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. + if ((type != kStr) && (type != kStrb) && (type != kStrh) && + !rd.Is(rn)) { + temps.Include(rd); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldr r0, [r1, 12345] will translate into + // add r0, r1, 12345 + // ldr r0, [r0] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(scratch, load_store_offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldr r0, [r0], imm' + if (!rn.Is(rd)) { + // Post-indexed case: + // ldr r0. [r1], imm32 will translate into + // ldr r0, [r1] + // movw ip. imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(rn, load_store_offset, PostIndex)); + } + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + return; + } + break; + } + } else if (operand.IsPlainRegister()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + const Register& rm = operand.GetOffsetRegister(); + if (rm.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "offset register.\n"); + } + if (rn.IsPC()) { + if (addrmode == Offset) { + if (IsUsingT32()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "base register for T32.\n"); + } + } else { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "base register in pre-index or post-index mode.\n"); + } + } + switch (addrmode) { + case PreIndex: + // Avoid the unpredictable case 'str r0, [r0, imm]!' + if (!rn.Is(rd)) { + // Pre-Indexed case: + // ldr r0, [r1, r2]! will translate into + // add r1, r1, r2 + // ldr r0, [r1] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(rn, Offset)); + } + return; + } + break; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if this is not a + // store. + // Avoid using PC as a temporary as this has side-effects. 
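+        // For example (an illustrative expansion), in T32 the subtracted
+        // register form
+        //   Ldrsh(r0, MemOperand(r1, minus, r2))
+        // has no direct encoding and becomes
+        //   sub r0, r1, r2
+        //   ldrsh r0, [r0]
+        // with rd reused as the scratch register holding the address.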
+ if ((type != kStr) && (type != kStrb) && (type != kStrh) && + !rd.IsPC()) { + temps.Include(rd); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldr r0, [r1, r2] will translate into + // add r0, r1, r2 + // ldr r0, [r0] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, scratch, rn, rm); + } else { + sub(cond, scratch, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldr r0, [r0], imm' + if (!rn.Is(rd)) { + // Post-indexed case: + // ldr r0. [r1], r2 will translate into + // ldr r0, [r1] + // add r1, r1, r2 + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(rn, Offset)); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + return; + } + break; + } + } + Assembler::Delegate(type, instruction, cond, size, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRRMop instruction, + Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + if ((type == kLdaexd) || (type == kLdrexd) || (type == kStlex) || + (type == kStlexb) || (type == kStlexh) || (type == kStrex) || + (type == kStrexb) || (type == kStrexh)) { + UnimplementedDelegate(type); + return; + } + + VIXL_ASSERT((type == kLdrd) || (type == kStrd)); + + CONTEXT_SCOPE; + + // TODO: Should we allow these cases? + if (IsUsingA32()) { + // The first register needs to be even. + if ((rt.GetCode() & 1) != 0) { + UnimplementedDelegate(type); + return; + } + // Registers need to be adjacent. + if (((rt.GetCode() + 1) % kNumberOfRegisters) != rt2.GetCode()) { + UnimplementedDelegate(type); + return; + } + // LDRD lr, pc [...] is not allowed. + if (rt.Is(lr)) { + UnimplementedDelegate(type); + return; + } + } + + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + uint32_t extra_offset_mask = GetOffsetMask(type, addrmode); + // Try to maximize the offset used by the MemOperand (load_store_offset). + // Add the part which can't be used by the MemOperand (add_offset). + uint32_t load_store_offset = offset & extra_offset_mask; + uint32_t add_offset = offset & ~extra_offset_mask; + if ((add_offset != 0) && + (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) { + load_store_offset = 0; + add_offset = offset; + } + switch (addrmode) { + case PreIndex: { + // Allow using the destinations as a scratch registers if possible. + UseScratchRegisterScope temps(this); + if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + + // Pre-Indexed case: + // ldrd r0, r1, [r2, 12345]! will translate into + // add r2, 12345 + // ldrd r0, r1, [r2] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(rn, load_store_offset, PreIndex)); + } + return; + } + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destinations as a scratch registers if possible. 
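+        // (Only ldrd can do this: for strd, rt and rt2 hold the data to be
+        // stored and must survive until the store itself, so they cannot be
+        // clobbered as scratch registers.)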
+ if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldrd r0, r1, [r2, 12345] will translate into + // add r0, r2, 12345 + // ldrd r0, r1, [r0] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(scratch, load_store_offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldrd r0, r1, [r0], imm' + if (!rn.Is(rt) && !rn.Is(rt2)) { + // Post-indexed case: + // ldrd r0, r1, [r2], imm32 will translate into + // ldrd r0, r1, [r2] + // movw ip. imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r2, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(rn, load_store_offset, PostIndex)); + } + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + return; + } + break; + } + } + if (operand.IsPlainRegister()) { + const Register& rn = operand.GetBaseRegister(); + const Register& rm = operand.GetOffsetRegister(); + AddrMode addrmode = operand.GetAddrMode(); + switch (addrmode) { + case PreIndex: + // ldrd r0, r1, [r2, r3]! will translate into + // add r2, r3 + // ldrd r0, r1, [r2] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset)); + } + return; + case PostIndex: + // ldrd r0, r1, [r2], r3 will translate into + // ldrd r0, r1, [r2] + // add r2, r3 + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset)); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destinations as a scratch registers if possible. 
+ if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldrd r0, r1, [r2, r3] will translate into + // add r0, r2, r3 + // ldrd r0, r1, [r0] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, scratch, rn, rm); + } else { + sub(cond, scratch, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(scratch, Offset)); + } + return; + } + } + } + Assembler::Delegate(type, instruction, cond, rt, rt2, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtSMop instruction, + Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + CONTEXT_SCOPE; + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) || + ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0)); + if (rn.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert vldr or vstr with a PC base " + "register.\n"); + } + switch (addrmode) { + case PreIndex: + // Pre-Indexed case: + // vldr.32 s0, [r1, 12345]! will translate into + // add r1, 12345 + // vldr.32 s0, [r1] + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Offset case: + // vldr.32 s0, [r1, 12345] will translate into + // add ip, r1, 12345 + // vldr.32 s0, [ip] + { + VIXL_ASSERT(offset != 0); + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Post-indexed case: + // vldr.32 s0, [r1], imm32 will translate into + // vldr.32 s0, [r1] + // movw ip. imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + return; + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtDMop instruction, + Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + CONTEXT_SCOPE; + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) || + ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0)); + if (rn.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert vldr or vstr with a PC base " + "register.\n"); + } + switch (addrmode) { + case PreIndex: + // Pre-Indexed case: + // vldr.64 d0, [r1, 12345]! 
will translate into + // add r1, 12345 + // vldr.64 d0, [r1] + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Offset case: + // vldr.64 d0, [r1, 12345] will translate into + // add ip, r1, 12345 + // vldr.32 s0, [ip] + { + VIXL_ASSERT(offset != 0); + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Post-indexed case: + // vldr.64 d0. [r1], imm32 will translate into + // vldr.64 d0, [r1] + // movw ip. imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + return; + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondMsrOp instruction, + Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + USE(type); + VIXL_ASSERT(type == kMsr); + if (operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, operand); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + msr(cond, spec_reg, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, spec_reg, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtDL instruction, + Condition cond, + DataType dt, + DRegister rd, + Location* location) { + VIXL_ASSERT(type == kVldr); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, dt, rd, location); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtSL instruction, + Condition cond, + DataType dt, + SRegister rd, + Location* location) { + VIXL_ASSERT(type == kVldr); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, dt, rd, location); +} + + +#undef CONTEXT_SCOPE +#undef TOSTRING +#undef STRINGIFY + +// Start of generated code. +// End of generated code. 
+} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch32/operands-aarch32.cc b/dep/vixl/src/aarch32/operands-aarch32.cc new file mode 100644 index 000000000..a3068944f --- /dev/null +++ b/dep/vixl/src/aarch32/operands-aarch32.cc @@ -0,0 +1,563 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+
+extern "C" {
+#include <stdint.h>
+#include <string.h>
+}
+
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include <iostream>
+
+#include "utils-vixl.h"
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/operands-aarch32.h"
+
+namespace vixl {
+namespace aarch32 {
+
+// Operand
+
+std::ostream& operator<<(std::ostream& os, const Operand& operand) {
+  if (operand.IsImmediate()) {
+    return os << "#" << operand.GetImmediate();
+  }
+  if (operand.IsImmediateShiftedRegister()) {
+    if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
+        (operand.GetShiftAmount() == 0)) {
+      return os << operand.GetBaseRegister();
+    }
+    if (operand.GetShift().IsRRX()) {
+      return os << operand.GetBaseRegister() << ", rrx";
+    }
+    return os << operand.GetBaseRegister() << ", " << operand.GetShift()
+              << " #" << operand.GetShiftAmount();
+  }
+  if (operand.IsRegisterShiftedRegister()) {
+    return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " "
+              << operand.GetShiftRegister();
+  }
+  VIXL_UNREACHABLE();
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const NeonImmediate& neon_imm) {
+  if (neon_imm.IsDouble()) {
+    if (neon_imm.imm_.d_ == 0) {
+      if (copysign(1.0, neon_imm.imm_.d_) < 0.0) {
+        return os << "#-0.0";
+      }
+      return os << "#0.0";
+    }
+    return os << "#" << std::setprecision(9) << neon_imm.imm_.d_;
+  }
+  if (neon_imm.IsFloat()) {
+    if (neon_imm.imm_.f_ == 0) {
+      if (copysign(1.0, neon_imm.imm_.f_) < 0.0) return os << "#-0.0";
+      return os << "#0.0";
+    }
+    return os << "#" << std::setprecision(9) << neon_imm.imm_.f_;
+  }
+  if (neon_imm.IsInteger64()) {
+    return os << "#0x" << std::hex << std::setw(16) << std::setfill('0')
+              << neon_imm.imm_.u64_ << std::dec;
+  }
+  return os << "#" << neon_imm.imm_.u32_;
+}
+
+// SOperand
+
+std::ostream& operator<<(std::ostream& os, const SOperand& operand) {
+  if (operand.IsImmediate()) {
+    return os << operand.GetNeonImmediate();
+  }
+  return os << operand.GetRegister();
+}
+
+// DOperand
+
+std::ostream& operator<<(std::ostream& os, const DOperand& operand) {
+  if (operand.IsImmediate()) {
+    return os << operand.GetNeonImmediate();
+  }
+  return os << operand.GetRegister();
+}
+
+// QOperand
+
+std::ostream& operator<<(std::ostream& os, const QOperand& operand) {
+  if (operand.IsImmediate()) {
+    return os << operand.GetNeonImmediate();
+  }
+  return os << operand.GetRegister();
+}
+
+
+ImmediateVbic::ImmediateVbic(DataType dt, const NeonImmediate& neon_imm) {
+  if (neon_imm.IsInteger32()) {
+    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+    if (dt.GetValue() == I16) {
+      if ((immediate & ~0xff) == 0) {
+        SetEncodingValue(0x9);
+        SetEncodedImmediate(immediate);
+      } else if ((immediate & ~0xff00) == 0) {
+        SetEncodingValue(0xb);
+        SetEncodedImmediate(immediate >> 8);
+      }
+    } else if (dt.GetValue() == I32) {
+      if ((immediate & ~0xff) == 0) {
+        SetEncodingValue(0x1);
+        SetEncodedImmediate(immediate);
+      } else if ((immediate & ~0xff00) == 0) {
+        SetEncodingValue(0x3);
+        SetEncodedImmediate(immediate >> 8);
+      } else if ((immediate & ~0xff0000) == 0) {
+        SetEncodingValue(0x5);
+        SetEncodedImmediate(immediate >> 16);
+      } else if ((immediate & ~0xff000000) == 0) {
+        SetEncodingValue(0x7);
+        SetEncodedImmediate(immediate >> 24);
+      }
+    }
+  }
+}
+
+
+DataType ImmediateVbic::DecodeDt(uint32_t cmode) {
+  switch (cmode) {
+    case 0x1:
+    case 0x3:
+    case 0x5:
+    case 0x7:
+      return I32;
+    case 0x9:
+    case 0xb:
+      return I16;
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return kDataTypeValueInvalid;
+}
+
+
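+// For example (illustrative): for vbic.i32 with #0x00ab0000 the constructor
+// above selects cmode 0x5 and stores the byte 0xab, and the decoder below
+// reconstructs 0xab << 16 = 0x00ab0000 (with DecodeDt(0x5) recovering I32).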
+NeonImmediate ImmediateVbic::DecodeImmediate(uint32_t cmode,
+                                             uint32_t immediate) {
+  switch (cmode) {
+    case 0x1:
+    case 0x9:
+      return immediate;
+    case 0x3:
+    case 0xb:
+      return immediate << 8;
+    case 0x5:
+      return immediate << 16;
+    case 0x7:
+      return immediate << 24;
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return 0;
+}
+
+
+ImmediateVmov::ImmediateVmov(DataType dt, const NeonImmediate& neon_imm) {
+  if (neon_imm.IsInteger()) {
+    switch (dt.GetValue()) {
+      case I8:
+        if (neon_imm.CanConvert<uint8_t>()) {
+          SetEncodingValue(0xe);
+          SetEncodedImmediate(neon_imm.GetImmediate<uint8_t>());
+        }
+        break;
+      case I16:
+        if (neon_imm.IsInteger32()) {
+          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+          if ((immediate & ~0xff) == 0) {
+            SetEncodingValue(0x8);
+            SetEncodedImmediate(immediate);
+          } else if ((immediate & ~0xff00) == 0) {
+            SetEncodingValue(0xa);
+            SetEncodedImmediate(immediate >> 8);
+          }
+        }
+        break;
+      case I32:
+        if (neon_imm.IsInteger32()) {
+          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+          if ((immediate & ~0xff) == 0) {
+            SetEncodingValue(0x0);
+            SetEncodedImmediate(immediate);
+          } else if ((immediate & ~0xff00) == 0) {
+            SetEncodingValue(0x2);
+            SetEncodedImmediate(immediate >> 8);
+          } else if ((immediate & ~0xff0000) == 0) {
+            SetEncodingValue(0x4);
+            SetEncodedImmediate(immediate >> 16);
+          } else if ((immediate & ~0xff000000) == 0) {
+            SetEncodingValue(0x6);
+            SetEncodedImmediate(immediate >> 24);
+          } else if ((immediate & ~0xff00) == 0xff) {
+            SetEncodingValue(0xc);
+            SetEncodedImmediate(immediate >> 8);
+          } else if ((immediate & ~0xff0000) == 0xffff) {
+            SetEncodingValue(0xd);
+            SetEncodedImmediate(immediate >> 16);
+          }
+        }
+        break;
+      case I64: {
+        bool is_valid = true;
+        uint32_t encoding = 0;
+        if (neon_imm.IsInteger32()) {
+          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+          uint32_t mask = 0xff000000;
+          for (uint32_t set_bit = 1 << 3; set_bit != 0; set_bit >>= 1) {
+            if ((immediate & mask) == mask) {
+              encoding |= set_bit;
+            } else if ((immediate & mask) != 0) {
+              is_valid = false;
+              break;
+            }
+            mask >>= 8;
+          }
+        } else {
+          uint64_t immediate = neon_imm.GetImmediate<uint64_t>();
+          uint64_t mask = UINT64_C(0xff) << 56;
+          for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
+            if ((immediate & mask) == mask) {
+              encoding |= set_bit;
+            } else if ((immediate & mask) != 0) {
+              is_valid = false;
+              break;
+            }
+            mask >>= 8;
+          }
+        }
+        if (is_valid) {
+          SetEncodingValue(0x1e);
+          SetEncodedImmediate(encoding);
+        }
+        break;
+      }
+      default:
+        break;
+    }
+  } else {
+    switch (dt.GetValue()) {
+      case F32:
+        if (neon_imm.IsFloat() || neon_imm.IsDouble()) {
+          ImmediateVFP vfp(neon_imm.GetImmediate<float>());
+          if (vfp.IsValid()) {
+            SetEncodingValue(0xf);
+            SetEncodedImmediate(vfp.GetEncodingValue());
+          }
+        }
+        break;
+      default:
+        break;
+    }
+  }
+}
+
+
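+// For example (illustrative): vmov.i64 with #0xff00ff00ff00ff00 consists only
+// of 0x00 and 0xff bytes, so the constructor above encodes it as cmode 0x1e
+// with the byte mask 0b10101010 (bit i set <=> byte i is 0xff); the decoder
+// below expands that mask back into the original 64-bit value.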
+DataType ImmediateVmov::DecodeDt(uint32_t cmode) {
+  switch (cmode & 0xf) {
+    case 0x0:
+    case 0x2:
+    case 0x4:
+    case 0x6:
+    case 0xc:
+    case 0xd:
+      return I32;
+    case 0x8:
+    case 0xa:
+      return I16;
+    case 0xe:
+      return ((cmode & 0x10) == 0) ? I8 : I64;
+    case 0xf:
+      if ((cmode & 0x10) == 0) return F32;
+      break;
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return kDataTypeValueInvalid;
+}
+
+
+NeonImmediate ImmediateVmov::DecodeImmediate(uint32_t cmode,
+                                             uint32_t immediate) {
+  switch (cmode & 0xf) {
+    case 0x8:
+    case 0x0:
+      return immediate;
+    case 0x2:
+    case 0xa:
+      return immediate << 8;
+    case 0x4:
+      return immediate << 16;
+    case 0x6:
+      return immediate << 24;
+    case 0xc:
+      return (immediate << 8) | 0xff;
+    case 0xd:
+      return (immediate << 16) | 0xffff;
+    case 0xe: {
+      if (cmode == 0x1e) {
+        uint64_t encoding = 0;
+        for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
+          encoding <<= 8;
+          if ((immediate & set_bit) != 0) {
+            encoding |= 0xff;
+          }
+        }
+        return encoding;
+      } else {
+        return immediate;
+      }
+    }
+    case 0xf: {
+      return ImmediateVFP::Decode(immediate);
+    }
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return 0;
+}
+
+
+ImmediateVmvn::ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm) {
+  if (neon_imm.IsInteger32()) {
+    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+    switch (dt.GetValue()) {
+      case I16:
+        if ((immediate & ~0xff) == 0) {
+          SetEncodingValue(0x8);
+          SetEncodedImmediate(immediate);
+        } else if ((immediate & ~0xff00) == 0) {
+          SetEncodingValue(0xa);
+          SetEncodedImmediate(immediate >> 8);
+        }
+        break;
+      case I32:
+        if ((immediate & ~0xff) == 0) {
+          SetEncodingValue(0x0);
+          SetEncodedImmediate(immediate);
+        } else if ((immediate & ~0xff00) == 0) {
+          SetEncodingValue(0x2);
+          SetEncodedImmediate(immediate >> 8);
+        } else if ((immediate & ~0xff0000) == 0) {
+          SetEncodingValue(0x4);
+          SetEncodedImmediate(immediate >> 16);
+        } else if ((immediate & ~0xff000000) == 0) {
+          SetEncodingValue(0x6);
+          SetEncodedImmediate(immediate >> 24);
+        } else if ((immediate & ~0xff00) == 0xff) {
+          SetEncodingValue(0xc);
+          SetEncodedImmediate(immediate >> 8);
+        } else if ((immediate & ~0xff0000) == 0xffff) {
+          SetEncodingValue(0xd);
+          SetEncodedImmediate(immediate >> 16);
+        }
+        break;
+      default:
+        break;
+    }
+  }
+}
+
+
+DataType ImmediateVmvn::DecodeDt(uint32_t cmode) {
+  switch (cmode) {
+    case 0x0:
+    case 0x2:
+    case 0x4:
+    case 0x6:
+    case 0xc:
+    case 0xd:
+      return I32;
+    case 0x8:
+    case 0xa:
+      return I16;
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return kDataTypeValueInvalid;
+}
+
+
+NeonImmediate ImmediateVmvn::DecodeImmediate(uint32_t cmode,
+                                             uint32_t immediate) {
+  switch (cmode) {
+    case 0x0:
+    case 0x8:
+      return immediate;
+    case 0x2:
+    case 0xa:
+      return immediate << 8;
+    case 0x4:
+      return immediate << 16;
+    case 0x6:
+      return immediate << 24;
+    case 0xc:
+      return (immediate << 8) | 0xff;
+    case 0xd:
+      return (immediate << 16) | 0xffff;
+    default:
+      break;
+  }
+  VIXL_UNREACHABLE();
+  return 0;
+}
+
+
+ImmediateVorr::ImmediateVorr(DataType dt, const NeonImmediate& neon_imm) {
+  if (neon_imm.IsInteger32()) {
+    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
+    if (dt.GetValue() == I16) {
+      if ((immediate & ~0xff) == 0) {
+        SetEncodingValue(0x9);
+        SetEncodedImmediate(immediate);
+      } else if ((immediate & ~0xff00) == 0) {
+        SetEncodingValue(0xb);
+        SetEncodedImmediate(immediate >> 8);
+      }
+    } else if (dt.GetValue() == I32) {
+      if ((immediate & ~0xff) == 0) {
+        SetEncodingValue(0x1);
+        SetEncodedImmediate(immediate);
+      } else if ((immediate & ~0xff00) == 0) {
+        SetEncodingValue(0x3);
+        SetEncodedImmediate(immediate >> 8);
+      } else if ((immediate & ~0xff0000) == 0) {
+        SetEncodingValue(0x5);
+        SetEncodedImmediate(immediate >> 16);
+      } else if ((immediate & ~0xff000000) == 0) {
+        
SetEncodingValue(0x7); + SetEncodedImmediate(immediate >> 24); + } + } + } +} + + +DataType ImmediateVorr::DecodeDt(uint32_t cmode) { + switch (cmode) { + case 0x1: + case 0x3: + case 0x5: + case 0x7: + return I32; + case 0x9: + case 0xb: + return I16; + default: + break; + } + VIXL_UNREACHABLE(); + return kDataTypeValueInvalid; +} + + +NeonImmediate ImmediateVorr::DecodeImmediate(uint32_t cmode, + uint32_t immediate) { + switch (cmode) { + case 0x1: + case 0x9: + return immediate; + case 0x3: + case 0xb: + return immediate << 8; + case 0x5: + return immediate << 16; + case 0x7: + return immediate << 24; + default: + break; + } + VIXL_UNREACHABLE(); + return 0; +} + +// MemOperand + +std::ostream& operator<<(std::ostream& os, const MemOperand& operand) { + os << "[" << operand.GetBaseRegister(); + if (operand.GetAddrMode() == PostIndex) { + os << "]"; + if (operand.IsRegisterOnly()) return os << "!"; + } + if (operand.IsImmediate()) { + if ((operand.GetOffsetImmediate() != 0) || operand.GetSign().IsMinus() || + ((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) { + if (operand.GetOffsetImmediate() == 0) { + os << ", #" << operand.GetSign() << operand.GetOffsetImmediate(); + } else { + os << ", #" << operand.GetOffsetImmediate(); + } + } + } else if (operand.IsPlainRegister()) { + os << ", " << operand.GetSign() << operand.GetOffsetRegister(); + } else if (operand.IsShiftedRegister()) { + os << ", " << operand.GetSign() << operand.GetOffsetRegister() + << ImmediateShiftOperand(operand.GetShift(), operand.GetShiftAmount()); + } else { + VIXL_UNREACHABLE(); + return os; + } + if (operand.GetAddrMode() == Offset) { + os << "]"; + } else if (operand.GetAddrMode() == PreIndex) { + os << "]!"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand) { + os << "[" << operand.GetBaseRegister() << operand.GetAlignment() << "]"; + if (operand.GetAddrMode() == PostIndex) { + if (operand.IsPlainRegister()) { + os << ", " << operand.GetOffsetRegister(); + } else { + os << "!"; + } + } + return os; +} + +} // namespace aarch32 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/assembler-aarch64.cc b/dep/vixl/src/aarch64/assembler-aarch64.cc new file mode 100644 index 000000000..937809b14 --- /dev/null +++ b/dep/vixl/src/aarch64/assembler-aarch64.cc @@ -0,0 +1,6084 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <cmath>
+
+#include "assembler-aarch64.h"
+#include "macro-assembler-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+RawLiteral::RawLiteral(size_t size,
+                       LiteralPool* literal_pool,
+                       DeletionPolicy deletion_policy)
+    : size_(size),
+      offset_(0),
+      low64_(0),
+      high64_(0),
+      literal_pool_(literal_pool),
+      deletion_policy_(deletion_policy) {
+  VIXL_ASSERT((deletion_policy == kManuallyDeleted) || (literal_pool_ != NULL));
+  if (deletion_policy == kDeletedOnPoolDestruction) {
+    literal_pool_->DeleteOnDestruction(this);
+  }
+}
+
+
+void Assembler::Reset() { GetBuffer()->Reset(); }
+
+
+void Assembler::bind(Label* label) {
+  BindToOffset(label, GetBuffer()->GetCursorOffset());
+}
+
+
+void Assembler::BindToOffset(Label* label, ptrdiff_t offset) {
+  VIXL_ASSERT((offset >= 0) && (offset <= GetBuffer()->GetCursorOffset()));
+  VIXL_ASSERT(offset % kInstructionSize == 0);
+
+  label->Bind(offset);
+
+  for (Label::LabelLinksIterator it(label); !it.Done(); it.Advance()) {
+    Instruction* link =
+        GetBuffer()->GetOffsetAddress<Instruction*>(*it.Current());
+    link->SetImmPCOffsetTarget(GetLabelAddress<Instruction*>(label));
+  }
+  label->ClearAllLinks();
+}
+
+
+// A common implementation for the LinkAndGetOffsetTo helpers.
+//
+// The offset is calculated by aligning the PC and label addresses down to a
+// multiple of 1 << element_shift, then calculating the (scaled) offset between
+// them. This matches the semantics of adrp, for example.
+template <int element_shift>
+ptrdiff_t Assembler::LinkAndGetOffsetTo(Label* label) {
+  VIXL_STATIC_ASSERT(element_shift < (sizeof(ptrdiff_t) * 8));
+
+  if (label->IsBound()) {
+    uintptr_t pc_offset = GetCursorAddress<uintptr_t>() >> element_shift;
+    uintptr_t label_offset = GetLabelAddress<uintptr_t>(label) >> element_shift;
+    return label_offset - pc_offset;
+  } else {
+    label->AddLink(GetBuffer()->GetCursorOffset());
+    return 0;
+  }
+}
+
+
+ptrdiff_t Assembler::LinkAndGetByteOffsetTo(Label* label) {
+  return LinkAndGetOffsetTo<0>(label);
+}
+
+
+ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+  return LinkAndGetOffsetTo<kInstructionSizeLog2>(label);
+}
+
+
+ptrdiff_t Assembler::LinkAndGetPageOffsetTo(Label* label) {
+  return LinkAndGetOffsetTo<kPageSizeLog2>(label);
+}
+
+
+void Assembler::place(RawLiteral* literal) {
+  VIXL_ASSERT(!literal->IsPlaced());
+
+  // Patch instructions using this literal.
+  if (literal->IsUsed()) {
+    Instruction* target = GetCursorAddress<Instruction*>();
+    ptrdiff_t offset = literal->GetLastUse();
+    bool done;
+    do {
+      Instruction* ldr = GetBuffer()->GetOffsetAddress<Instruction*>(offset);
+      VIXL_ASSERT(ldr->IsLoadLiteral());
+
+      ptrdiff_t imm19 = ldr->GetImmLLiteral();
+      VIXL_ASSERT(imm19 <= 0);
+      done = (imm19 == 0);
+      offset += imm19 * kLiteralEntrySize;
+
+      ldr->SetImmLLiteral(target);
+    } while (!done);
+  }
+
+  // "bind" the literal.
+  literal->SetOffset(GetCursorOffset());
+  // Copy the data into the pool.
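+  // Pool entries are 4 (S), 8 (D) or 16 (Q) bytes; a 128-bit literal is
+  // emitted as two consecutive 64-bit words, low half first.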
+ switch (literal->GetSize()) { + case kSRegSizeInBytes: + dc32(literal->GetRawValue32()); + break; + case kDRegSizeInBytes: + dc64(literal->GetRawValue64()); + break; + default: + VIXL_ASSERT(literal->GetSize() == kQRegSizeInBytes); + dc64(literal->GetRawValue128Low64()); + dc64(literal->GetRawValue128High64()); + } + + literal->literal_pool_ = NULL; +} + + +ptrdiff_t Assembler::LinkAndGetWordOffsetTo(RawLiteral* literal) { + VIXL_ASSERT(IsWordAligned(GetCursorOffset())); + + bool register_first_use = + (literal->GetLiteralPool() != NULL) && !literal->IsUsed(); + + if (literal->IsPlaced()) { + // The literal is "behind", the offset will be negative. + VIXL_ASSERT((literal->GetOffset() - GetCursorOffset()) <= 0); + return (literal->GetOffset() - GetCursorOffset()) >> kLiteralEntrySizeLog2; + } + + ptrdiff_t offset = 0; + // Link all uses together. + if (literal->IsUsed()) { + offset = + (literal->GetLastUse() - GetCursorOffset()) >> kLiteralEntrySizeLog2; + } + literal->SetLastUse(GetCursorOffset()); + + if (register_first_use) { + literal->GetLiteralPool()->AddEntry(literal); + } + + return offset; +} + + +// Code generation. +void Assembler::br(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BR | Rn(xn)); +} + + +void Assembler::blr(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLR | Rn(xn)); +} + + +void Assembler::ret(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(RET | Rn(xn)); +} + + +void Assembler::braaz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BRAAZ | Rn(xn) | Rd_mask); +} + +void Assembler::brabz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BRABZ | Rn(xn) | Rd_mask); +} + +void Assembler::blraaz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLRAAZ | Rn(xn) | Rd_mask); +} + +void Assembler::blrabz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLRABZ | Rn(xn) | Rd_mask); +} + +void Assembler::retaa() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(RETAA | Rn_mask | Rd_mask); +} + +void Assembler::retab() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(RETAB | Rn_mask | Rd_mask); +} + +// The Arm ARM names the register Xm but encodes it in the Xd bitfield. 
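+// This is why braa, brab, blraa and blrab below pass the modifier register
+// through RdSP() rather than Rm().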
+void Assembler::braa(const Register& xn, const Register& xm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
+  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
+  Emit(BRAA | Rn(xn) | RdSP(xm));
+}
+
+void Assembler::brab(const Register& xn, const Register& xm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
+  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
+  Emit(BRAB | Rn(xn) | RdSP(xm));
+}
+
+void Assembler::blraa(const Register& xn, const Register& xm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
+  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
+  Emit(BLRAA | Rn(xn) | RdSP(xm));
+}
+
+void Assembler::blrab(const Register& xn, const Register& xm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
+  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
+  Emit(BLRAB | Rn(xn) | RdSP(xm));
+}
+
+
+void Assembler::b(int64_t imm26) { Emit(B | ImmUncondBranch(imm26)); }
+
+
+void Assembler::b(int64_t imm19, Condition cond) {
+  Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label) {
+  int64_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
+  b(static_cast<int>(offset));
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+  int64_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CondBranchType, offset));
+  b(static_cast<int>(offset), cond);
+}
+
+
+void Assembler::bl(int64_t imm26) { Emit(BL | ImmUncondBranch(imm26)); }
+
+
+void Assembler::bl(Label* label) {
+  int64_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
+  bl(static_cast<int>(offset));
+}
+
+
+void Assembler::cbz(const Register& rt, int64_t imm19) {
+  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt, Label* label) {
+  int64_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
+  cbz(rt, static_cast<int>(offset));
+}
+
+
+void Assembler::cbnz(const Register& rt, int64_t imm19) {
+  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt, Label* label) {
+  int64_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
+  cbnz(rt, static_cast<int>(offset));
+}
+
+
+void Assembler::NEONTable(const VRegister& vd,
+                          const VRegister& vn,
+                          const VRegister& vm,
+                          NEONTableOp op) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.Is16B() || vd.Is8B());
+  VIXL_ASSERT(vn.Is16B());
+  VIXL_ASSERT(AreSameFormat(vd, vm));
+  Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  NEONTable(vd, vn, vm, NEON_TBL_1v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vm) {
+  USE(vn2);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2));
+  VIXL_ASSERT(AreConsecutive(vn, vn2));
+  NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vn3,
+                    const VRegister& vm) {
+  USE(vn2, vn3);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
+  NEONTable(vd, vn, vm, NEON_TBL_3v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vn3,
+                    const VRegister& vn4,
+                    const VRegister& vm) {
+  USE(vn2, vn3, vn4);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
+  NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vm) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  NEONTable(vd, vn, vm, NEON_TBX_1v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vm) {
+  USE(vn2);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2));
+  VIXL_ASSERT(AreConsecutive(vn, vn2));
+  NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vn3,
+                    const VRegister& vm) {
+  USE(vn2, vn3);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
+  NEONTable(vd, vn, vm, NEON_TBX_3v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vn2,
+                    const VRegister& vn3,
+                    const VRegister& vn4,
+                    const VRegister& vm) {
+  USE(vn2, vn3, vn4);
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
+  NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, int64_t imm14) {
+  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
+  ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
+  tbz(rt, bit_pos, static_cast<int>(offset));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, int64_t imm14) {
+  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+  ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
+  VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
+  tbnz(rt, bit_pos, static_cast<int>(offset));
+}
+
+
+void Assembler::adr(const Register& xd, int64_t imm21) {
+  VIXL_ASSERT(xd.Is64Bits());
+  Emit(ADR | ImmPCRelAddress(imm21) | Rd(xd));
+}
+
+
+void Assembler::adr(const Register& xd, Label* label) {
+  adr(xd, static_cast<int>(LinkAndGetByteOffsetTo(label)));
+}
+
+
+void Assembler::adrp(const Register& xd, int64_t imm21) {
+  VIXL_ASSERT(xd.Is64Bits());
+  Emit(ADRP | ImmPCRelAddress(imm21) | Rd(xd));
+}
+
+
+void Assembler::adrp(const Register& xd, Label* label) {
+  VIXL_ASSERT(AllowPageOffsetDependentCode());
+  adrp(xd, static_cast<int>(LinkAndGetPageOffsetTo(label)));
+}
+
+
+void Assembler::add(const Register& rd,
+                    const Register& rn,
+                    const Operand& operand) {
+  AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+                     const Register& rn,
+                     const Operand& operand) {
+  AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rn);
+  adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+                    const Register& rn,
+                    const Operand& operand) {
+  AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+                     const Register& rn,
+                     const Operand& operand) {
+  AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rn);
+  subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rd);
+  sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rd);
+  subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+                    const Register& rn,
+                    const Operand& operand) {
+  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+                     const Register& rn,
+                     const Operand& operand) {
+  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+                    const Register& rn,
+                    const Operand& operand) {
+  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+                     const Register& rn,
+                     const Operand& operand) {
+  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rd);
+  sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+  Register zr = AppropriateZeroRegFor(rd);
+  sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
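+//
+// The operand is either a (possibly shifted) register or a bitmask immediate.
+// Only immediates expressible as a rotated, repeating run of set bits are
+// encodable: 0x00ff00ff00ff00ff works, for example, while an arbitrary value
+// such as 0x1234 must first be materialised into a register.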
+void Assembler::and_(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, AND); +} + + +void Assembler::ands(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ANDS); +} + + +void Assembler::tst(const Register& rn, const Operand& operand) { + ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void Assembler::bic(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BIC); +} + + +void Assembler::bics(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BICS); +} + + +void Assembler::orr(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORR); +} + + +void Assembler::orn(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORN); +} + + +void Assembler::eor(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EOR); +} + + +void Assembler::eon(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EON); +} + + +void Assembler::lslv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::lsrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::asrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rorv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +// Bitfield operations. 
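+//
+// These take the raw immr/imms fields; the familiar aliases map onto them.
+// For example, ubfx(rd, rn, lsb, width) is ubfm(rd, rn, lsb, lsb + width - 1),
+// and lsl(rd, rn, shift) is ubfm(rd, rn, (reg_size - shift) % reg_size,
+// reg_size - 1 - shift).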
+void Assembler::bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | BFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | SBFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | UBFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.GetSizeInBits()) | Rn(rn) | + Rd(rd)); +} + + +void Assembler::csel(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSEL); +} + + +void Assembler::csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINC); +} + + +void Assembler::csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINV); +} + + +void Assembler::csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSNEG); +} + + +void Assembler::cset(const Register& rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinc(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::csetm(const Register& rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinv(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinc(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinv(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csneg(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, nzcv, cond, CCMN); +} + + +void Assembler::ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, 
nzcv, cond, CCMP); +} + + +void Assembler::DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op) { + Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32b(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32B | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32h(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32H | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32w(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32W | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32x(const Register& wd, + const Register& wn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits()); + Emit(SF(xm) | Rm(xm) | CRC32X | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cb(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CB | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32ch(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CH | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cw(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CW | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cx(const Register& wd, + const Register& wn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits()); + Emit(SF(xm) | Rm(xm) | CRC32CX | Rn(wn) | Rd(wd)); +} + + +void Assembler::mul(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD); +} + + +void Assembler::madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MADD); +} + + +void Assembler::mneg(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB); +} + + +void Assembler::msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MSUB); +} + + +void Assembler::umaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, UMADDL_x); +} + + +void Assembler::smaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + 
DataProcessing3Source(xd, wn, wm, xa, SMADDL_x); +} + + +void Assembler::umsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, UMSUBL_x); +} + + +void Assembler::smsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, SMSUBL_x); +} + + +void Assembler::smull(const Register& xd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(xd.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xzr, SMADDL_x); +} + + +void Assembler::sdiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::smulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, SMULH_x); +} + + +void Assembler::umulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, UMULH_x); +} + + +void Assembler::udiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rbit(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, RBIT); +} + + +void Assembler::rev16(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, REV16); +} + + +void Assembler::rev32(const Register& xd, const Register& xn) { + VIXL_ASSERT(xd.Is64Bits()); + DataProcessing1Source(xd, xn, REV); +} + + +void Assembler::rev(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, rd.Is64Bits() ? 
REV_x : REV_w); +} + + +void Assembler::clz(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, CLZ); +} + + +void Assembler::cls(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, CLS); +} + +#define PAUTH_VARIATIONS(V) \ + V(paci, PACI) \ + V(pacd, PACD) \ + V(auti, AUTI) \ + V(autd, AUTD) + +#define DEFINE_ASM_FUNCS(PRE, OP) \ + void Assembler::PRE##a(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits()); \ + Emit(SF(xd) | OP##A | Rd(xd) | RnSP(xn)); \ + } \ + \ + void Assembler::PRE##za(const Register& xd) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits()); \ + Emit(SF(xd) | OP##ZA | Rd(xd)); \ + } \ + \ + void Assembler::PRE##b(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits()); \ + Emit(SF(xd) | OP##B | Rd(xd) | RnSP(xn)); \ + } \ + \ + void Assembler::PRE##zb(const Register& xd) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits()); \ + Emit(SF(xd) | OP##ZB | Rd(xd)); \ + } + +PAUTH_VARIATIONS(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + +void Assembler::pacga(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric)); + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + Emit(SF(xd) | PACGA | Rd(xd) | Rn(xn) | RmSP(xm)); +} + +void Assembler::xpaci(const Register& xd) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xd.Is64Bits()); + Emit(SF(xd) | XPACI | Rd(xd)); +} + +void Assembler::xpacd(const Register& xd) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xd.Is64Bits()); + Emit(SF(xd) | XPACD | Rd(xd)); +} + + +void Assembler::ldp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); +} + + +void Assembler::stp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); +} + + +void Assembler::ldpsw(const Register& xt, + const Register& xt2, + const MemOperand& src) { + VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits()); + LoadStorePair(xt, xt2, src, LDPSW_x); +} + + +void Assembler::LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + VIXL_ASSERT(CPUHas(rt, rt2)); + + // 'rt' and 'rt2' can only be aliased for stores. 
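+  // (An ldp with rt == rt2 is constrained unpredictable in the architecture;
+  // an stp simply stores the same value twice.)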
+  VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+  VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), CalcLSPairDataSize(op)));
+
+  int offset = static_cast<int>(addr.GetOffset());
+  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
+                ImmLSPair(offset, CalcLSPairDataSize(op));
+
+  Instr addrmodeop;
+  if (addr.IsImmediateOffset()) {
+    addrmodeop = LoadStorePairOffsetFixed;
+  } else {
+    if (addr.IsPreIndex()) {
+      addrmodeop = LoadStorePairPreIndexFixed;
+    } else {
+      VIXL_ASSERT(addr.IsPostIndex());
+      addrmodeop = LoadStorePairPostIndexFixed;
+    }
+  }
+  Emit(addrmodeop | memop);
+}
+
+
+void Assembler::ldnp(const CPURegister& rt,
+                     const CPURegister& rt2,
+                     const MemOperand& src) {
+  LoadStorePairNonTemporal(rt, rt2, src, LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+                     const CPURegister& rt2,
+                     const MemOperand& dst) {
+  LoadStorePairNonTemporal(rt, rt2, dst, StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+                                         const CPURegister& rt2,
+                                         const MemOperand& addr,
+                                         LoadStorePairNonTemporalOp op) {
+  VIXL_ASSERT(CPUHas(rt, rt2));
+
+  VIXL_ASSERT(!rt.Is(rt2));
+  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+  VIXL_ASSERT(addr.IsImmediateOffset());
+
+  unsigned size =
+      CalcLSPairDataSize(static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+  VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), size));
+  int offset = static_cast<int>(addr.GetOffset());
+  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
+       ImmLSPair(offset, size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt,
+                     const MemOperand& src,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::strb(const Register& rt,
+                     const MemOperand& dst,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldrsb(const Register& rt,
+                      const MemOperand& src,
+                      LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldrh(const Register& rt,
+                     const MemOperand& src,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::strh(const Register& rt,
+                     const MemOperand& dst,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldrsh(const Register& rt,
+                      const MemOperand& src,
+                      LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  LoadStore(rt, src, rt.Is64Bits() ?
LDRSH_x : LDRSH_w, option); +} + + +void Assembler::ldr(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LoadOpFor(rt), option); +} + + +void Assembler::str(const CPURegister& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, StoreOpFor(rt), option); +} + + +void Assembler::ldrsw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(xt.Is64Bits()); + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(xt, src, LDRSW_x, option); +} + + +void Assembler::ldurb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRB_w, option); +} + + +void Assembler::sturb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRB_w, option); +} + + +void Assembler::ldursb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); +} + + +void Assembler::ldurh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRH_w, option); +} + + +void Assembler::sturh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRH_w, option); +} + + +void Assembler::ldursh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? 
LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldur(const CPURegister& rt,
+                     const MemOperand& src,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireScaledOffset);
+  VIXL_ASSERT(option != PreferScaledOffset);
+  LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::stur(const CPURegister& rt,
+                     const MemOperand& dst,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireScaledOffset);
+  VIXL_ASSERT(option != PreferScaledOffset);
+  LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldursw(const Register& xt,
+                       const MemOperand& src,
+                       LoadStoreScalingOption option) {
+  VIXL_ASSERT(xt.Is64Bits());
+  VIXL_ASSERT(option != RequireScaledOffset);
+  VIXL_ASSERT(option != PreferScaledOffset);
+  LoadStore(xt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldrsw(const Register& xt, RawLiteral* literal) {
+  VIXL_ASSERT(xt.Is64Bits());
+  VIXL_ASSERT(literal->GetSize() == kWRegSizeInBytes);
+  ldrsw(xt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, RawLiteral* literal) {
+  VIXL_ASSERT(CPUHas(rt));
+  VIXL_ASSERT(literal->GetSize() == static_cast<size_t>(rt.GetSizeInBytes()));
+  ldr(rt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
+}
+
+
+void Assembler::ldrsw(const Register& rt, int64_t imm19) {
+  Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, int64_t imm19) {
+  VIXL_ASSERT(CPUHas(rt));
+  LoadLiteralOp op = LoadLiteralOpFor(rt);
+  Emit(op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::prfm(PrefetchOperation op, int64_t imm19) {
+  Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
+}
+
+
+// Exclusive-access instructions.
+void Assembler::stxrb(const Register& rs,
+                      const Register& rt,
+                      const MemOperand& dst) {
+  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
+  Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
+}
+
+
+void Assembler::stxrh(const Register& rs,
+                      const Register& rt,
+                      const MemOperand& dst) {
+  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
+  Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
+}
+
+
+void Assembler::stxr(const Register& rs,
+                     const Register& rt,
+                     const MemOperand& dst) {
+  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
+  LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
+  Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
+}
+
+
+void Assembler::ldxrb(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
+}
+
+
+void Assembler::ldxrh(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
+}
+
+
+void Assembler::ldxr(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
+  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
+}
+
+
+void Assembler::stxp(const Register& rs,
+                     const Register& rt,
+                     const Register& rt2,
+                     const MemOperand& dst) {
+  VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
+  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
+  LoadStoreExclusive op = rt.Is64Bits() ?
STXP_x : STXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlxrb(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlxrh(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlxr(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldaxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldaxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldaxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldaxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
STLR_x : STLR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stllrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLLRB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stllrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLLRH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stllr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLLR_x : STLLR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldlarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDLARB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldlarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDLARH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldlar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDLAR_x : LDLAR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +// clang-format off +#define COMPARE_AND_SWAP_W_X_LIST(V) \ + V(cas, CAS) \ + V(casa, CASA) \ + V(casl, CASL) \ + V(casal, CASAL) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + LoadStoreExclusive op = rt.Is64Bits() ? 
OP##_x : OP##_w; \ + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define COMPARE_AND_SWAP_W_LIST(V) \ + V(casb, CASB) \ + V(casab, CASAB) \ + V(caslb, CASLB) \ + V(casalb, CASALB) \ + V(cash, CASH) \ + V(casah, CASAH) \ + V(caslh, CASLH) \ + V(casalh, CASALH) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define COMPARE_AND_SWAP_PAIR_LIST(V) \ + V(casp, CASP) \ + V(caspa, CASPA) \ + V(caspl, CASPL) \ + V(caspal, CASPAL) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rs1, \ + const Register& rt, \ + const Register& rt1, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + USE(rs1, rt1); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + VIXL_ASSERT(AreEven(rs, rt)); \ + VIXL_ASSERT(AreConsecutive(rs, rs1)); \ + VIXL_ASSERT(AreConsecutive(rt, rt1)); \ + LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w; \ + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// These macros generate all the variations of the atomic memory operations, +// e.g. ldadd, ldadda, ldaddb, staddl, etc. +// For a full list of the methods with comments, see the assembler header file. + +// clang-format off +#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \ + V(DEF, add, LDADD) \ + V(DEF, clr, LDCLR) \ + V(DEF, eor, LDEOR) \ + V(DEF, set, LDSET) \ + V(DEF, smax, LDSMAX) \ + V(DEF, smin, LDSMIN) \ + V(DEF, umax, LDUMAX) \ + V(DEF, umin, LDUMIN) + +#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \ + V(NAME, OP##_x, OP##_w) \ + V(NAME##l, OP##L_x, OP##L_w) \ + V(NAME##b, OP##B, OP##B) \ + V(NAME##lb, OP##LB, OP##LB) \ + V(NAME##h, OP##H, OP##H) \ + V(NAME##lh, OP##LH, OP##LH) + +#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \ + ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \ + V(NAME##a, OP##A_x, OP##A_w) \ + V(NAME##al, OP##AL_x, OP##AL_w) \ + V(NAME##ab, OP##AB, OP##AB) \ + V(NAME##alb, OP##ALB, OP##ALB) \ + V(NAME##ah, OP##AH, OP##AH) \ + V(NAME##alh, OP##ALH, OP##ALH) +// clang-format on + +#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W) \ + void Assembler::ld##FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + AtomicMemoryOp op = rt.Is64Bits() ? 
OP_X : OP_W;                          \
+    Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));       \
+  }
+#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W)                        \
+  void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
+    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                      \
+    ld##FN(rs, AppropriateZeroRegFor(rs), src);                      \
+  }
+
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
+                                    DEFINE_ASM_LOAD_FUNC)
+ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
+                                    DEFINE_ASM_STORE_FUNC)
+
+#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W)                          \
+  void Assembler::FN(const Register& rs,                             \
+                     const Register& rt,                             \
+                     const MemOperand& src) {                        \
+    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                      \
+    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));  \
+    AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;                 \
+    Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));        \
+  }
+
+ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)
+
+#undef DEFINE_ASM_LOAD_FUNC
+#undef DEFINE_ASM_STORE_FUNC
+#undef DEFINE_ASM_SWP_FUNC
+
+
+void Assembler::ldaprb(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  AtomicMemoryOp op = LDAPRB;
+  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
+}
+
+void Assembler::ldaprh(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  AtomicMemoryOp op = LDAPRH;
+  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
+}
+
+void Assembler::ldapr(const Register& rt, const MemOperand& src) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
+  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
+  AtomicMemoryOp op = rt.Is64Bits() ? LDAPR_x : LDAPR_w;
+  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
+}
+
+void Assembler::prfm(PrefetchOperation op,
+                     const MemOperand& address,
+                     LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireUnscaledOffset);
+  VIXL_ASSERT(option != PreferUnscaledOffset);
+  Prefetch(op, address, option);
+}
+
+
+void Assembler::prfum(PrefetchOperation op,
+                      const MemOperand& address,
+                      LoadStoreScalingOption option) {
+  VIXL_ASSERT(option != RequireScaledOffset);
+  VIXL_ASSERT(option != PreferScaledOffset);
+  Prefetch(op, address, option);
+}
+
+
+void Assembler::prfm(PrefetchOperation op, RawLiteral* literal) {
+  prfm(op, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
+}
+
+
+void Assembler::sys(int op1, int crn, int crm, int op2, const Register& xt) {
+  VIXL_ASSERT(xt.Is64Bits());
+  Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(xt));
+}
+
+
+void Assembler::sys(int op, const Register& xt) {
+  VIXL_ASSERT(xt.Is64Bits());
+  Emit(SYS | SysOp(op) | Rt(xt));
+}
+
+
+void Assembler::dc(DataCacheOp op, const Register& rt) {
+  VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
+  sys(op, rt);
+}
+
+
+void Assembler::ic(InstructionCacheOp op, const Register& rt) {
+  VIXL_ASSERT(op == IVAU);
+  sys(op, rt);
+}
+
+
+void Assembler::hint(SystemHint code) { hint(static_cast<int>(code)); }
+
+
+void Assembler::hint(int imm7) {
+  VIXL_ASSERT(IsUint7(imm7));
+  Emit(HINT | ImmHint(imm7) | Rt(xzr));
+}
+
+
+// NEON structure loads and stores.
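+//
+// Post-index addressing by a core register puts that register in Rm; the
+// immediate post-index form (advancing by the size of the register list) is
+// flagged by Rm = 31, as set up in LoadStoreStructAddrModeField() below.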
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { + Instr addr_field = RnSP(addr.GetBaseRegister()); + + if (addr.IsPostIndex()) { + VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex == + static_cast( + NEONLoadStoreSingleStructPostIndex)); + + addr_field |= NEONLoadStoreMultiStructPostIndex; + if (addr.GetOffset() == 0) { + addr_field |= RmNot31(addr.GetRegisterOffset()); + } else { + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_field |= (0x1f << Rm_offset); + } + } else { + VIXL_ASSERT(addr.IsImmediateOffset() && (addr.GetOffset() == 0)); + } + return addr_field; +} + +void Assembler::LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op) { +#ifdef VIXL_DEBUG + // Assert that addressing mode is either offset (with immediate 0), post + // index by immediate of the size of the register list, or post index by a + // value in a core register. + if (addr.IsImmediateOffset()) { + VIXL_ASSERT(addr.GetOffset() == 0); + } else { + int offset = vt.GetSizeInBytes(); + switch (op) { + case NEON_LD1_1v: + case NEON_ST1_1v: + offset *= 1; + break; + case NEONLoadStoreSingleStructLoad1: + case NEONLoadStoreSingleStructStore1: + case NEON_LD1R: + offset = (offset / vt.GetLanes()) * 1; + break; + + case NEON_LD1_2v: + case NEON_ST1_2v: + case NEON_LD2: + case NEON_ST2: + offset *= 2; + break; + case NEONLoadStoreSingleStructLoad2: + case NEONLoadStoreSingleStructStore2: + case NEON_LD2R: + offset = (offset / vt.GetLanes()) * 2; + break; + + case NEON_LD1_3v: + case NEON_ST1_3v: + case NEON_LD3: + case NEON_ST3: + offset *= 3; + break; + case NEONLoadStoreSingleStructLoad3: + case NEONLoadStoreSingleStructStore3: + case NEON_LD3R: + offset = (offset / vt.GetLanes()) * 3; + break; + + case NEON_LD1_4v: + case NEON_ST1_4v: + case NEON_LD4: + case NEON_ST4: + offset *= 4; + break; + case NEONLoadStoreSingleStructLoad4: + case NEONLoadStoreSingleStructStore4: + case NEON_LD4R: + offset = (offset / vt.GetLanes()) * 4; + break; + default: + VIXL_UNREACHABLE(); + } + VIXL_ASSERT(!addr.GetRegisterOffset().Is(NoReg) || + addr.GetOffset() == offset); + } +#else + USE(vt, addr, op); +#endif +} + +void Assembler::LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op) { + LoadStoreStructVerify(vt, addr, op); + VIXL_ASSERT(vt.IsVector() || vt.Is1D()); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStruct(vt, src, NEON_LD1_1v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD1_2v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD1_3v); +} + + +void 
Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD1_4v); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD2); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); +} + + +void Assembler::ld2r(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD3); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); +} + + +void Assembler::ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD4); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); +} + + +void Assembler::ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); +} + + +void Assembler::st1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStruct(vt, src, NEON_ST1_1v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& 
vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_ST1_2v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_ST1_3v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_ST1_4v); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, dst, NEON_ST2); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, dst, NEON_ST3); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, dst, NEON_ST4); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4); +} + + +void Assembler::LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + + // We support vt arguments of the form vt.VxT() or vt.T(), where x is the + // number of lanes, and T is b, h, s or d. + unsigned lane_size = vt.GetLaneSizeInBytes(); + VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size)); + + // Lane size is encoded in the opcode field. Lane index is encoded in the Q, + // S and size fields. 
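+  // Concretely, the lane's byte offset (lane index * lane size) is spread so
+  // that bits [1:0] land in "size", bit [2] in "S" and bit [3] in "Q". For
+  // example, lane 3 of a .S register has byte offset 12 = 0b1100, giving
+  // Q = 1, S = 1 and size = 0.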
+ lane *= lane_size; + if (lane_size == 8) lane++; + + Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask; + Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask; + Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask; + + Instr instr = op; + switch (lane_size) { + case 1: + instr |= NEONLoadStoreSingle_b; + break; + case 2: + instr |= NEONLoadStoreSingle_h; + break; + case 4: + instr |= NEONLoadStoreSingle_s; + break; + default: + VIXL_ASSERT(lane_size == 8); + instr |= NEONLoadStoreSingle_d; + } + + Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1); +} + + +void Assembler::ld1r(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R); +} + + +void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1); +} + + +void Assembler::NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) || + (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) || + (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) || + (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D())); + Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vm, vn)); + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_3DIFF_LONG_LIST(V) \ + V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ + V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ + V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ + V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ + V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ + V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ + V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \ + V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ + V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ + V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ + V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ + V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ + 
V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ + V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ + V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \ + V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ + V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ + V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ + V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ +// clang-format on + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3DifferentL(vd, vn, vm, OP); \ +} +NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define NEON_3DIFF_HN_LIST(V) \ + V(addhn, NEON_ADDHN, vd.IsD()) \ + V(addhn2, NEON_ADDHN2, vd.IsQ()) \ + V(raddhn, NEON_RADDHN, vd.IsD()) \ + V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ + V(subhn, NEON_SUBHN, vd.IsD()) \ + V(subhn2, NEON_SUBHN2, vd.IsQ()) \ + V(rsubhn, NEON_RSUBHN, vd.IsD()) \ + V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3DifferentHN(vd, vn, vm, OP); \ + } +NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::uaddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW); +} + + +void Assembler::uaddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW2); +} + + +void Assembler::saddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW); +} + + +void Assembler::saddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW2); +} + + +void Assembler::usubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW); +} + + +void Assembler::usubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW2); +} + + +void Assembler::ssubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + 
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW); +} + + +void Assembler::ssubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); +} + + +void Assembler::mov(const Register& rd, const Register& rm) { + // Moves involving the stack pointer are encoded as add immediate with + // second operand of zero. Otherwise, orr with first operand zr is + // used. + if (rd.IsSP() || rm.IsSP()) { + add(rd, rm, 0); + } else { + orr(rd, AppropriateZeroRegFor(rd), rm); + } +} + +void Assembler::xpaclri() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(XPACLRI); +} + +void Assembler::pacia1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIA1716); +} + +void Assembler::pacib1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIB1716); +} + +void Assembler::autia1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIA1716); +} + +void Assembler::autib1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIB1716); +} + +void Assembler::paciaz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIAZ); +} + +void Assembler::pacibz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIBZ); +} + +void Assembler::autiaz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIAZ); +} + +void Assembler::autibz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIBZ); +} + +void Assembler::paciasp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIASP); +} + +void Assembler::pacibsp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIBSP); +} + +void Assembler::autiasp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIASP); +} + +void Assembler::autibsp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIBSP); +} + + +void Assembler::mvn(const Register& rd, const Operand& operand) { + orn(rd, AppropriateZeroRegFor(rd), operand); +} + + +void Assembler::mrs(const Register& xt, SystemRegister sysreg) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(MRS | ImmSystemRegister(sysreg) | Rt(xt)); +} + + +void Assembler::msr(SystemRegister sysreg, const Register& xt) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(MSR | Rt(xt) | ImmSystemRegister(sysreg)); +} + + +void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); } + + +void Assembler::dmb(BarrierDomain domain, BarrierType type) { + Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::dsb(BarrierDomain domain, BarrierType type) { + Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::isb() { + Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); +} + +void Assembler::esb() { + VIXL_ASSERT(CPUHas(CPUFeatures::kRAS)); + hint(ESB); +} + +void Assembler::csdb() { hint(CSDB); } + +void Assembler::fmov(const VRegister& vd, double imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1D()); + Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is2D()); + Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit; + Instr q = NEON_Q; + uint32_t encoded_imm = FP64ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const VRegister& vd, float imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S()); + Emit(FMOV_s_imm | Rd(vd) | 
ImmFP32(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is2S() | vd.Is4S()); + Instr op = NEONModifiedImmediate_MOVI; + Instr q = vd.Is4S() ? NEON_Q : 0; + uint32_t encoded_imm = FP32ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const VRegister& vd, Float16 imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H()); + Emit(FMOV_h_imm | Rd(vd) | ImmFP16(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); + VIXL_ASSERT(vd.Is4H() | vd.Is8H()); + Instr q = vd.Is8H() ? NEON_Q : 0; + uint32_t encoded_imm = FP16ToImm8(imm); + Emit(q | NEONModifiedImmediate_FMOV | ImmNEONabcdefgh(encoded_imm) | + NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((rd.GetSizeInBits() == vn.GetSizeInBits()) || vn.Is1H()); + FPIntegerConvertOp op; + switch (vn.GetSizeInBits()) { + case 16: + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = rd.Is64Bits() ? FMOV_xh : FMOV_wh; + break; + case 32: + op = FMOV_ws; + break; + default: + op = FMOV_xd; + } + Emit(op | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT((vd.GetSizeInBits() == rn.GetSizeInBits()) || vd.Is1H()); + FPIntegerConvertOp op; + switch (vd.GetSizeInBits()) { + case 16: + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = rn.Is64Bits() ? FMOV_hx : FMOV_hw; + break; + case 32: + op = FMOV_sw; + break; + default: + op = FMOV_dx; + } + Emit(op | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + } + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(vd.IsSameFormat(vn)); + Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, int index, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP)); + VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX()); + USE(index); + Emit(FMOV_d1_x | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn, int index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP)); + VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX()); + USE(index); + Emit(FMOV_x_d1 | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FMADD_h; + } else if (vd.Is1S()) { + op = FMADD_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FMADD_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FMSUB_h; + } else if (vd.Is1S()) { + op = FMSUB_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FMSUB_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmadd(const VRegister& vd, + const 
VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMADD_h; + } else if (vd.Is1S()) { + op = FNMADD_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMADD_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMSUB_h; + } else if (vd.Is1S()) { + op = FNMSUB_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMSUB_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm)); + Instr op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMUL_h; + } else if (vd.Is1S()) { + op = FNMUL_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMUL_d; + } + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + double value, + FPTrapFlags trap) { + USE(value); + // Although the fcmp{e} instructions can strictly only take an immediate + // value of +0.0, we don't need to check for -0.0 because the sign of 0.0 + // doesn't affect the result of the comparison. + VIXL_ASSERT(value == 0.0); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero; + Emit(FPType(vn) | op | Rn(vn)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? FCMPE : FCMP; + Emit(FPType(vn) | op | Rm(vm) | Rn(vn)); +} + + +void Assembler::fcmp(const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, vm, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, vm, EnableTrap); +} + + +void Assembler::fcmp(const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, value, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, value, EnableTrap); +} + + +void Assembler::FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? 
FCCMPE : FCCMP; + Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv)); +} + +void Assembler::fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap); +} + + +void Assembler::fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap); +} + + +void Assembler::fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvt(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing1SourceOp op; + // The half-precision variants belong to base FP, and do not require kFPHalf. + if (vd.Is1D()) { + VIXL_ASSERT(vn.Is1S() || vn.Is1H()); + op = vn.Is1S() ? FCVT_ds : FCVT_dh; + } else if (vd.Is1S()) { + VIXL_ASSERT(vn.Is1D() || vn.Is1H()); + op = vn.Is1D() ? FCVT_sd : FCVT_sh; + } else { + VIXL_ASSERT(vd.Is1H()); + VIXL_ASSERT(vn.Is1D() || vn.Is1S()); + op = vn.Is1D() ? FCVT_hd : FCVT_hs; + } + FPDataProcessing1Source(vd, vn, op); +} + + +void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vn.Is2D() ? 
(1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + Instr format = 1 << NEONSize_offset; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() && vn.Is1D()); + Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd)); + } else { + VIXL_ASSERT(vd.Is2S() && vn.Is2D()); + Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is4S() && vn.Is2D()); + Instr format = 1 << NEONSize_offset; + Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); +} + +void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT)); + VIXL_ASSERT(rd.IsW() && vn.Is1D()); + Emit(FJCVTZS | Rn(vn) | Rd(rd)); +} + + +void Assembler::NEONFPConvertToInt(const Register& rd, + const VRegister& vn, + Instr op) { + Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd)); +} + + +void Assembler::NEONFPConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op) { + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + } + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP16ConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vn.IsLaneSizeH()); + if (vn.IsScalar()) { + op |= NEON_Q | NEONScalar; + } else if (vn.Is8H()) { + op |= NEON_Q; + } + Emit(op | Rn(vn) | Rd(vd)); +} + + +#define NEON_FP2REGMISC_FCVT_LIST(V) \ + V(fcvtnu, NEON_FCVTNU, FCVTNU) \ + V(fcvtns, NEON_FCVTNS, FCVTNS) \ + V(fcvtpu, NEON_FCVTPU, FCVTPU) \ + V(fcvtps, NEON_FCVTPS, FCVTPS) \ + V(fcvtmu, NEON_FCVTMU, FCVTMU) \ + V(fcvtms, NEON_FCVTMS, FCVTMS) \ + V(fcvtau, NEON_FCVTAU, FCVTAU) \ + V(fcvtas, NEON_FCVTAS, FCVTAS) + +#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \ + void Assembler::FN(const Register& rd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + if (vn.IsH()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + NEONFPConvertToInt(rd, vn, SCA_OP); \ + } \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.IsLaneSizeH()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + NEONFP16ConvertToInt(vd, vn, VEC_OP##_H); \ + } else { \ + NEONFPConvertToInt(vd, vn, VEC_OP); \ + } \ + } +NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + + +void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. 
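+  // For example (fixed-point form): fcvtzs(v0.V2S(), v1.V2S(), 16) scales
+  // each lane by 2^16 before the truncating conversion, i.e. it produces a
+  // signed result with 16 fractional bits.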
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZS_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCVTZS); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm); + } +} + + +void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZU_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCVTZU); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm); + } +} + +void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_UCVTF_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_UCVTF); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm); + } +} + +void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. 
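+  // For example (fixed-point form): scvtf(v0.V2S(), v1.V2S(), 12) treats
+  // each source lane as signed fixed-point with 12 fractional bits, i.e.
+  // the integer value divided by 2^12, before converting to floating-point.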
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_SCVTF_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_SCVTF); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm); + } +} + + +void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + +void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + +void Assembler::NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3SameFP16(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.GetLaneSizeInBytes() == kHRegSizeInBytes); + if (vd.Is8H()) op |= NEON_Q; + Emit(op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_FP2REGMISC_LIST(V) \ + V(fabs, NEON_FABS, FABS, FABS_h) \ + V(fneg, NEON_FNEG, FNEG, FNEG_h) \ + V(fsqrt, NEON_FSQRT, FSQRT, FSQRT_h) \ + V(frintn, NEON_FRINTN, FRINTN, FRINTN_h) \ + V(frinta, NEON_FRINTA, FRINTA, FRINTA_h) \ + V(frintp, NEON_FRINTP, FRINTP, FRINTP_h) \ + V(frintm, NEON_FRINTM, FRINTM, FRINTM_h) \ + V(frintx, NEON_FRINTX, FRINTX, FRINTX_h) \ + V(frintz, NEON_FRINTZ, FRINTZ, FRINTZ_h) \ + V(frinti, NEON_FRINTI, FRINTI, FRINTI_h) \ + V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar, NEON_FRSQRTE_H_scalar) \ + V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar, NEON_FRECPE_H_scalar) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + Instr op; \ + if (vd.IsScalar()) { \ + if (vd.Is1H()) { \ + if ((SCA_OP_H & NEONScalar2RegMiscFP16FMask) == \ + NEONScalar2RegMiscFP16Fixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + } \ + op = SCA_OP_H; \ + } else { \ + if ((SCA_OP & NEONScalar2RegMiscFMask) == NEONScalar2RegMiscFixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + } \ + 
VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(vd.Is4H() || vd.Is8H() || vd.Is2S() || vd.Is2D() || \ + vd.Is4S()); \ + if (vd.IsLaneSizeH()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + op = VEC_OP##_H; \ + if (vd.Is8H()) { \ + op |= NEON_Q; \ + } \ + } else { \ + op = VEC_OP; \ + } \ + } \ + if (vd.IsLaneSizeH()) { \ + NEONFP2RegMiscFP16(vd, vn, op); \ + } else { \ + NEONFP2RegMisc(vd, vn, op); \ + } \ + } +NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONFP2RegMiscFP16(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Emit(op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0); + USE(value); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value); +} + + +void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGE_zero, value); +} + + +void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGT_zero, value); +} + + +void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_CMLE_zero, value); +} + + +void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLT_zero, value); +} + + +void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) { + USE(shift); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) || + (vd.Is4S() && vn.Is4H() && shift == 16) || + (vd.Is2D() && vn.Is2S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) { + USE(shift); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) || + (vd.Is4S() && vn.Is8H() && shift == 16) || + (vd.Is2D() && vn.Is4S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + op |= NEON_Q | NEONScalar; + } else { + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); + } + + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMiscFP16(const VRegister& vd, + 
const VRegister& vn, + NEON2RegMiscFP16Op vop, + double value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1H()); + op |= NEON_Q | NEONScalar; + } else { + VIXL_ASSERT(vd.Is4H() || vd.Is8H()); + if (vd.Is8H()) { + op |= NEON_Q; + } + } + + Emit(op | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMEQ_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value); + } +} + + +void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMGE_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value); + } +} + + +void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMGT_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value); + } +} + + +void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMLE_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value); + } +} + + +void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMLT_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value); + } +} + + +void Assembler::frecpx(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar()); + VIXL_ASSERT(AreSameFormat(vd, vn)); + Instr op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + op = NEON_FRECPX_H_scalar; + } else { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + op = NEON_FRECPX_scalar; + } + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_3SAME_LIST(V) \ + V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ + V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \ + V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \ + V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \ + V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \ + V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \ + V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \ + V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \ + V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \ + V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \ + V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \ + V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \ + V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \ + V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(srhadd, NEON_SRHADD, vd.IsVector() && 
!vd.IsLaneSizeD()) \ + V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \ + V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \ + V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \ + V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \ + V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \ + V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \ + V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \ + V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \ + V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \ + V(uqadd, NEON_UQADD, true) \ + V(sqadd, NEON_SQADD, true) \ + V(uqsub, NEON_UQSUB, true) \ + V(sqsub, NEON_SQSUB, true) \ + V(sqshl, NEON_SQSHL, true) \ + V(uqshl, NEON_UQSHL, true) \ + V(sqrshl, NEON_SQRSHL, true) \ + V(uqrshl, NEON_UQRSHL, true) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3Same(vd, vn, vm, OP); \ + } +NEON_3SAME_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define NEON_FP3SAME_OP_LIST(V) \ + V(fmulx, NEON_FMULX, NEON_FMULX_scalar, NEON_FMULX_H_scalar) \ + V(frecps, NEON_FRECPS, NEON_FRECPS_scalar, NEON_FRECPS_H_scalar) \ + V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar, NEON_FRSQRTS_H_scalar) \ + V(fabd, NEON_FABD, NEON_FABD_scalar, NEON_FABD_H_scalar) \ + V(fmla, NEON_FMLA, 0, 0) \ + V(fmls, NEON_FMLS, 0, 0) \ + V(facge, NEON_FACGE, NEON_FACGE_scalar, NEON_FACGE_H_scalar) \ + V(facgt, NEON_FACGT, NEON_FACGT_scalar, NEON_FACGT_H_scalar) \ + V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar, NEON_FCMEQ_H_scalar) \ + V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar, NEON_FCMGE_H_scalar) \ + V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar, NEON_FCMGT_H_scalar) \ + V(faddp, NEON_FADDP, 0, 0) \ + V(fmaxp, NEON_FMAXP, 0, 0) \ + V(fminp, NEON_FMINP, 0, 0) \ + V(fmaxnmp, NEON_FMAXNMP, 0, 0) \ + V(fadd, NEON_FADD, FADD, 0) \ + V(fsub, NEON_FSUB, FSUB, 0) \ + V(fmul, NEON_FMUL, FMUL, 0) \ + V(fdiv, NEON_FDIV, FDIV, 0) \ + V(fmax, NEON_FMAX, FMAX, 0) \ + V(fmin, NEON_FMIN, FMIN, 0) \ + V(fmaxnm, NEON_FMAXNM, FMAXNM, 0) \ + V(fminnm, NEON_FMINNM, FMINNM, 0) \ + V(fminnmp, NEON_FMINNMP, 0, 0) +// clang-format on + +// TODO: This macro is complicated because it classifies the instructions in the +// macro list above, and treats each case differently. It could be somewhat +// simpler if we were to split the macro, at the cost of some duplication. 
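+// In outline: the scalar FP16 encoding (SCA_OP_H) is used when vd is a 1H
+// scalar, the plain scalar encoding (SCA_OP) for other scalars, and the
+// vector encoding (VEC_OP, or its _H variant for half-precision lanes)
+// otherwise. For example, fadd(s0, s1, s2) takes the scalar FADD path,
+// while fadd(v0.V4S(), v1.V4S(), v2.V4S()) takes the NEON_FADD vector path.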
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + Instr op; \ + bool is_fp16 = false; \ + if ((SCA_OP != 0) && vd.IsScalar()) { \ + if ((SCA_OP_H != 0) && vd.Is1H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \ + is_fp16 = true; \ + op = SCA_OP_H; \ + } else { \ + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); \ + if ((SCA_OP & NEONScalar3SameFMask) == NEONScalar3SameFixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + } else if (vd.Is1H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + } \ + op = SCA_OP; \ + } \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(vd.IsVector()); \ + if (vd.Is4H() || vd.Is8H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + is_fp16 = true; \ + op = VEC_OP##_H; \ + } else { \ + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + } \ + if (is_fp16) { \ + NEON3SameFP16(vd, vn, vm, op); \ + } else { \ + NEONFP3Same(vd, vn, vm, op); \ + } \ + } +NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::addp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1D() && vn.Is2D())); + Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::sqrdmlah(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = NEON_SQRDMLAH; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::sqrdmlsh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = NEON_SQRDMLSH; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::sdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B())); + + Emit(VFormat(vd) | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::udot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B())); + + Emit(VFormat(vd) | NEON_UDOT | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::faddp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FADDP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) { + 
VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMAXP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fminp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMINP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMAXNMP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMINNMP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd)); + } +} + + +// v8.3 complex numbers - floating-point complex multiply accumulate. +void Assembler::fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + int rot) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); + VIXL_ASSERT(vd.IsVector() && AreSameFormat(vd, vn)); + VIXL_ASSERT((vm.IsH() && (vd.Is8H() || vd.Is4H())) || + (vm.IsS() && vd.Is4S())); + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + int index_num_bits = vd.Is4S() ? 1 : 2; + Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA_byelement | + ImmNEONHLM(vm_index, index_num_bits) | ImmRotFcmlaSca(rot) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB()); + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA | ImmRotFcmlaVec(rot) | Rn(vn) | + Rd(vd)); +} + + +// v8.3 complex numbers - floating-point complex add. 
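+// For illustration: rot is 90 or 270, so fcadd(v0.V4S(), v1.V4S(),
+// v2.V4S(), 90) adds v2 to v1 with each complex element pair of v2 first
+// rotated by 90 degrees in the complex plane.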
+void Assembler::fcadd(const VRegister& vd,
+                      const VRegister& vn,
+                      const VRegister& vm,
+                      int rot) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma));
+  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+  VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB());
+  if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
+  Emit(VFormat(vd) | Rm(vm) | NEON_FCADD | ImmRotFcadd(rot) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
+}
+
+
+void Assembler::mov(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  if (vd.IsD()) {
+    orr(vd.V8B(), vn.V8B(), vn.V8B());
+  } else {
+    VIXL_ASSERT(vd.IsQ());
+    orr(vd.V16B(), vn.V16B(), vn.V16B());
+  }
+}
+
+
+void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
+}
+
+
+void Assembler::movi(const VRegister& vd,
+                     const uint64_t imm,
+                     Shift shift,
+                     const int shift_amount) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT((shift == LSL) || (shift == MSL));
+  if (vd.Is2D() || vd.Is1D()) {
+    VIXL_ASSERT(shift_amount == 0);
+    int imm8 = 0;
+    for (int i = 0; i < 8; ++i) {
+      int byte = (imm >> (i * 8)) & 0xff;
+      VIXL_ASSERT((byte == 0) || (byte == 0xff));
+      if (byte == 0xff) {
+        imm8 |= (1 << i);
+      }
+    }
+    int q = vd.Is2D() ? NEON_Q : 0;
+    Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
+         ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+  } else if (shift == LSL) {
+    VIXL_ASSERT(IsUint8(imm));
+    NEONModifiedImmShiftLsl(vd,
+                            static_cast<int>(imm),
+                            shift_amount,
+                            NEONModifiedImmediate_MOVI);
+  } else {
+    VIXL_ASSERT(IsUint8(imm));
+    NEONModifiedImmShiftMsl(vd,
+                            static_cast<int>(imm),
+                            shift_amount,
+                            NEONModifiedImmediate_MOVI);
+  }
+}
+
+
+void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  if (vd.IsD()) {
+    not_(vd.V8B(), vn.V8B());
+  } else {
+    VIXL_ASSERT(vd.IsQ());
+    not_(vd.V16B(), vn.V16B());
+  }
+}
+
+
+void Assembler::mvni(const VRegister& vd,
+                     const int imm8,
+                     Shift shift,
+                     const int shift_amount) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT((shift == LSL) || (shift == MSL));
+  if (shift == LSL) {
+    NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
+  } else {
+    NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
+  }
+}
+
+
+void Assembler::NEONFPByElement(const VRegister& vd,
+                                const VRegister& vn,
+                                const VRegister& vm,
+                                int vm_index,
+                                NEONByIndexedElementOp vop,
+                                NEONByIndexedElementOp vop_half) {
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
+              (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
+              (vd.Is1D() && vm.Is1D()) || (vd.Is4H() && vm.Is1H()) ||
+              (vd.Is8H() && vm.Is1H()) || (vd.Is1H() && vm.Is1H()));
+  VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)) ||
+              (vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)));
+
+  Instr op = vop;
+  int index_num_bits;
+  if (vm.Is1D()) {
+    index_num_bits = 1;
+  } else if (vm.Is1S()) {
+    index_num_bits = 2;
+  } else {
+    index_num_bits = 3;
+    op = vop_half;
+  }
+
+  if (vd.IsScalar()) {
+    op |= NEON_Q | NEONScalar;
+  }
+
+  if
(!vm.Is1H()) { + op |= FPFormat(vd); + } else if (vd.Is8H()) { + op |= NEON_Q; + } + + Emit(op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) || + (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) || + (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S())); + VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::NEONByElementL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) || + (vd.Is4S() && vn.Is8H() && vm.Is1H()) || + (vd.Is1S() && vn.Is1H() && vm.Is1H()) || + (vd.Is2D() && vn.Is2S() && vm.Is1S()) || + (vd.Is2D() && vn.Is4S() && vm.Is1S()) || + (vd.Is1D() && vn.Is1S() && vm.Is1S())); + + VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::sdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || + (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); + + int index_num_bits = 2; + Emit(VFormat(vd) | NEON_SDOT_byelement | + ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::udot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || + (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); + + int index_num_bits = 2; + Emit(VFormat(vd) | NEON_UDOT_byelement | + ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_BYELEMENT_LIST(V) \ + V(mul, NEON_MUL_byelement, vn.IsVector()) \ + V(mla, NEON_MLA_byelement, vn.IsVector()) \ + V(mls, NEON_MLS_byelement, vn.IsVector()) \ + V(sqdmulh, NEON_SQDMULH_byelement, true) \ + V(sqrdmulh, NEON_SQRDMULH_byelement, true) \ +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_BYELEMENT_RDM_LIST(V) \ + V(sqrdmlah, NEON_SQRDMLAH_byelement) \ + V(sqrdmlsh, NEON_SQRDMLSH_byelement) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + 
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_RDM_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_FPBYELEMENT_LIST(V) \ + V(fmul, NEON_FMUL_byelement, NEON_FMUL_H_byelement) \ + V(fmla, NEON_FMLA_byelement, NEON_FMLA_H_byelement) \ + V(fmls, NEON_FMLS_byelement, NEON_FMLS_H_byelement) \ + V(fmulx, NEON_FMULX_byelement, NEON_FMULX_H_byelement) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, OP_H) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + NEONFPByElement(vd, vn, vm, vm_index, OP, OP_H); \ + } +NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_BYELEMENT_LONG_LIST(V) \ + V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ()) +// clang-format on + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEONByElementL(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::suqadd(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SUQADD); +} + + +void Assembler::usqadd(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_USQADD); +} + + +void Assembler::abs(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_ABS); +} + + +void Assembler::sqabs(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SQABS); +} + + +void Assembler::neg(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_NEG); +} + + +void Assembler::sqneg(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SQNEG); +} + + +void Assembler::NEONXtn(const VRegister& vd, + const VRegister& vn, + 
NEON2RegMiscOp vop) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  Instr format, op = vop;
+  if (vd.IsScalar()) {
+    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
+                (vd.Is1S() && vn.Is1D()));
+    op |= NEON_Q | NEONScalar;
+    format = SFormat(vd);
+  } else {
+    VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+                (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+                (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+    format = VFormat(vd);
+  }
+  Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsVector() && vd.IsD());
+  NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+  NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+  NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
+void Assembler::not_(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ext(const VRegister& vd,
+                    const VRegister& vn,
+                    const VRegister& vm,
+                    int index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  VIXL_ASSERT((0 <= index) && (index < vd.GetLanes()));
+  Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  Instr q, scalar;
+
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
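+  // (Illustrative example, not from the upstream source: dup(v0.V4S(),
+  // v1.V4S(), 2) broadcasts lane 2 of v1 into all four S lanes of v0, while
+  // the scalar form dup(s0, v1.V4S(), 2) extracts just that lane, which is
+  // what the mov alias below relies on.)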
+  int lane_size = vn.GetLaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      break;
+    case 2:
+      format = NEON_8H;
+      break;
+    case 4:
+      format = NEON_4S;
+      break;
+    default:
+      VIXL_ASSERT(lane_size == 8);
+      format = NEON_2D;
+      break;
+  }
+
+  if (vd.IsScalar()) {
+    q = NEON_Q;
+    scalar = NEONScalar;
+  } else {
+    VIXL_ASSERT(!vd.Is1D());
+    q = vd.IsD() ? 0 : NEON_Q;
+    scalar = 0;
+  }
+  Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
+       Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsScalar());
+  dup(vd, vn, vn_index);
+}
+
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(!vd.Is1D());
+  VIXL_ASSERT(vd.Is2D() == rn.IsX());
+  int q = vd.IsD() ? 0 : NEON_Q;
+  Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::ins(const VRegister& vd,
+                    int vd_index,
+                    const VRegister& vn,
+                    int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vd.GetLaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      break;
+    case 2:
+      format = NEON_8H;
+      break;
+    case 4:
+      format = NEON_4S;
+      break;
+    default:
+      VIXL_ASSERT(lane_size == 8);
+      format = NEON_2D;
+      break;
+  }
+
+  VIXL_ASSERT(
+      (0 <= vd_index) &&
+      (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  VIXL_ASSERT(
+      (0 <= vn_index) &&
+      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+       ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+                    int vd_index,
+                    const VRegister& vn,
+                    int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  ins(vd, vd_index, vn, vn_index);
+}
+
+
+void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
+  int lane_size = vd.GetLaneSizeInBytes();
+  NEONFormatField format;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      VIXL_ASSERT(rn.IsW());
+      break;
+    case 2:
+      format = NEON_8H;
+      VIXL_ASSERT(rn.IsW());
+      break;
+    case 4:
+      format = NEON_4S;
+      VIXL_ASSERT(rn.IsW());
+      break;
+    default:
+      VIXL_ASSERT(lane_size == 8);
+      VIXL_ASSERT(rn.IsX());
+      format = NEON_2D;
+      break;
+  }
+
+  VIXL_ASSERT(
+      (0 <= vd_index) &&
+      (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  ins(vd, vd_index, rn);
+}
+
+
+void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s or d.
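+  // (Illustrative example, not from the upstream source: umov(w0, v1.V8H(), 3)
+  // zero-extends H lane 3 of v1 into w0; only D lanes may target an X
+  // register, as the switch below asserts.)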
+  int lane_size = vn.GetLaneSizeInBytes();
+  NEONFormatField format;
+  Instr q = 0;
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      VIXL_ASSERT(rd.IsW());
+      break;
+    case 2:
+      format = NEON_8H;
+      VIXL_ASSERT(rd.IsW());
+      break;
+    case 4:
+      format = NEON_4S;
+      VIXL_ASSERT(rd.IsW());
+      break;
+    default:
+      VIXL_ASSERT(lane_size == 8);
+      VIXL_ASSERT(rd.IsX());
+      format = NEON_2D;
+      q = NEON_Q;
+      break;
+  }
+
+  VIXL_ASSERT(
+      (0 <= vn_index) &&
+      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vn.GetSizeInBytes() >= 4);
+  umov(rd, vn, vn_index);
+}
+
+
+void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+  // number of lanes, and T is b, h, s.
+  int lane_size = vn.GetLaneSizeInBytes();
+  NEONFormatField format;
+  Instr q = 0;
+  VIXL_ASSERT(lane_size != 8);
+  switch (lane_size) {
+    case 1:
+      format = NEON_16B;
+      break;
+    case 2:
+      format = NEON_8H;
+      break;
+    default:
+      VIXL_ASSERT(lane_size == 4);
+      VIXL_ASSERT(rd.IsX());
+      format = NEON_4S;
+      break;
+  }
+  q = rd.IsW() ? 0 : NEON_Q;
+  VIXL_ASSERT(
+      (0 <= vn_index) &&
+      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+  Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::cls(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::clz(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+  Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+  Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+  Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+  Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(AreSameFormat(vd, vn));
+  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+  Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
+}
+
+
+void
Assembler::NEONAddlp(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp op) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((op == NEON_SADDLP) || (op == NEON_UADDLP) || + (op == NEON_SADALP) || (op == NEON_UADALP)); + + VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) || + (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_SADDLP); +} + + +void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_UADDLP); +} + + +void Assembler::sadalp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_SADALP); +} + + +void Assembler::uadalp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_UADALP); +} + + +void Assembler::NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op) { + VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) || + (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) || + (vn.Is4S() && vd.Is1D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlv(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAcrossLanesL(vd, vn, NEON_SADDLV); +} + + +void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAcrossLanesL(vd, vn, NEON_UADDLV); +} + + +void Assembler::NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op, + Instr op_half) { + VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) || + (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) || + (vn.Is4S() && vd.Is1S())); + if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + if (vd.Is1H()) { + VIXL_ASSERT(op_half != 0); + Instr vop = op_half; + if (vn.Is8H()) { + vop |= NEON_Q; + } + Emit(vop | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); + } + } else { + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); + } +} + +// clang-format off +#define NEON_ACROSSLANES_LIST(V) \ + V(addv, NEON_ADDV) \ + V(smaxv, NEON_SMAXV) \ + V(sminv, NEON_SMINV) \ + V(umaxv, NEON_UMAXV) \ + V(uminv, NEON_UMINV) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + NEONAcrossLanes(vd, vn, OP, 0); \ + } +NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_ACROSSLANES_FP_LIST(V) \ + V(fmaxv, NEON_FMAXV, NEON_FMAXV_H) \ + V(fminv, NEON_FMINV, NEON_FMINV_H) \ + V(fmaxnmv, NEON_FMAXNMV, NEON_FMAXNMV_H) \ + V(fminnmv, NEON_FMINNMV, NEON_FMINNMV_H) \ +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, OP_H) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + VIXL_ASSERT(vd.Is1S() || vd.Is1H()); \ + NEONAcrossLanes(vd, vn, OP, OP_H); \ + } +NEON_ACROSSLANES_FP_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONPerm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONPermOp op) { + 
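+  // (Illustrative note, not from the upstream source: this helper backs the
+  // trn1/2, uzp1/2 and zip1/2 emitters below; e.g. zip1(v0.V4S(), v1.V4S(),
+  // v2.V4S()) interleaves lanes 0 and 1 of v1 and v2 into v0.)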
VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(!vd.Is1D()); + Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::trn1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_TRN1); +} + + +void Assembler::trn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_TRN2); +} + + +void Assembler::uzp1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_UZP1); +} + + +void Assembler::uzp2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_UZP2); +} + + +void Assembler::zip1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_ZIP1); +} + + +void Assembler::zip2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_ZIP2); +} + + +void Assembler::NEONShiftImmediate(const VRegister& vd, + const VRegister& vn, + NEONShiftImmediateOp op, + int immh_immb) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Instr q, scalar; + if (vn.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + q = vd.IsD() ? 0 : NEON_Q; + scalar = 0; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftLeftImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16); +} + + +void Assembler::NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16); +} + + +void Assembler::NEONShiftImmediateL(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + int immh_immb = (laneSizeInBits + shift) << 16; + + VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr q; + q = vn.IsD() ? 0 : NEON_Q; + Emit(q | op | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftImmediateN(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + Instr q, scalar; + int laneSizeInBits = vd.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + int immh_immb = (2 * laneSizeInBits - shift) << 16; + + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + q = NEON_Q; + scalar = NEONScalar; + } else { + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + scalar = 0; + q = vd.IsD() ? 
0 : NEON_Q; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL); +} + + +void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI); +} + + +void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm); +} + + +void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU); +} + + +void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm); +} + + +void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sxtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + sshll(vd, vn, 0); +} + + +void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + sshll2(vd, vn, 0); +} + + +void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::uxtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ushll(vd, vn, 0); +} + + +void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ushll2(vd, vn, 0); +} + + +void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRI); +} + + +void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR); +} + + +void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USHR); +} + + +void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR); +} + + +void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR); +} + + 
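+// (Illustrative note, not from the upstream source: the right-shift emitters
+// here all funnel through NEONShiftRightImmediate above, so for example
+// ushr(v0.V4S(), v1.V4S(), 5) encodes immh:immb as (2 * 32 - 5) << 16.)
+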
+void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA); +} + + +void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USRA); +} + + +void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA); +} + + +void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA); +} + + +void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} + + +void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + 
VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
+  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+// Note:
+// For all ToImm instructions below, a difference in case
+// for the same letter indicates a negated bit.
+// If b is 1, then B is 0.
+uint32_t Assembler::FP16ToImm8(Float16 imm) {
+  VIXL_ASSERT(IsImmFP16(imm));
+  // Half: aBbb.cdef.gh00.0000 (16 bits)
+  uint16_t bits = Float16ToRawbits(imm);
+  // bit7: a000.0000
+  uint16_t bit7 = ((bits >> 15) & 0x1) << 7;
+  // bit6: 0b00.0000
+  uint16_t bit6 = ((bits >> 13) & 0x1) << 6;
+  // bit5_to_0: 00cd.efgh
+  uint16_t bit5_to_0 = (bits >> 6) & 0x3f;
+  uint32_t result = static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+  return result;
+}
+
+
+Instr Assembler::ImmFP16(Float16 imm) {
+  return FP16ToImm8(imm) << ImmFP_offset;
+}
+
+
+uint32_t Assembler::FP32ToImm8(float imm) {
+  VIXL_ASSERT(IsImmFP32(imm));
+  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+  uint32_t bits = FloatToRawbits(imm);
+  // bit7: a000.0000
+  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+  // bit6: 0b00.0000
+  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+  // bit5_to_0: 00cd.efgh
+  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+  return bit7 | bit6 | bit5_to_0;
+}
+
+
+Instr Assembler::ImmFP32(float imm) { return FP32ToImm8(imm) << ImmFP_offset; }
+
+
+uint32_t Assembler::FP64ToImm8(double imm) {
+  VIXL_ASSERT(IsImmFP64(imm));
+  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+  //       0000.0000.0000.0000.0000.0000.0000.0000
+  uint64_t bits = DoubleToRawbits(imm);
+  // bit7: a000.0000
+  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+  // bit6: 0b00.0000
+  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+  // bit5_to_0: 00cd.efgh
+  uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+  return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+}
+
+
+Instr Assembler::ImmFP64(double imm) { return FP64ToImm8(imm) << ImmFP_offset; }
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+                         uint64_t imm,
+                         int shift,
+                         MoveWideImmediateOp mov_op) {
+  // Ignore the top 32 bits of an immediate if we're moving to a W register.
+  if (rd.Is32Bits()) {
+    // Check that the top 32 bits are zero (a positive 32-bit number) or top
+    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
+    VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
+                ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
+    imm &= kWRegMask;
+  }
+
+  if (shift >= 0) {
+    // Explicit shift specified.
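+    // (Illustrative example, not from the upstream source: movz(x0, 0x1234, 16)
+    // takes this branch, encoding hw = 16 / 16 = 1 and giving x0 = 0x12340000.)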
+    VIXL_ASSERT((shift == 0) || (shift == 16) || (shift == 32) ||
+                (shift == 48));
+    VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+    shift /= 16;
+  } else {
+    // Calculate a new immediate and shift combination to encode the immediate
+    // argument.
+    shift = 0;
+    if ((imm & 0xffffffffffff0000) == 0) {
+      // Nothing to do.
+    } else if ((imm & 0xffffffff0000ffff) == 0) {
+      imm >>= 16;
+      shift = 1;
+    } else if ((imm & 0xffff0000ffffffff) == 0) {
+      VIXL_ASSERT(rd.Is64Bits());
+      imm >>= 32;
+      shift = 2;
+    } else if ((imm & 0x0000ffffffffffff) == 0) {
+      VIXL_ASSERT(rd.Is64Bits());
+      imm >>= 48;
+      shift = 3;
+    }
+  }
+
+  VIXL_ASSERT(IsUint16(imm));
+
+  Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) |
+       ShiftMoveWide(shift));
+}
+
+
+void Assembler::AddSub(const Register& rd,
+                       const Register& rn,
+                       const Operand& operand,
+                       FlagsUpdate S,
+                       AddSubOp op) {
+  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
+  if (operand.IsImmediate()) {
+    int64_t immediate = operand.GetImmediate();
+    VIXL_ASSERT(IsImmAddSub(immediate));
+    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+         ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
+  } else if (operand.IsShiftedRegister()) {
+    VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
+    VIXL_ASSERT(operand.GetShift() != ROR);
+
+    // For instructions of the form:
+    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
+    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
+    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
+    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+    // or their 64-bit register equivalents, convert the operand from shifted
+    // to extended register mode, and emit an add/sub extended instruction.
+    if (rn.IsSP() || rd.IsSP()) {
+      VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
+      DataProcExtendedRegister(rd,
+                               rn,
+                               operand.ToExtendedRegister(),
+                               S,
+                               AddSubExtendedFixed | op);
+    } else {
+      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+    }
+  } else {
+    VIXL_ASSERT(operand.IsExtendedRegister());
+    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+  }
+}
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+                                const Register& rn,
+                                const Operand& operand,
+                                FlagsUpdate S,
+                                AddSubWithCarryOp op) {
+  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
+  VIXL_ASSERT(rd.GetSizeInBits() == operand.GetRegister().GetSizeInBits());
+  VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
+  Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+  VIXL_ASSERT(IsUint16(code));
+  Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+  VIXL_ASSERT(IsUint16(code));
+  Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::svc(int code) { Emit(SVC | ImmException(code)); }
+
+
+// TODO(all): The third parameter should be passed by reference but gcc 4.8.2
+// then reports a bogus uninitialised warning.
+void Assembler::Logical(const Register& rd,
+                        const Register& rn,
+                        const Operand operand,
+                        LogicalOp op) {
+  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
+  if (operand.IsImmediate()) {
+    int64_t immediate = operand.GetImmediate();
+    unsigned reg_size = rd.GetSizeInBits();
+
+    VIXL_ASSERT(immediate != 0);
+    VIXL_ASSERT(immediate != -1);
+    VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
+
+    // If the operation is NOT, invert the operation and immediate.
+    if ((op & NOT) == NOT) {
+      op = static_cast<LogicalOp>(op & ~NOT);
+      immediate = rd.Is64Bits() ?
~immediate : (~immediate & kWRegMask);
+    }
+
+    unsigned n, imm_s, imm_r;
+    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+      // Immediate can be encoded in the instruction.
+      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+    } else {
+      // This case is handled in the macro assembler.
+      VIXL_UNREACHABLE();
+    }
+  } else {
+    VIXL_ASSERT(operand.IsShiftedRegister());
+    VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
+    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+  }
+}
+
+
+void Assembler::LogicalImmediate(const Register& rd,
+                                 const Register& rn,
+                                 unsigned n,
+                                 unsigned imm_s,
+                                 unsigned imm_r,
+                                 LogicalOp op) {
+  unsigned reg_size = rd.GetSizeInBits();
+  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+       Rn(rn));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+                                   const Operand& operand,
+                                   StatusFlags nzcv,
+                                   Condition cond,
+                                   ConditionalCompareOp op) {
+  Instr ccmpop;
+  if (operand.IsImmediate()) {
+    int64_t immediate = operand.GetImmediate();
+    VIXL_ASSERT(IsImmConditionalCompare(immediate));
+    ccmpop = ConditionalCompareImmediateFixed | op |
+             ImmCondCmp(static_cast<unsigned>(immediate));
+  } else {
+    VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
+    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.GetRegister());
+  }
+  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+                                      const Register& rn,
+                                      DataProcessing1SourceOp op) {
+  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
+  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const VRegister& vd,
+                                        const VRegister& vn,
+                                        FPDataProcessing1SourceOp op) {
+  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
+  Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const VRegister& vd,
+                                        const VRegister& vn,
+                                        const VRegister& vm,
+                                        const VRegister& va,
+                                        FPDataProcessing3SourceOp op) {
+  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
+  VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
+  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
+}
+
+
+void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
+                                        const int imm8,
+                                        const int left_shift,
+                                        NEONModifiedImmediateOp op) {
+  VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
+              vd.Is4S());
+  VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
+              (left_shift == 24));
+  VIXL_ASSERT(IsUint8(imm8));
+
+  int cmode_1, cmode_2, cmode_3;
+  if (vd.Is8B() || vd.Is16B()) {
+    VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
+    cmode_1 = 1;
+    cmode_2 = 1;
+    cmode_3 = 1;
+  } else {
+    cmode_1 = (left_shift >> 3) & 1;
+    cmode_2 = left_shift >> 4;
+    cmode_3 = 0;
+    if (vd.Is4H() || vd.Is8H()) {
+      VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
+      cmode_3 = 1;
+    }
+  }
+  int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
+
+  int q = vd.IsQ() ?
NEON_Q : 0;
+
+  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
+
+
+void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
+                                        const int imm8,
+                                        const int shift_amount,
+                                        NEONModifiedImmediateOp op) {
+  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+  VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
+  VIXL_ASSERT(IsUint8(imm8));
+
+  int cmode_0 = (shift_amount >> 4) & 1;
+  int cmode = 0xc | cmode_0;
+
+  int q = vd.IsQ() ? NEON_Q : 0;
+
+  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+                          const Register& rn,
+                          Shift shift,
+                          unsigned shift_amount) {
+  switch (shift) {
+    case LSL:
+      lsl(rd, rn, shift_amount);
+      break;
+    case LSR:
+      lsr(rd, rn, shift_amount);
+      break;
+    case ASR:
+      asr(rd, rn, shift_amount);
+      break;
+    case ROR:
+      ror(rd, rn, shift_amount);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+                                const Register& rn,
+                                Extend extend,
+                                unsigned left_shift) {
+  VIXL_ASSERT(rd.GetSizeInBits() >= rn.GetSizeInBits());
+  unsigned reg_size = rd.GetSizeInBits();
+  // Use the correct size of register.
+  Register rn_ = Register(rn.GetCode(), rd.GetSizeInBits());
+  // Bits extracted are high_bit:0.
+  unsigned high_bit = (8 << (extend & 0x3)) - 1;
+  // Number of bits left in the result that are not introduced by the shift.
+  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+    switch (extend) {
+      case UXTB:
+      case UXTH:
+      case UXTW:
+        ubfm(rd, rn_, non_shift_bits, high_bit);
+        break;
+      case SXTB:
+      case SXTH:
+      case SXTW:
+        sbfm(rd, rn_, non_shift_bits, high_bit);
+        break;
+      case UXTX:
+      case SXTX: {
+        VIXL_ASSERT(rn.GetSizeInBits() == kXRegSize);
+        // Nothing to extend. Just shift.
+        lsl(rd, rn_, left_shift);
+        break;
+      }
+      default:
+        VIXL_UNREACHABLE();
+    }
+  } else {
+    // No need to extend as the extended bits would be shifted away.
+    lsl(rd, rn_, left_shift);
+  }
+}
+
+
+void Assembler::DataProcShiftedRegister(const Register& rd,
+                                        const Register& rn,
+                                        const Operand& operand,
+                                        FlagsUpdate S,
+                                        Instr op) {
+  VIXL_ASSERT(operand.IsShiftedRegister());
+  VIXL_ASSERT(rn.Is64Bits() ||
+              (rn.Is32Bits() && IsUint5(operand.GetShiftAmount())));
+  Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.GetShift()) |
+       ImmDPShift(operand.GetShiftAmount()) | Rm(operand.GetRegister()) |
+       Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+                                         const Register& rn,
+                                         const Operand& operand,
+                                         FlagsUpdate S,
+                                         Instr op) {
+  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+  Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) |
+       ExtendMode(operand.GetExtend()) |
+       ImmExtendShift(operand.GetShiftAmount()) | dest_reg | RnSP(rn));
+}
+
+
+Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
+                                     unsigned access_size,
+                                     LoadStoreScalingOption option) {
+  Instr base = RnSP(addr.GetBaseRegister());
+  int64_t offset = addr.GetOffset();
+
+  if (addr.IsImmediateOffset()) {
+    bool prefer_unscaled =
+        (option == PreferUnscaledOffset) || (option == RequireUnscaledOffset);
+    if (prefer_unscaled && IsImmLSUnscaled(offset)) {
+      // Use the unscaled addressing mode.
+      return base | LoadStoreUnscaledOffsetFixed |
+             ImmLS(static_cast<int>(offset));
+    }
+
+    if ((option != RequireUnscaledOffset) &&
+        IsImmLSScaled(offset, access_size)) {
+      // Use the scaled addressing mode.
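+      // (Illustrative example, not from the upstream source: a 64-bit load
+      // from [x1, #32] has access_size == 3, so the scaled imm12 field below
+      // is 32 >> 3 == 4.)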
+      return base | LoadStoreUnsignedOffsetFixed |
+             ImmLSUnsigned(static_cast<int>(offset) >> access_size);
+    }
+
+    if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
+      // Use the unscaled addressing mode.
+      return base | LoadStoreUnscaledOffsetFixed |
+             ImmLS(static_cast<int>(offset));
+    }
+  }
+
+  // All remaining addressing modes are register-offset, pre-indexed or
+  // post-indexed modes.
+  VIXL_ASSERT((option != RequireUnscaledOffset) &&
+              (option != RequireScaledOffset));
+
+  if (addr.IsRegisterOffset()) {
+    Extend ext = addr.GetExtend();
+    Shift shift = addr.GetShift();
+    unsigned shift_amount = addr.GetShiftAmount();
+
+    // LSL is encoded in the option field as UXTX.
+    if (shift == LSL) {
+      ext = UXTX;
+    }
+
+    // Shifts are encoded in one bit, indicating a left shift by the memory
+    // access size.
+    VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
+    return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) |
+           ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
+  }
+
+  if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
+    return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
+  }
+
+  if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
+    return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
+  }
+
+  // If this point is reached, the MemOperand (addr) cannot be encoded.
+  VIXL_UNREACHABLE();
+  return 0;
+}
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+                          const MemOperand& addr,
+                          LoadStoreOp op,
+                          LoadStoreScalingOption option) {
+  VIXL_ASSERT(CPUHas(rt));
+  Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
+}
+
+
+void Assembler::Prefetch(PrefetchOperation op,
+                         const MemOperand& addr,
+                         LoadStoreScalingOption option) {
+  VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
+
+  Instr prfop = ImmPrefetchOperation(op);
+  Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+  return IsUint12(immediate) ||
+         (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+  return IsUint5(immediate);
+}
+
+
+bool Assembler::IsImmFP16(Float16 imm) {
+  // Valid values will have the form:
+  // aBbb.cdef.gh00.0000
+  uint16_t bits = Float16ToRawbits(imm);
+  // bits[5..0] are cleared.
+  if ((bits & 0x3f) != 0) {
+    return false;
+  }
+
+  // bits[13..12] are all set or all cleared.
+  uint16_t b_pattern = (bits >> 12) & 0x03;
+  if (b_pattern != 0 && b_pattern != 0x03) {
+    return false;
+  }
+
+  // bit[14] and bit[13] are opposite.
+  if (((bits ^ (bits << 1)) & 0x4000) == 0) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+  // Valid values will have the form:
+  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+  uint32_t bits = FloatToRawbits(imm);
+  // bits[18..0] are cleared.
+  if ((bits & 0x7ffff) != 0) {
+    return false;
+  }
+
+  // bits[29..25] are all set or all cleared.
+  uint32_t b_pattern = (bits >> 16) & 0x3e00;
+  if (b_pattern != 0 && b_pattern != 0x3e00) {
+    return false;
+  }
+
+  // bit[30] and bit[29] are opposite.
+  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+  // Valid values will have the form:
+  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+  // 0000.0000.0000.0000.0000.0000.0000.0000
+  uint64_t bits = DoubleToRawbits(imm);
+  // bits[47..0] are cleared.
+  if ((bits & 0x0000ffffffffffff) != 0) {
+    return false;
+  }
+
+  // bits[61..54] are all set or all cleared.
+  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+  if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+    return false;
+  }
+
+  // bit[62] and bit[61] are opposite.
+  if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
+  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+  return IsMultiple(offset, 1 << access_size) &&
+         IsInt7(offset / (1 << access_size));
+}
+
+
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
+  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+  return IsMultiple(offset, 1 << access_size) &&
+         IsUint12(offset / (1 << access_size));
+}
+
+
+bool Assembler::IsImmLSUnscaled(int64_t offset) { return IsInt9(offset); }
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+  return IsImmMovz(~imm, reg_size);
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it cannot be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+                             unsigned width,
+                             unsigned* n,
+                             unsigned* imm_s,
+                             unsigned* imm_r) {
+  VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+  bool negate = false;
+
+  // Logical immediates are encoded using parameters n, imm_s and imm_r using
+  // the following table:
+  //
+  //    N   imms    immr    size        S             R
+  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+  //    (s bits must not be all set)
+  //
+  // A pattern is constructed of size bits, where the least significant S+1
+  // bits are set. The pattern is rotated right by R, and repeated across a
+  // 32 or 64-bit value, depending on destination register width.
+  //
+  // Put another way: the basic format of a logical immediate is a single
+  // contiguous stretch of 1 bits, repeated across the whole word at intervals
+  // given by a power of 2. To identify them quickly, we first locate the
+  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+  // is different for every logical immediate, so it gives us all the
+  // information we need to identify the only logical immediate that our input
+  // could be, and then we simply check if that's the value we actually have.
+  //
+  // (The rotation parameter does give the possibility of the stretch of 1 bits
+  // going 'round the end' of the word.
To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate. So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + if (width == kWRegSize) { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // Avoid making the assumption that the most-significant 32 bits are zero by + // shifting the value left and duplicating it. + value <<= kWRegSize; + value |= value >> kWRegSize; + } + + // The basic analysis idea: imagine our input word looks like this. + // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + uint64_t a = LowestSetBit(value); + uint64_t value_plus_a = value + a; + uint64_t b = LowestSetBit(value_plus_a); + uint64_t value_plus_a_minus_b = value_plus_a - b; + uint64_t c = LowestSetBit(value_plus_a_minus_b); + + int d, clz_a, out_n; + uint64_t mask; + + if (c != 0) { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = CountLeadingZeros(a, kXRegSize); + int clz_c = CountLeadingZeros(c, kXRegSize); + d = clz_a - clz_c; + mask = ((UINT64_C(1) << d) - 1); + out_n = 0; + } else { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), for which we just return + // false. + return false; + } else { + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. 
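+      // (Illustrative worked example, not from the upstream source: the
+      // single-stretch value 0x0000ffff00000000 reaches this path and encodes
+      // as n = 1, imm_s = 0b001111 (16 set bits), imm_r = 32.)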
+ clz_a = CountLeadingZeros(a, kXRegSize); + d = 64; + mask = ~UINT64_C(0); + out_n = 1; + } + } + + // If the repeat period d is not a power of two, it can't be encoded. + if (!IsPowerOf2(d)) { + return false; + } + + if (((b - a) & ~mask) != 0) { + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + return false; + } + + // The only possible option is b - a repeated every d bits. Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + static const uint64_t multipliers[] = { + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }; + uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57]; + uint64_t candidate = (b - a) * multiplier; + + if (value != candidate) { + // The candidate pattern doesn't match our input value, so fail. + return false; + } + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize); + int s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + int r; + if (negate) { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). + s = d - s; + r = (clz_b + 1) & (d - 1); + } else { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (2 * -d) with our computed s to form imms. + if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) { + *n = out_n; + *imm_s = ((2 * -d) | (s - 1)) & 0x3f; + *imm_r = r; + } + + return true; +} + + +LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? LDR_x : LDR_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBits()) { + case kBRegSize: + return LDR_b; + case kHRegSize: + return LDR_h; + case kSRegSize: + return LDR_s; + case kDRegSize: + return LDR_d; + default: + VIXL_ASSERT(rt.IsQ()); + return LDR_q; + } + } +} + + +LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? 
STR_x : STR_w;
+  } else {
+    VIXL_ASSERT(rt.IsVRegister());
+    switch (rt.GetSizeInBits()) {
+      case kBRegSize:
+        return STR_b;
+      case kHRegSize:
+        return STR_h;
+      case kSRegSize:
+        return STR_s;
+      case kDRegSize:
+        return STR_d;
+      default:
+        VIXL_ASSERT(rt.IsQ());
+        return STR_q;
+    }
+  }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+                                          const CPURegister& rt2) {
+  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+  USE(rt2);
+  if (rt.IsRegister()) {
+    return rt.Is64Bits() ? STP_x : STP_w;
+  } else {
+    VIXL_ASSERT(rt.IsVRegister());
+    switch (rt.GetSizeInBytes()) {
+      case kSRegSizeInBytes:
+        return STP_s;
+      case kDRegSizeInBytes:
+        return STP_d;
+      default:
+        VIXL_ASSERT(rt.IsQ());
+        return STP_q;
+    }
+  }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+                                         const CPURegister& rt2) {
+  VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
+  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
+                                      LoadStorePairLBit);
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2) {
+  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+  USE(rt2);
+  if (rt.IsRegister()) {
+    return rt.Is64Bits() ? STNP_x : STNP_w;
+  } else {
+    VIXL_ASSERT(rt.IsVRegister());
+    switch (rt.GetSizeInBytes()) {
+      case kSRegSizeInBytes:
+        return STNP_s;
+      case kDRegSizeInBytes:
+        return STNP_d;
+      default:
+        VIXL_ASSERT(rt.IsQ());
+        return STNP_q;
+    }
+  }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2) {
+  VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
+  return static_cast<LoadStorePairNonTemporalOp>(
+      StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
+}
+
+
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+  if (rt.IsRegister()) {
+    return rt.IsX() ? LDR_x_lit : LDR_w_lit;
+  } else {
+    VIXL_ASSERT(rt.IsVRegister());
+    switch (rt.GetSizeInBytes()) {
+      case kSRegSizeInBytes:
+        return LDR_s_lit;
+      case kDRegSizeInBytes:
+        return LDR_d_lit;
+      default:
+        VIXL_ASSERT(rt.IsQ());
+        return LDR_q_lit;
+    }
+  }
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt) const {
+  // Core registers are available without any particular CPU features.
+  if (rt.IsRegister()) return true;
+  VIXL_ASSERT(rt.IsVRegister());
+  // The architecture does not allow FP and NEON to be implemented separately,
+  // but we can crudely categorise them based on register size, since FP only
+  // uses D, S and (occasionally) H registers.
+  if (rt.IsH() || rt.IsS() || rt.IsD()) {
+    return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
+  }
+  VIXL_ASSERT(rt.IsB() || rt.IsQ());
+  return CPUHas(CPUFeatures::kNEON);
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
+  // This is currently only used for loads and stores, where rt and rt2 must
+  // have the same size and type. We could extend this to cover other cases if
+  // necessary, but for now we can avoid checking both registers.
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + return CPUHas(rt); +} + + +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + int number_of_valid_regs = 0; + int number_of_valid_fpregs = 0; + + RegList unique_regs = 0; + RegList unique_fpregs = 0; + + const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; + + for (size_t i = 0; i < ArrayLength(regs); i++) { + if (regs[i].IsRegister()) { + number_of_valid_regs++; + unique_regs |= regs[i].GetBit(); + } else if (regs[i].IsVRegister()) { + number_of_valid_fpregs++; + unique_fpregs |= regs[i].GetBit(); + } else { + VIXL_ASSERT(!regs[i].IsValid()); + } + } + + int number_of_unique_regs = CountSetBits(unique_regs); + int number_of_unique_fpregs = CountSetBits(unique_fpregs); + + VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs); + VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs); + + return (number_of_valid_regs != number_of_unique_regs) || + (number_of_valid_fpregs != number_of_unique_fpregs); +} + + +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1); + match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); + match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); + match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); + match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); + match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); + match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); + return match; +} + +bool AreEven(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + VIXL_ASSERT(reg1.IsValid()); + bool even = (reg1.GetCode() % 2) == 0; + even &= !reg2.IsValid() || ((reg2.GetCode() % 2) == 0); + even &= !reg3.IsValid() || ((reg3.GetCode() % 2) == 0); + even &= !reg4.IsValid() || ((reg4.GetCode() % 2) == 0); + even &= !reg5.IsValid() || ((reg5.GetCode() % 2) == 0); + even &= !reg6.IsValid() || ((reg6.GetCode() % 2) == 0); + even &= !reg7.IsValid() || ((reg7.GetCode() % 2) == 0); + even &= !reg8.IsValid() || ((reg8.GetCode() % 2) == 0); + return even; +} + + +bool AreConsecutive(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + + if (!reg2.IsValid()) { + return true; + } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + if (!reg3.IsValid()) { + return true; + } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + if (!reg4.IsValid()) { + return true; + } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + return true; +} + + +bool AreSameFormat(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3, + const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameFormat(reg1); + match &= !reg3.IsValid() || reg3.IsSameFormat(reg1); + match 
&= !reg4.IsValid() || reg4.IsSameFormat(reg1); + return match; +} + + +bool AreConsecutive(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3, + const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + + if (!reg2.IsValid()) { + return true; + } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + if (!reg3.IsValid()) { + return true; + } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + if (!reg4.IsValid()) { + return true; + } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + return true; +} +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/cpu-aarch64.cc b/dep/vixl/src/aarch64/cpu-aarch64.cc new file mode 100644 index 000000000..978486535 --- /dev/null +++ b/dep/vixl/src/aarch64/cpu-aarch64.cc @@ -0,0 +1,178 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "../utils-vixl.h" + +#include "cpu-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Initialise to smallest possible cache size. +unsigned CPU::dcache_line_size_ = 1; +unsigned CPU::icache_line_size_ = 1; + + +// Currently computes I and D cache line size. +void CPU::SetUp() { + uint32_t cache_type_register = GetCacheType(); + + // The cache type register holds information about the caches, including I + // D caches line size. + static const int kDCacheLineSizeShift = 16; + static const int kICacheLineSizeShift = 0; + static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift; + static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift; + + // The cache type register holds the size of the I and D caches in words as + // a power of two. 
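As a concrete illustration of the decode performed below, here is a standalone sketch (not part of this patch; the CTR_EL0 value is illustrative, chosen to resemble a Cortex-A72): each line-size field holds log2 of the line size in 4-byte words, so a field value of 4 decodes to 4 << 4 = 64 bytes.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t ctr_el0 = 0x8444c004;              // illustrative CTR_EL0 value
  const uint32_t dminline = (ctr_el0 >> 16) & 0xf;  // D cache: log2(words)
  const uint32_t iminline = ctr_el0 & 0xf;          // I cache: log2(words)
  std::printf("D-cache line: %u bytes\n", 4u << dminline);  // 4 << 4 = 64
  std::printf("I-cache line: %u bytes\n", 4u << iminline);  // 4 << 4 = 64
  return 0;
}
```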
+  uint32_t dcache_line_size_power_of_two =
+      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+  uint32_t icache_line_size_power_of_two =
+      (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+  dcache_line_size_ = 4 << dcache_line_size_power_of_two;
+  icache_line_size_ = 4 << icache_line_size_power_of_two;
+}
+
+
+uint32_t CPU::GetCacheType() {
+#ifdef __aarch64__
+  uint64_t cache_type_register;
+  // Copy the content of the cache type register to a core register.
+  __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT(runtime/references)
+                       : [ctr] "=r"(cache_type_register));
+  VIXL_ASSERT(IsUint32(cache_type_register));
+  return static_cast<uint32_t>(cache_type_register);
+#else
+  // This will lead to a cache with 1 byte long lines, which is fine since
+  // neither EnsureIAndDCacheCoherency nor the simulator will need this
+  // information.
+  return 0;
+#endif
+}
+
+
+void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
+#ifdef __aarch64__
+  // Implement the cache synchronisation for all targets where AArch64 is the
+  // host, even if we're building the simulator for an AArch64 host. This
+  // allows for cases where the user wants to simulate code as well as run it
+  // natively.
+
+  if (length == 0) {
+    return;
+  }
+
+  // The code below assumes user space cache operations are allowed.
+
+  // Work out the line sizes for each cache, and use them to determine the
+  // start addresses.
+  uintptr_t start = reinterpret_cast<uintptr_t>(address);
+  uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
+  uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
+  uintptr_t dline = start & ~(dsize - 1);
+  uintptr_t iline = start & ~(isize - 1);
+
+  // Cache line sizes are always a power of 2.
+  VIXL_ASSERT(IsPowerOf2(dsize));
+  VIXL_ASSERT(IsPowerOf2(isize));
+  uintptr_t end = start + length;
+
+  do {
+    __asm__ __volatile__(
+        // Clean each line of the D cache containing the target data.
+        //
+        // dc : Data Cache maintenance
+        //  c : Clean
+        // va : by (Virtual) Address
+        //  u : to the point of Unification
+        // The point of unification for a processor is the point by which the
+        // instruction and data caches are guaranteed to see the same copy of a
+        // memory location. See ARM DDI 0406B page B2-12 for more information.
+        " dc cvau, %[dline]\n"
+        :
+        : [dline] "r"(dline)
+        // This code does not write to memory, but the "memory" dependency
+        // prevents GCC from reordering the code.
+        : "memory");
+    dline += dsize;
+  } while (dline < end);
+
+  __asm__ __volatile__(
+      // Make sure that the data cache operations (above) complete before the
+      // instruction cache operations (below).
+      //
+      // dsb : Data Synchronisation Barrier
+      // ish : Inner SHareable domain
+      //
+      // The point of unification for an Inner Shareable shareability domain is
+      // the point by which the instruction and data caches of all the
+      // processors
+      // in that Inner Shareable shareability domain are guaranteed to see the
+      // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+      // information.
+      " dsb ish\n"
+      :
+      :
+      : "memory");
+
+  do {
+    __asm__ __volatile__(
+        // Invalidate each line of the I cache containing the target data.
+        //
+        // ic : Instruction Cache maintenance
+        //  i : Invalidate
+        // va : by Address
+        //  u : to the point of Unification
+        " ic ivau, %[iline]\n"
+        :
+        : [iline] "r"(iline)
+        : "memory");
+    iline += isize;
+  } while (iline < end);
+
+  __asm__ __volatile__(
+      // Make sure that the instruction cache operations (above) take effect
+      // before the isb (below).
+ " dsb ish\n" + + // Ensure that any instructions already in the pipeline are discarded and + // reloaded from the new data. + // isb : Instruction Synchronisation Barrier + " isb\n" + : + : + : "memory"); +#else + // If the host isn't AArch64, we must be using the simulator, so this function + // doesn't have to do anything. + USE(address, length); +#endif +} + +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/cpu-features-auditor-aarch64.cc b/dep/vixl/src/aarch64/cpu-features-auditor-aarch64.cc new file mode 100644 index 000000000..66f0d8065 --- /dev/null +++ b/dep/vixl/src/aarch64/cpu-features-auditor-aarch64.cc @@ -0,0 +1,1059 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Arm Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "cpu-features.h" +#include "globals-vixl.h" +#include "utils-vixl.h" +#include "decoder-aarch64.h" + +#include "cpu-features-auditor-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Every instruction must update last_instruction_, even if only to clear it, +// and every instruction must also update seen_ once it has been fully handled. +// This scope makes that simple, and allows early returns in the decode logic. +class CPUFeaturesAuditor::RecordInstructionFeaturesScope { + public: + explicit RecordInstructionFeaturesScope(CPUFeaturesAuditor* auditor) + : auditor_(auditor) { + auditor_->last_instruction_ = CPUFeatures::None(); + } + ~RecordInstructionFeaturesScope() { + auditor_->seen_.Combine(auditor_->last_instruction_); + } + + void Record(const CPUFeatures& features) { + auditor_->last_instruction_.Combine(features); + } + + void Record(CPUFeatures::Feature feature0, + CPUFeatures::Feature feature1 = CPUFeatures::kNone, + CPUFeatures::Feature feature2 = CPUFeatures::kNone, + CPUFeatures::Feature feature3 = CPUFeatures::kNone) { + auditor_->last_instruction_.Combine(feature0, feature1, feature2, feature3); + } + + // If exactly one of a or b is known to be available, record it. Otherwise, + // record both. 
This is intended for encodings that can be provided by two + // different features. + void RecordOneOrBothOf(CPUFeatures::Feature a, CPUFeatures::Feature b) { + bool hint_a = auditor_->available_.Has(a); + bool hint_b = auditor_->available_.Has(b); + if (hint_a && !hint_b) { + Record(a); + } else if (hint_b && !hint_a) { + Record(b); + } else { + Record(a, b); + } + } + + private: + CPUFeaturesAuditor* auditor_; +}; + +void CPUFeaturesAuditor::LoadStoreHelper(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStoreMask)) { + case LDR_b: + case LDR_q: + case STR_b: + case STR_q: + scope.Record(CPUFeatures::kNEON); + return; + case LDR_h: + case LDR_s: + case LDR_d: + case STR_h: + case STR_s: + case STR_d: + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::LoadStorePairHelper(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStorePairMask)) { + case LDP_q: + case STP_q: + scope.Record(CPUFeatures::kNEON); + return; + case LDP_s: + case LDP_d: + case STP_s: + case STP_d: { + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + } + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitAddSubExtended(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubShifted(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubWithCarry(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAtomicMemory(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(AtomicMemoryMask)) { + case LDAPRB: + case LDAPRH: + case LDAPR_w: + case LDAPR_x: + scope.Record(CPUFeatures::kRCpc); + return; + default: + // Everything else belongs to the Atomics extension. 
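+ // For example, LDADD, LDSET and SWP all land here and so record kAtomics.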
+ scope.Record(CPUFeatures::kAtomics); + return; + } +} + +void CPUFeaturesAuditor::VisitBitfield(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCompareBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalCompareImmediate( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalCompareRegister( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalSelect(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCrypto2RegSHA(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCrypto3RegSHA(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCryptoAES(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitDataProcessing1Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(DataProcessing1SourceMask)) { + case PACIA: + case PACIB: + case PACDA: + case PACDB: + case AUTIA: + case AUTIB: + case AUTDA: + case AUTDB: + case PACIZA: + case PACIZB: + case PACDZA: + case PACDZB: + case AUTIZA: + case AUTIZB: + case AUTDZA: + case AUTDZB: + case XPACI: + case XPACD: + scope.Record(CPUFeatures::kPAuth); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitDataProcessing2Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(DataProcessing2SourceMask)) { + case CRC32B: + case CRC32H: + case CRC32W: + case CRC32X: + case CRC32CB: + case CRC32CH: + case CRC32CW: + case CRC32CX: + scope.Record(CPUFeatures::kCRC32); + return; + case PACGA: + scope.Record(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitDataProcessing3Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitException(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitExtract(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitFPCompare(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPCompareMask)) { + case FCMP_h: + case FCMP_h_zero: + case FCMPE_h: + case FCMPE_h_zero: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPConditionalCompare(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. 
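The visitors above and below all follow the same shape: open a RecordInstructionFeaturesScope, then record whatever the matched encoding requires. How the auditor is driven from outside is not shown in this file; a minimal sketch (hypothetical driver code, not part of this patch, with names assumed from the VIXL public headers) might look like:

```cpp
#include "aarch64/cpu-features-auditor-aarch64.h"
#include "aarch64/decoder-aarch64.h"

using namespace vixl;
using namespace vixl::aarch64;

// Report the CPU features needed by the generated code in [start, end).
// `start` and `end` are assumed to be valid, caller-supplied pointers.
CPUFeatures AuditCode(const Instruction* start, const Instruction* end) {
  Decoder decoder;
  // The `available` hint lets RecordOneOrBothOf() settle on a single
  // feature where an encoding could be provided by either of two.
  CPUFeaturesAuditor auditor(&decoder,
                             CPUFeatures(CPUFeatures::kFP, CPUFeatures::kNEON));
  for (const Instruction* instr = start; instr < end;
       instr = instr->GetNextInstruction()) {
    decoder.Decode(instr);  // dispatches to the Visit* methods in this file
  }
  return auditor.GetSeen();  // union of the features recorded per instruction
}
```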
+ scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_h: + case FCCMPE_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPConditionalSelect(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + if (instr->Mask(FPConditionalSelectMask) == FCSEL_h) { + scope.Record(CPUFeatures::kFPHalf); + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing1Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing1SourceMask)) { + case FMOV_h: + case FABS_h: + case FNEG_h: + case FSQRT_h: + case FRINTN_h: + case FRINTP_h: + case FRINTM_h: + case FRINTZ_h: + case FRINTA_h: + case FRINTX_h: + case FRINTI_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + // This category includes some half-precision FCVT instructions that do + // not require FPHalf. + return; + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing2Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FMUL_h: + case FDIV_h: + case FADD_h: + case FSUB_h: + case FMAX_h: + case FMIN_h: + case FMAXNM_h: + case FMINNM_h: + case FNMUL_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing3Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing3SourceMask)) { + case FMADD_h: + case FMSUB_h: + case FNMADD_h: + case FNMSUB_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPFixedPointConvert(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_wh_fixed: + case FCVTZS_xh_fixed: + case FCVTZU_wh_fixed: + case FCVTZU_xh_fixed: + case SCVTF_hw_fixed: + case SCVTF_hx_fixed: + case UCVTF_hw_fixed: + case UCVTF_hx_fixed: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + if (instr->Mask(FPImmediateMask) == FMOV_h_imm) { + scope.Record(CPUFeatures::kFPHalf); + } +} + +void CPUFeaturesAuditor::VisitFPIntegerConvert(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. 
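+ // Conversions involving a half-precision register (the _wh/_xh/_hw/_hx
+ // forms below) also record kFPHalf; FMOV to or from the upper double of a
+ // V register records kNEON, and FJCVTZS records kJSCVT.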
+ scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPIntegerConvertMask)) { + case FCVTAS_wh: + case FCVTAS_xh: + case FCVTAU_wh: + case FCVTAU_xh: + case FCVTMS_wh: + case FCVTMS_xh: + case FCVTMU_wh: + case FCVTMU_xh: + case FCVTNS_wh: + case FCVTNS_xh: + case FCVTNU_wh: + case FCVTNU_xh: + case FCVTPS_wh: + case FCVTPS_xh: + case FCVTPU_wh: + case FCVTPU_xh: + case FCVTZS_wh: + case FCVTZS_xh: + case FCVTZU_wh: + case FCVTZU_xh: + case FMOV_hw: + case FMOV_hx: + case FMOV_wh: + case FMOV_xh: + case SCVTF_hw: + case SCVTF_hx: + case UCVTF_hw: + case UCVTF_hx: + scope.Record(CPUFeatures::kFPHalf); + return; + case FMOV_d1_x: + case FMOV_x_d1: + scope.Record(CPUFeatures::kNEON); + return; + case FJCVTZS: + scope.Record(CPUFeatures::kJSCVT); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitLoadLiteral(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadLiteralMask)) { + case LDR_s_lit: + case LDR_d_lit: + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + case LDR_q_lit: + scope.Record(CPUFeatures::kNEON); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitLoadStoreExclusive(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStoreExclusiveMask)) { + case CAS_w: + case CASA_w: + case CASL_w: + case CASAL_w: + case CAS_x: + case CASA_x: + case CASL_x: + case CASAL_x: + case CASB: + case CASAB: + case CASLB: + case CASALB: + case CASH: + case CASAH: + case CASLH: + case CASALH: + case CASP_w: + case CASPA_w: + case CASPL_w: + case CASPAL_w: + case CASP_x: + case CASPA_x: + case CASPL_x: + case CASPAL_x: + scope.Record(CPUFeatures::kAtomics); + return; + case STLLRB: + case LDLARB: + case STLLRH: + case LDLARH: + case STLLR_w: + case LDLAR_w: + case STLLR_x: + case LDLAR_x: + scope.Record(CPUFeatures::kLORegions); + return; + default: + // No special CPU features. 
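+ // (The ARMv8.0 exclusives, LDXR/STXR and their acquire/release forms,
+ // need no extension.)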
+ return; + } +} + +void CPUFeaturesAuditor::VisitLoadStorePairNonTemporal( + const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairOffset(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairPostIndex(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairPreIndex(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePostIndex(const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePreIndex(const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreRegisterOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreUnscaledOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreUnsignedOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLogicalImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitLogicalShifted(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitMoveWideImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON2RegMisc(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + case NEON_FNEG: + case NEON_FSQRT: + case NEON_FCVTL: + case NEON_FCVTN: + case NEON_FCVTXN: + case NEON_FRINTI: + case NEON_FRINTX: + case NEON_FRINTA: + case NEON_FRINTM: + case NEON_FRINTN: + case NEON_FRINTP: + case NEON_FRINTZ: + case NEON_FCVTNS: + case NEON_FCVTNU: + case NEON_FCVTPS: + case NEON_FCVTPU: + case NEON_FCVTMS: + case NEON_FCVTMU: + case NEON_FCVTZS: + case NEON_FCVTZU: + case NEON_FCVTAS: + case NEON_FCVTAU: + case NEON_SCVTF: + case NEON_UCVTF: + case NEON_FRSQRTE: + case NEON_FRECPE: + case NEON_FCMGT_zero: + case NEON_FCMGE_zero: + case NEON_FCMEQ_zero: + case NEON_FCMLE_zero: + case NEON_FCMLT_zero: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEON2RegMiscFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON3Different(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON3Same(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + scope.Record(CPUFeatures::kFP); + } +} + +void CPUFeaturesAuditor::VisitNEON3SameExtra(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
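+ // The complex-arithmetic forms below (FCMLA/FCADD) also record kFcma,
+ // plus kNEONHalf when size is 1 (half-precision elements).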
+ scope.Record(CPUFeatures::kNEON); + if ((instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) || + (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD)) { + scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma); + if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf); + } else { + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + case NEON_UDOT: + scope.Record(CPUFeatures::kDotProduct); + return; + case NEON_SQRDMLAH: + case NEON_SQRDMLSH: + scope.Record(CPUFeatures::kRDM); + return; + default: + // No additional features. + return; + } + } +} + +void CPUFeaturesAuditor::VisitNEON3SameFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON FP16 support. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONAcrossLanes(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + // FMAXV_H, FMINV_H, FMAXNMV_H, FMINNMV_H + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf); + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + // FMAXV, FMINV, FMAXNMV, FMINNMV + scope.Record(CPUFeatures::kFP); + } +} + +void CPUFeaturesAuditor::VisitNEONByIndexedElement(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SDOT_byelement: + case NEON_UDOT_byelement: + scope.Record(CPUFeatures::kDotProduct); + return; + case NEON_SQRDMLAH_byelement: + case NEON_SQRDMLSH_byelement: + scope.Record(CPUFeatures::kRDM); + return; + default: + // Fall through to check other FP instructions. + break; + } + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMLA_H_byelement: + case NEON_FMLS_H_byelement: + case NEON_FMUL_H_byelement: + case NEON_FMULX_H_byelement: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement: + case NEON_FMLS_byelement: + case NEON_FMUL_byelement: + case NEON_FMULX_byelement: + scope.Record(CPUFeatures::kFP); + return; + default: + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma); + if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf); + return; + } + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONCopy(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONExtract(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStruct( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStruct( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONModifiedImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->GetNEONCmode() == 0xf) { + // FMOV (vector, immediate), double-, single- or half-precision. + scope.Record(CPUFeatures::kFP); + if (instr->ExtractBit(11)) scope.Record(CPUFeatures::kNEONHalf); + } + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONPerm(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar2RegMisc(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRECPE_scalar: + case NEON_FRECPX_scalar: + case NEON_FRSQRTE_scalar: + case NEON_FCMGT_zero_scalar: + case NEON_FCMGE_zero_scalar: + case NEON_FCMEQ_zero_scalar: + case NEON_FCMLE_zero_scalar: + case NEON_FCMLT_zero_scalar: + case NEON_SCVTF_scalar: + case NEON_UCVTF_scalar: + case NEON_FCVTNS_scalar: + case NEON_FCVTNU_scalar: + case NEON_FCVTPS_scalar: + case NEON_FCVTPU_scalar: + case NEON_FCVTMS_scalar: + case NEON_FCVTMU_scalar: + case NEON_FCVTZS_scalar: + case NEON_FCVTZU_scalar: + case NEON_FCVTAS_scalar: + case NEON_FCVTAU_scalar: + case NEON_FCVTXN_scalar: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalar2RegMiscFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3Diff(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3Same(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + scope.Record(CPUFeatures::kFP); + } +} + +void CPUFeaturesAuditor::VisitNEONScalar3SameExtra(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON and RDM. + scope.Record(CPUFeatures::kNEON, CPUFeatures::kRDM); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3SameFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. 
+ scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalarByIndexedElement( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQRDMLAH_byelement_scalar: + case NEON_SQRDMLSH_byelement_scalar: + scope.Record(CPUFeatures::kRDM); + return; + default: + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMLA_H_byelement_scalar: + case NEON_FMLS_H_byelement_scalar: + case NEON_FMUL_H_byelement_scalar: + case NEON_FMULX_H_byelement_scalar: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement_scalar: + case NEON_FMLS_byelement_scalar: + case NEON_FMUL_byelement_scalar: + case NEON_FMULX_byelement_scalar: + scope.Record(CPUFeatures::kFP); + return; + } + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalarCopy(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalarPairwise(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_FMAXNMP_h_scalar: + case NEON_FADDP_h_scalar: + case NEON_FMAXP_h_scalar: + case NEON_FMINNMP_h_scalar: + case NEON_FMINP_h_scalar: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FADDP_scalar: + case NEON_FMAXP_scalar: + case NEON_FMAXNMP_scalar: + case NEON_FMINP_scalar: + case NEON_FMINNMP_scalar: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalarShiftImmediate( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZS_imm_scalar: + case NEON_FCVTZU_imm_scalar: + case NEON_SCVTF_imm_scalar: + case NEON_UCVTF_imm_scalar: + scope.Record(CPUFeatures::kFP); + // If immh is 0b001x then the data type is FP16, and requires kNEONHalf. + if ((instr->GetImmNEONImmh() & 0xe) == 0x2) { + scope.Record(CPUFeatures::kNEONHalf); + } + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONShiftImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SCVTF_imm: + case NEON_UCVTF_imm: + case NEON_FCVTZS_imm: + case NEON_FCVTZU_imm: + scope.Record(CPUFeatures::kFP); + // If immh is 0b001x then the data type is FP16, and requires kNEONHalf. + if ((instr->GetImmNEONImmh() & 0xe) == 0x2) { + scope.Record(CPUFeatures::kNEONHalf); + } + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONTable(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitPCRelAddressing(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitSystem(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + CPUFeatures required; + switch (instr->GetInstructionBits()) { + case PACIA1716: + case PACIB1716: + case AUTIA1716: + case AUTIB1716: + case PACIAZ: + case PACIASP: + case PACIBZ: + case PACIBSP: + case AUTIAZ: + case AUTIASP: + case AUTIBZ: + case AUTIBSP: + case XPACLRI: + required.Combine(CPUFeatures::kPAuth); + break; + default: + if (instr->GetImmHint() == ESB) required.Combine(CPUFeatures::kRAS); + break; + } + + // These are all HINT instructions, and behave as NOPs if the corresponding + // features are not implemented, so we record the corresponding features + // only if they are available. + if (available_.Has(required)) scope.Record(required); + } +} + +void CPUFeaturesAuditor::VisitTestBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnallocated(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnconditionalBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnconditionalBranchToRegister( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BRAAZ: + case BRABZ: + case BLRAAZ: + case BLRABZ: + case RETAA: + case RETAB: + case BRAA: + case BRAB: + case BLRAA: + case BLRAB: + scope.Record(CPUFeatures::kPAuth); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitUnimplemented(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + + +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/decoder-aarch64.cc b/dep/vixl/src/aarch64/decoder-aarch64.cc new file mode 100644 index 000000000..4cac45c45 --- /dev/null +++ b/dep/vixl/src/aarch64/decoder-aarch64.cc @@ -0,0 +1,1029 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "../globals-vixl.h"
+#include "../utils-vixl.h"
+
+#include "decoder-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+void Decoder::DecodeInstruction(const Instruction* instr) {
+  if (instr->ExtractBits(28, 27) == 0) {
+    VisitUnallocated(instr);
+  } else {
+    switch (instr->ExtractBits(27, 24)) {
+      // 0: PC relative addressing.
+      case 0x0:
+        DecodePCRelAddressing(instr);
+        break;
+
+      // 1: Add/sub immediate.
+      case 0x1:
+        DecodeAddSubImmediate(instr);
+        break;
+
+      // A: Logical shifted register.
+      //    Add/sub with carry.
+      //    Conditional compare register.
+      //    Conditional compare immediate.
+      //    Conditional select.
+      //    Data processing 1 source.
+      //    Data processing 2 source.
+      // B: Add/sub shifted register.
+      //    Add/sub extended register.
+      //    Data processing 3 source.
+      case 0xA:
+      case 0xB:
+        DecodeDataProcessing(instr);
+        break;
+
+      // 2: Logical immediate.
+      //    Move wide immediate.
+      case 0x2:
+        DecodeLogical(instr);
+        break;
+
+      // 3: Bitfield.
+      //    Extract.
+      case 0x3:
+        DecodeBitfieldExtract(instr);
+        break;
+
+      // 4: Unconditional branch immediate.
+      //    Exception generation.
+      //    Compare and branch immediate.
+      // 5: Compare and branch immediate.
+      //    Conditional branch.
+      //    System.
+      // 6,7: Unconditional branch.
+      //      Test and branch immediate.
+      case 0x4:
+      case 0x5:
+      case 0x6:
+      case 0x7:
+        DecodeBranchSystemException(instr);
+        break;
+
+      // 8,9: Load/store register pair post-index.
+      //      Load register literal.
+      //      Load/store register unscaled immediate.
+      //      Load/store register immediate post-index.
+      //      Load/store register immediate pre-index.
+      //      Load/store register offset.
+      //      Load/store exclusive.
+      // C,D: Load/store register pair offset.
+      //      Load/store register pair pre-index.
+      //      Load/store register unsigned immediate.
+      //      Advanced SIMD.
+      case 0x8:
+      case 0x9:
+      case 0xC:
+      case 0xD:
+        DecodeLoadStore(instr);
+        break;
+
+      // E: FP fixed point conversion.
+      //    FP integer conversion.
+      //    FP data processing 1 source.
+      //    FP compare.
+      //    FP immediate.
+      //    FP data processing 2 source.
+      //    FP conditional compare.
+      //    FP conditional select.
+      //    Advanced SIMD.
+      // F: FP data processing 3 source.
+      //    Advanced SIMD.
+      case 0xE:
+      case 0xF:
+        DecodeFP(instr);
+        break;
+    }
+  }
+}
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+  visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+  visitors_.push_front(new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+                                  DecoderVisitor* registered_visitor) {
+  std::list<DecoderVisitor*>::iterator it;
+  for (it = visitors_.begin(); it != visitors_.end(); it++) {
+    if (*it == registered_visitor) {
+      visitors_.insert(it, new_visitor);
+      return;
+    }
+  }
+  // We reached the end of the list. The last element must be
+  // registered_visitor.
+  VIXL_ASSERT(*it == registered_visitor);
+  visitors_.insert(it, new_visitor);
+}
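Anything implementing DecoderVisitor can be attached through the registration functions above. A minimal sketch of a custom visitor (hypothetical code, not part of this patch; it reuses the VISITOR_LIST macro from decoder-aarch64.h, the same macro the DEFINE_VISITOR_CALLERS block at the end of this file expands):

```cpp
#include <cstdint>

#include "aarch64/decoder-aarch64.h"

using namespace vixl::aarch64;

// Counts every instruction the decoder dispatches. VISITOR_LIST expands
// DECLARE once per instruction class, implementing the whole interface.
class InstructionCounter : public DecoderVisitor {
 public:
  InstructionCounter() : count_(0) {}

#define DECLARE(A) \
  virtual void Visit##A(const Instruction*) VIXL_OVERRIDE { count_++; }
  VISITOR_LIST(DECLARE)
#undef DECLARE

  uint64_t GetCount() const { return count_; }

 private:
  uint64_t count_;
};

// Usage sketch:
//   Decoder decoder;
//   InstructionCounter counter;
//   decoder.AppendVisitor(&counter);  // runs after any earlier visitors
//   decoder.Decode(instr);            // counter.GetCount() increments
```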
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+                                 DecoderVisitor* registered_visitor) {
+  std::list<DecoderVisitor*>::iterator it;
+  for (it = visitors_.begin(); it != visitors_.end(); it++) {
+    if (*it == registered_visitor) {
+      it++;
+      visitors_.insert(it, new_visitor);
+      return;
+    }
+  }
+  // We reached the end of the list. The last element must be
+  // registered_visitor.
+  VIXL_ASSERT(*it == registered_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+  visitors_.remove(visitor);
+}
+
+
+void Decoder::DecodePCRelAddressing(const Instruction* instr) {
+  VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x0);
+  // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+  // decode.
+  VIXL_ASSERT(instr->ExtractBit(28) == 0x1);
+  VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(const Instruction* instr) {
+  VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x4) ||
+              (instr->ExtractBits(27, 24) == 0x5) ||
+              (instr->ExtractBits(27, 24) == 0x6) ||
+              (instr->ExtractBits(27, 24) == 0x7));
+
+  switch (instr->ExtractBits(31, 29)) {
+    case 0:
+    case 4: {
+      VisitUnconditionalBranch(instr);
+      break;
+    }
+    case 1:
+    case 5: {
+      if (instr->ExtractBit(25) == 0) {
+        VisitCompareBranch(instr);
+      } else {
+        VisitTestBranch(instr);
+      }
+      break;
+    }
+    case 2: {
+      if (instr->ExtractBit(25) == 0) {
+        if ((instr->ExtractBit(24) == 0x1) ||
+            (instr->Mask(0x01000010) == 0x00000010)) {
+          VisitUnallocated(instr);
+        } else {
+          VisitConditionalBranch(instr);
+        }
+      } else {
+        VisitUnallocated(instr);
+      }
+      break;
+    }
+    case 6: {
+      if (instr->ExtractBit(25) == 0) {
+        if (instr->ExtractBit(24) == 0) {
+          if ((instr->ExtractBits(4, 2) != 0) ||
+              (instr->Mask(0x00E0001D) == 0x00200001) ||
+              (instr->Mask(0x00E0001D) == 0x00400001) ||
+              (instr->Mask(0x00E0001E) == 0x00200002) ||
+              (instr->Mask(0x00E0001E) == 0x00400002) ||
+              (instr->Mask(0x00E0001C) == 0x00600000) ||
+              (instr->Mask(0x00E0001C) == 0x00800000) ||
+              (instr->Mask(0x00E0001F) == 0x00A00000) ||
+              (instr->Mask(0x00C0001C) == 0x00C00000)) {
+            VisitUnallocated(instr);
+          } else {
+            VisitException(instr);
+          }
+        } else {
+          if (instr->ExtractBits(23, 22) == 0) {
+            const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+            if ((instr->ExtractBits(21, 19) == 0x4) ||
+                (masked_003FF0E0 == 0x00033000) ||
+                (masked_003FF0E0 == 0x003FF020) ||
+                (masked_003FF0E0 == 0x003FF060) ||
+                (masked_003FF0E0 == 0x003FF0E0) ||
+                (instr->Mask(0x00388000) == 0x00008000) ||
+                (instr->Mask(0x0038E000) == 0x00000000) ||
+                (instr->Mask(0x0039E000) == 0x00002000) ||
+                (instr->Mask(0x003AE000) == 0x00002000) ||
+                (instr->Mask(0x003CE000) == 0x00042000) ||
+                (instr->Mask(0x0038F000) == 0x00005000) ||
+                (instr->Mask(0x0038E000) == 0x00006000)) {
+              VisitUnallocated(instr);
+            } else {
+              VisitSystem(instr);
+            }
+          } else {
+            VisitUnallocated(instr);
+          }
+        }
+      } else {
+        if (((instr->ExtractBit(24) == 0x1) &&
+             (instr->ExtractBits(23, 21) > 0x1)) ||
+            (instr->ExtractBits(20, 16) != 0x1F) ||
+            (instr->ExtractBits(15, 10) == 0x1) ||
+            (instr->ExtractBits(15, 10) > 0x3) ||
+            (instr->ExtractBits(24, 21) == 0x3) ||
+            (instr->ExtractBits(24, 22) == 0x3)) {
+          VisitUnallocated(instr);
+        } else {
+          VisitUnconditionalBranchToRegister(instr);
+        }
+      }
+      break;
+    }
+    case 3:
+    case 7: {
+      VisitUnallocated(instr);
+      break;
+    }
+  }
+}
+
+
+void Decoder::DecodeLoadStore(const Instruction* instr) {
+  VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x8) ||
(instr->ExtractBits(27, 24) == 0x9) || + (instr->ExtractBits(27, 24) == 0xC) || + (instr->ExtractBits(27, 24) == 0xD)); + // TODO(all): rearrange the tree to integrate this branch. + if ((instr->ExtractBit(28) == 0) && (instr->ExtractBit(29) == 0) && + (instr->ExtractBit(26) == 1)) { + DecodeNEONLoadStore(instr); + return; + } + + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(26) == 0) { + VisitLoadStoreExclusive(instr); + } else { + VIXL_UNREACHABLE(); + } + } else { + if ((instr->ExtractBits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + if (instr->Mask(0xC4400000) == 0xC0400000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePairNonTemporal(instr); + } + } else { + VisitLoadStorePairPostIndex(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + if (instr->Mask(0xC4000000) == 0xC4000000) { + VisitUnallocated(instr); + } else { + VisitLoadLiteral(instr); + } + } else { + if ((instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(21) == 0) { + switch (instr->ExtractBits(11, 10)) { + case 0: { + VisitLoadStoreUnscaledOffset(instr); + break; + } + case 1: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePostIndex(instr); + } + break; + } + case 2: { + // TODO: VisitLoadStoreRegisterOffsetUnpriv. + VisitUnimplemented(instr); + break; + } + case 3: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePreIndex(instr); + } + break; + } + } + } else { + if (instr->ExtractBits(11, 10) == 0x2) { + if (instr->ExtractBit(14) == 0) { + VisitUnallocated(instr); + } else { + VisitLoadStoreRegisterOffset(instr); + } + } else { + if (instr->ExtractBits(11, 10) == 0x0) { + if (instr->ExtractBit(25) == 0) { + if (instr->ExtractBit(26) == 0) { + if ((instr->ExtractBit(15) == 1) && + ((instr->ExtractBits(14, 12) == 0x1) || + (instr->ExtractBit(13) == 1) || + (instr->ExtractBits(14, 12) == 0x5) || + ((instr->ExtractBits(14, 12) == 0x4) && + ((instr->ExtractBit(23) == 0) || + (instr->ExtractBits(23, 22) == 0x3))))) { + VisitUnallocated(instr); + } else { + VisitAtomicMemory(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } + } + } + } else { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitLoadStorePairOffset(instr); + } else { + VisitLoadStorePairPreIndex(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + VisitLoadStoreUnsignedOffset(instr); + } + } + } + } +} + + +void Decoder::DecodeLogical(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x2); + + if (instr->Mask(0x80400000) == 0x00400000) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitLogicalImmediate(instr); + } else { + if (instr->ExtractBits(30, 
29) == 0x1) { + VisitUnallocated(instr); + } else { + VisitMoveWideImmediate(instr); + } + } + } +} + + +void Decoder::DecodeBitfieldExtract(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x3); + + if ((instr->Mask(0x80400000) == 0x80000000) || + (instr->Mask(0x80400000) == 0x00400000) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else if (instr->ExtractBit(23) == 0) { + if ((instr->Mask(0x80200000) == 0x00200000) || + (instr->Mask(0x60000000) == 0x60000000)) { + VisitUnallocated(instr); + } else { + VisitBitfield(instr); + } + } else { + if ((instr->Mask(0x60200000) == 0x00200000) || + (instr->Mask(0x60000000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitExtract(instr); + } + } +} + + +void Decoder::DecodeAddSubImmediate(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x1); + if (instr->ExtractBit(23) == 1) { + VisitUnallocated(instr); + } else { + VisitAddSubImmediate(instr); + } +} + + +void Decoder::DecodeDataProcessing(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xA) || + (instr->ExtractBits(27, 24) == 0xB)); + + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(28) == 0) { + if (instr->Mask(0x80008000) == 0x00008000) { + VisitUnallocated(instr); + } else { + VisitLogicalShifted(instr); + } + } else { + switch (instr->ExtractBits(23, 21)) { + case 0: { + if (instr->Mask(0x0000FC00) != 0) { + VisitUnallocated(instr); + } else { + VisitAddSubWithCarry(instr); + } + break; + } + case 2: { + if ((instr->ExtractBit(29) == 0) || (instr->Mask(0x00000410) != 0)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(11) == 0) { + VisitConditionalCompareRegister(instr); + } else { + VisitConditionalCompareImmediate(instr); + } + } + break; + } + case 4: { + if (instr->Mask(0x20000800) != 0x00000000) { + VisitUnallocated(instr); + } else { + VisitConditionalSelect(instr); + } + break; + } + case 6: { + if (instr->ExtractBit(29) == 0x1) { + VisitUnallocated(instr); + VIXL_FALLTHROUGH(); + } else { + if (instr->ExtractBit(30) == 0) { + if ((instr->ExtractBit(15) == 0x1) || + (instr->ExtractBits(15, 11) == 0) || + (instr->ExtractBits(15, 12) == 0x1) || + ((instr->ExtractBits(15, 12) == 0x3) && + (instr->ExtractBit(31) == 0)) || + (instr->ExtractBits(15, 13) == 0x3) || + (instr->Mask(0x8000EC00) == 0x00004C00) || + (instr->Mask(0x8000E800) == 0x80004000) || + (instr->Mask(0x8000E400) == 0x80004000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing2Source(instr); + } + } else { + if ((instr->ExtractBits(20, 17) != 0) || + (instr->ExtractBit(15) == 1) || + ((instr->ExtractBit(16) == 1) && + ((instr->ExtractBits(14, 10) > 17) || + (instr->ExtractBit(31) == 0))) || + ((instr->ExtractBit(16) == 0) && + ((instr->ExtractBits(14, 13) != 0) || + (instr->Mask(0xA01FFC00) == 0x00000C00) || + (instr->Mask(0x201FF800) == 0x00001800)))) { + VisitUnallocated(instr); + } else { + VisitDataProcessing1Source(instr); + } + } + break; + } + } + case 1: + case 3: + case 5: + case 7: + VisitUnallocated(instr); + break; + } + } + } else { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(21) == 0) { + if ((instr->ExtractBits(23, 22) == 0x3) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else { + VisitAddSubShifted(instr); + } + } else { + if ((instr->Mask(0x00C00000) != 0x00000000) || + (instr->Mask(0x00001400) == 0x00001400) || + (instr->Mask(0x00001800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + 
VisitAddSubExtended(instr); + } + } + } else { + if ((instr->ExtractBit(30) == 0x1) || + (instr->ExtractBits(30, 29) == 0x1) || + (instr->Mask(0xE0600000) == 0x00200000) || + (instr->Mask(0xE0608000) == 0x00400000) || + (instr->Mask(0x60608000) == 0x00408000) || + (instr->Mask(0x60E00000) == 0x00E00000) || + (instr->Mask(0x60E00000) == 0x00800000) || + (instr->Mask(0x60E00000) == 0x00600000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing3Source(instr); + } + } + } +} + + +void Decoder::DecodeFP(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xE) || + (instr->ExtractBits(27, 24) == 0xF)); + if (instr->ExtractBit(28) == 0) { + DecodeNEONVectorDataProcessing(instr); + } else { + if (instr->ExtractBits(31, 30) == 0x3) { + VisitUnallocated(instr); + } else if (instr->ExtractBits(31, 30) == 0x1) { + DecodeNEONScalarDataProcessing(instr); + } else { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if ((instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(18) == 1) || + (instr->Mask(0x80008000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x00160000) == 0x00000000) || + (instr->Mask(0x00160000) == 0x00120000)) { + VisitUnallocated(instr); + } else { + VisitFPFixedPointConvert(instr); + } + } else { + if (instr->ExtractBits(15, 10) == 32) { + VisitUnallocated(instr); + } else if (instr->ExtractBits(15, 10) == 0) { + if ((instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x000E0000) == 0x000C0000) || + (instr->Mask(0x00160000) == 0x00120000) || + (instr->Mask(0x00160000) == 0x00140000) || + (instr->Mask(0x20C40000) == 0x00800000) || + (instr->Mask(0x20C60000) == 0x00840000) || + (instr->Mask(0xA0C60000) == 0x80060000) || + (instr->Mask(0xA0C60000) == 0x00860000) || + (instr->Mask(0xA0CE0000) == 0x80860000) || + (instr->Mask(0xA0CE0000) == 0x804E0000) || + (instr->Mask(0xA0CE0000) == 0x000E0000) || + (instr->Mask(0xA0D60000) == 0x00160000) || + (instr->Mask(0xA0D60000) == 0x80560000) || + (instr->Mask(0xA0D60000) == 0x80960000)) { + VisitUnallocated(instr); + } else { + VisitFPIntegerConvert(instr); + } + } else if (instr->ExtractBits(14, 10) == 16) { + const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000); + if ((instr->Mask(0x80180000) != 0) || + (masked_A0DF8000 == 0x00020000) || + (masked_A0DF8000 == 0x00030000) || + (masked_A0DF8000 == 0x00068000) || + (masked_A0DF8000 == 0x00428000) || + (masked_A0DF8000 == 0x00430000) || + (masked_A0DF8000 == 0x00468000) || + (instr->Mask(0xA0D80000) == 0x00800000) || + (instr->Mask(0xA0DF0000) == 0x00C30000) || + (instr->Mask(0xA0DF8000) == 0x00C68000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing1Source(instr); + } + } else if (instr->ExtractBits(13, 10) == 8) { + if ((instr->ExtractBits(15, 14) != 0) || + (instr->ExtractBits(2, 0) != 0) || + (instr->ExtractBit(31) == 1) || + (instr->ExtractBits(23, 22) == 0x2)) { + VisitUnallocated(instr); + } else { + VisitFPCompare(instr); + } + } else if (instr->ExtractBits(12, 10) == 4) { + if ((instr->ExtractBits(9, 5) != 0) || + // Valid enc: 01d, 00s, 11h. 
+ (instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(31) == 1)) { + VisitUnallocated(instr); + } else { + VisitFPImmediate(instr); + } + } else { + if ((instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(31) == 1)) { + VisitUnallocated(instr); + } else { + switch (instr->ExtractBits(11, 10)) { + case 1: { + VisitFPConditionalCompare(instr); + break; + } + case 2: { + if (instr->ExtractBits(15, 12) > 0x8) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing2Source(instr); + } + break; + } + case 3: { + VisitFPConditionalSelect(instr); + break; + } + default: + VIXL_UNREACHABLE(); + } + } + } + } + } else { + // Bit 30 == 1 has been handled earlier. + VIXL_ASSERT(instr->ExtractBit(30) == 0); + if ((instr->Mask(0xA0000000) != 0) || + (instr->ExtractBits(23, 22) == 0x2)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing3Source(instr); + } + } + } else { + VisitUnallocated(instr); + } + } + } +} + + +void Decoder::DecodeNEONLoadStore(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(29, 25) == 0x6); + if (instr->ExtractBit(31) == 0) { + if ((instr->ExtractBit(24) == 0) && (instr->ExtractBit(21) == 1)) { + VisitUnallocated(instr); + return; + } + + if (instr->ExtractBit(23) == 0) { + if (instr->ExtractBits(20, 16) == 0) { + if (instr->ExtractBit(24) == 0) { + VisitNEONLoadStoreMultiStruct(instr); + } else { + VisitNEONLoadStoreSingleStruct(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBit(24) == 0) { + VisitNEONLoadStoreMultiStructPostIndex(instr); + } else { + VisitNEONLoadStoreSingleStructPostIndex(instr); + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(28, 25) == 0x7); + if (instr->ExtractBit(31) == 0) { + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if (instr->ExtractBit(15) == 0) { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEONTable(instr); + } else { + VisitNEONPerm(instr); + } + } else { + VisitNEONExtract(instr); + } + } else { + if (instr->ExtractBits(23, 22) == 0) { + VisitNEONCopy(instr); + } else if (instr->ExtractBit(14) == 0x0 && + instr->ExtractBit(22) == 0x1) { + // U + a + opcode. 
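+ // For example, U (bit 29) = 1, a (bit 23) = 0 and opcode (bits 13..11) =
+ // 0b010 pack to decode_field = ((1 << 1) | 0) << 3 | 0b010 = 0x12, which is
+ // not in the unallocated list below and so decodes as NEON3SameFP16.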
+ uint8_t decode_field = + (instr->ExtractBit(29) << 1) | instr->ExtractBit(23); + decode_field = (decode_field << 3) | instr->ExtractBits(13, 11); + switch (decode_field) { + case 0x5: + case 0xB: + case 0xC: + case 0xD: + case 0x11: + case 0x19: + case 0x1B: + case 0x1F: + VisitUnallocated(instr); + break; + default: + VisitNEON3SameFP16(instr); + break; + } + } else { + VisitUnallocated(instr); + } + } + } else if (instr->ExtractBit(10) == 0) { + VisitUnallocated(instr); + } else if ((instr->ExtractBits(14, 11) == 0x3) || + (instr->ExtractBits(14, 13) == 0x1)) { + // opcode = 0b0011 + // opcode = 0b01xx + VisitUnallocated(instr); + } else if (instr->ExtractBit(29) == 0) { + // U == 0 + if (instr->ExtractBits(14, 11) == 0x2) { + // opcode = 0b0010 + VisitNEON3SameExtra(instr); + } else { + VisitUnallocated(instr); + } + } else { + // U == 1 + if ((instr->ExtractBits(14, 11) == 0xd) || + (instr->ExtractBits(14, 11) == 0xf)) { + // opcode = 0b11x1 + VisitUnallocated(instr); + } else { + VisitNEON3SameExtra(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEON3Different(instr); + } else { + if (instr->ExtractBits(18, 17) == 0) { + if (instr->ExtractBit(20) == 0) { + if (instr->ExtractBit(19) == 0) { + VisitNEON2RegMisc(instr); + } else { + if (instr->ExtractBits(30, 29) == 0x2) { + VisitCryptoAES(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->ExtractBit(19) == 0) { + VisitNEONAcrossLanes(instr); + } else { + if (instr->ExtractBit(22) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(16, 15) == 0x0) || + (instr->ExtractBits(16, 14) == 0x2) || + (instr->ExtractBits(16, 15) == 0x2) || + (instr->ExtractBits(16, 12) == 0x1e) || + ((instr->ExtractBit(23) == 0) && + ((instr->ExtractBits(16, 14) == 0x3) || + (instr->ExtractBits(16, 12) == 0x1f))) || + ((instr->ExtractBit(23) == 1) && + (instr->ExtractBits(16, 12) == 0x1c))) { + VisitUnallocated(instr); + } else { + VisitNEON2RegMiscFP16(instr); + } + } + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEON3Same(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + VisitNEONByIndexedElement(instr); + } else { + if (instr->ExtractBit(23) == 0) { + if (instr->ExtractBits(22, 19) == 0) { + VisitNEONModifiedImmediate(instr); + } else { + VisitNEONShiftImmediate(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(28, 25) == 0xF); + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if (instr->ExtractBit(15) == 0) { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitCrypto3RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBits(23, 22) == 0) { + VisitNEONScalarCopy(instr); + } else { + if (instr->Mask(0x00404000) == 0x00400000) { + if ((instr->ExtractBits(13, 11) == 0x6) || + (instr->ExtractBits(13, 11) < 2) || + ((instr->Mask(0x20800000) == 0x00000000) && + ((instr->ExtractBits(13, 11) < 0x3) || + (instr->ExtractBits(13, 11) == 0x5))) || + ((instr->Mask(0x20800000) == 0x00800000) && + (instr->ExtractBits(13, 11) < 0x7)) || + ((instr->Mask(0x20800000) == 0x20000000) && + ((instr->ExtractBits(13, 11) < 0x4) || + (instr->ExtractBits(13, 11) == 0x7))) || + ((instr->Mask(0x20800000) == 
0x20800000) && + (instr->ExtractBits(12, 11) == 0x3))) { + VisitUnallocated(instr); + } else { + VisitNEONScalar3SameFP16(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(10) == 0) { + VisitUnallocated(instr); + } else { + VisitNEONScalar3SameExtra(instr); + } + } + } + } else { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEONScalar3Diff(instr); + } else { + if (instr->ExtractBits(18, 17) == 0) { + if (instr->ExtractBit(20) == 0) { + if (instr->ExtractBit(19) == 0) { + VisitNEONScalar2RegMisc(instr); + } else { + if (instr->ExtractBit(29) == 0) { + VisitCrypto2RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->ExtractBit(19) == 0) { + VisitNEONScalarPairwise(instr); + } else { + if (instr->ExtractBit(22) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(16, 15) == 0x0) || + (instr->ExtractBits(16, 14) == 0x2) || + (instr->ExtractBits(16, 15) == 0x2) || + (instr->ExtractBits(16, 13) == 0xc) || + (instr->ExtractBits(16, 12) == 0x1e) || + ((instr->ExtractBit(23) == 0) && + ((instr->ExtractBits(16, 14) == 0x3) || + (instr->ExtractBits(16, 12) == 0x1f))) || + ((instr->ExtractBit(23) == 1) && + ((instr->ExtractBits(16, 12) == 0xf) || + (instr->ExtractBits(16, 12) == 0x1c) || + ((instr->ExtractBit(29) == 1) && + ((instr->ExtractBits(16, 12) == 0xe) || + (instr->ExtractBits(16, 12) == 0x1f)))))) { + VisitUnallocated(instr); + } else { + VisitNEONScalar2RegMiscFP16(instr); + } + } + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEONScalar3Same(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + VisitNEONScalarByIndexedElement(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitNEONScalarShiftImmediate(instr); + } else { + VisitUnallocated(instr); + } + } + } +} + + +#define DEFINE_VISITOR_CALLERS(A) \ + void Decoder::Visit##A(const Instruction* instr) { \ + VIXL_ASSERT(((A##FMask == 0) && (A##Fixed == 0)) || \ + (instr->Mask(A##FMask) == A##Fixed)); \ + std::list<DecoderVisitor *>::iterator it; \ + for (it = visitors_.begin(); it != visitors_.end(); it++) { \ + (*it)->Visit##A(instr); \ + } \ + } +VISITOR_LIST(DEFINE_VISITOR_CALLERS) +#undef DEFINE_VISITOR_CALLERS +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/disasm-aarch64.cc b/dep/vixl/src/aarch64/disasm-aarch64.cc new file mode 100644 index 000000000..1c00443dd --- /dev/null +++ b/dep/vixl/src/aarch64/disasm-aarch64.cc @@ -0,0 +1,5817 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include <cstdlib> +#include <sstream> + +#include "disasm-aarch64.h" + +namespace vixl { +namespace aarch64 { + + +Disassembler::Disassembler() { + buffer_size_ = 256; + buffer_ = reinterpret_cast<char *>(malloc(buffer_size_)); + buffer_pos_ = 0; + own_buffer_ = true; + code_address_offset_ = 0; +} + + +Disassembler::Disassembler(char *text_buffer, int buffer_size) { + buffer_size_ = buffer_size; + buffer_ = text_buffer; + buffer_pos_ = 0; + own_buffer_ = false; + code_address_offset_ = 0; +} + + +Disassembler::~Disassembler() { + if (own_buffer_) { + free(buffer_); + } +} + + +char *Disassembler::GetOutput() { return buffer_; } + + +void Disassembler::VisitAddSubImmediate(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool stack_op = + (rd_is_zr || RnIsZROrSP(instr)) && (instr->GetImmAddSub() == 0) ? true + : false; + const char *mnemonic = ""; + const char *form = "'Rds, 'Rns, 'IAddSub"; + const char *form_cmp = "'Rns, 'IAddSub"; + const char *form_mov = "'Rds, 'Rns"; + + switch (instr->Mask(AddSubImmediateMask)) { + case ADD_w_imm: + case ADD_x_imm: { + mnemonic = "add"; + if (stack_op) { + mnemonic = "mov"; + form = form_mov; + } + break; + } + case ADDS_w_imm: + case ADDS_x_imm: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_imm: + case SUB_x_imm: + mnemonic = "sub"; + break; + case SUBS_w_imm: + case SUBS_x_imm: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubShifted(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'NDP"; + const char *form_cmp = "'Rn, 'Rm'NDP"; + const char *form_neg = "'Rd, 'Rm'NDP"; + + switch (instr->Mask(AddSubShiftedMask)) { + case ADD_w_shift: + case ADD_x_shift: + mnemonic = "add"; + break; + case ADDS_w_shift: + case ADDS_x_shift: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_shift: + case SUB_x_shift: { + mnemonic = "sub"; + if (rn_is_zr) { + mnemonic = "neg"; + form = form_neg; + } + break; + } + case SUBS_w_shift: + case SUBS_x_shift: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } else if (rn_is_zr) { + mnemonic = "negs"; + form = form_neg; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubExtended(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + const char *mnemonic = ""; + Extend mode = static_cast<Extend>(instr->GetExtendMode()); + const char *form = ((mode ==
UXTX) || (mode == SXTX)) ? "'Rds, 'Rns, 'Xm'Ext" + : "'Rds, 'Rns, 'Wm'Ext"; + const char *form_cmp = + ((mode == UXTX) || (mode == SXTX)) ? "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext"; + + switch (instr->Mask(AddSubExtendedMask)) { + case ADD_w_ext: + case ADD_x_ext: + mnemonic = "add"; + break; + case ADDS_w_ext: + case ADDS_x_ext: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_ext: + case SUB_x_ext: + mnemonic = "sub"; + break; + case SUBS_w_ext: + case SUBS_x_ext: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubWithCarry(const Instruction *instr) { + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_neg = "'Rd, 'Rm"; + + switch (instr->Mask(AddSubWithCarryMask)) { + case ADC_w: + case ADC_x: + mnemonic = "adc"; + break; + case ADCS_w: + case ADCS_x: + mnemonic = "adcs"; + break; + case SBC_w: + case SBC_x: { + mnemonic = "sbc"; + if (rn_is_zr) { + mnemonic = "ngc"; + form = form_neg; + } + break; + } + case SBCS_w: + case SBCS_x: { + mnemonic = "sbcs"; + if (rn_is_zr) { + mnemonic = "ngcs"; + form = form_neg; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLogicalImmediate(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rds, 'Rn, 'ITri"; + + if (instr->GetImmLogical() == 0) { + // The immediate encoded in the instruction is not in the expected format. + Format(instr, "unallocated", "(LogicalImmediate)"); + return; + } + + switch (instr->Mask(LogicalImmediateMask)) { + case AND_w_imm: + case AND_x_imm: + mnemonic = "and"; + break; + case ORR_w_imm: + case ORR_x_imm: { + mnemonic = "orr"; + unsigned reg_size = + (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize; + if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->GetImmLogical())) { + mnemonic = "mov"; + form = "'Rds, 'ITri"; + } + break; + } + case EOR_w_imm: + case EOR_x_imm: + mnemonic = "eor"; + break; + case ANDS_w_imm: + case ANDS_x_imm: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'ITri"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { + VIXL_ASSERT((reg_size == kXRegSize) || + ((reg_size == kWRegSize) && (value <= 0xffffffff))); + + // Test for movz: 16 bits set at positions 0, 16, 32 or 48. + if (((value & UINT64_C(0xffffffffffff0000)) == 0) || + ((value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((value & UINT64_C(0x0000ffffffffffff)) == 0)) { + return true; + } + + // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48). 
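+  // For example (values added for illustration): 0x0000123400000000 passes
+  // the movz test above, since only bits 32-47 are set, while
+  // 0xffffffff0000ffff passes the X-register movn test below, since its
+  // complement 0x00000000ffff0000 only has bits set at positions 16-31.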
+ if ((reg_size == kXRegSize) && + (((~value & UINT64_C(0xffffffffffff0000)) == 0) || + ((~value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((~value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) { + return true; + } + if ((reg_size == kWRegSize) && (((value & 0xffff0000) == 0xffff0000) || + ((value & 0x0000ffff) == 0x0000ffff))) { + return true; + } + return false; +} + + +void Disassembler::VisitLogicalShifted(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'NLo"; + + switch (instr->Mask(LogicalShiftedMask)) { + case AND_w: + case AND_x: + mnemonic = "and"; + break; + case BIC_w: + case BIC_x: + mnemonic = "bic"; + break; + case EOR_w: + case EOR_x: + mnemonic = "eor"; + break; + case EON_w: + case EON_x: + mnemonic = "eon"; + break; + case BICS_w: + case BICS_x: + mnemonic = "bics"; + break; + case ANDS_w: + case ANDS_x: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'Rm'NLo"; + } + break; + } + case ORR_w: + case ORR_x: { + mnemonic = "orr"; + if (rn_is_zr && (instr->GetImmDPShift() == 0) && + (instr->GetShiftDP() == LSL)) { + mnemonic = "mov"; + form = "'Rd, 'Rm"; + } + break; + } + case ORN_w: + case ORN_x: { + mnemonic = "orn"; + if (rn_is_zr) { + mnemonic = "mvn"; + form = "'Rd, 'Rm'NLo"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareRegister(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'Rm, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareRegisterMask)) { + case CCMN_w: + case CCMN_x: + mnemonic = "ccmn"; + break; + case CCMP_w: + case CCMP_x: + mnemonic = "ccmp"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'IP, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareImmediateMask)) { + case CCMN_w_imm: + case CCMN_x_imm: + mnemonic = "ccmn"; + break; + case CCMP_w_imm: + case CCMP_x_imm: + mnemonic = "ccmp"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalSelect(const Instruction *instr) { + bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr)); + bool rn_is_rm = (instr->GetRn() == instr->GetRm()); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'Cond"; + const char *form_test = "'Rd, 'CInv"; + const char *form_update = "'Rd, 'Rn, 'CInv"; + + Condition cond = static_cast<Condition>(instr->GetCondition()); + bool invertible_cond = (cond != al) && (cond != nv); + + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: + mnemonic = "csel"; + break; + case CSINC_w: + case CSINC_x: { + mnemonic = "csinc"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "cset"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinc"; + form = form_update; + } + break; + } + case CSINV_w: + case CSINV_x: { + mnemonic = "csinv"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "csetm"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinv"; + form = form_update; + } + break; + } + case CSNEG_w: + case CSNEG_x: { + mnemonic = "csneg"; + if (rn_is_rm && invertible_cond) { + mnemonic = "cneg"; + form = form_update; + } + break; + } +
default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitBitfield(const Instruction *instr) { + unsigned s = instr->GetImmS(); + unsigned r = instr->GetImmR(); + unsigned rd_size_minus_1 = + ((instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1; + const char *mnemonic = ""; + const char *form = ""; + const char *form_shift_right = "'Rd, 'Rn, 'IBr"; + const char *form_extend = "'Rd, 'Wn"; + const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1"; + const char *form_bfc = "'Rd, 'IBZ-r, 'IBs+1"; + const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1"; + const char *form_lsl = "'Rd, 'Rn, 'IBZ-r"; + + switch (instr->Mask(BitfieldMask)) { + case SBFM_w: + case SBFM_x: { + mnemonic = "sbfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "sxtb"; + } else if (s == 15) { + mnemonic = "sxth"; + } else if ((s == 31) && (instr->GetSixtyFourBits() == 1)) { + mnemonic = "sxtw"; + } else { + form = form_bfx; + } + } else if (s == rd_size_minus_1) { + mnemonic = "asr"; + form = form_shift_right; + } else if (s < r) { + mnemonic = "sbfiz"; + form = form_bfiz; + } + break; + } + case UBFM_w: + case UBFM_x: { + mnemonic = "ubfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "uxtb"; + } else if (s == 15) { + mnemonic = "uxth"; + } else { + form = form_bfx; + } + } + if (s == rd_size_minus_1) { + mnemonic = "lsr"; + form = form_shift_right; + } else if (r == s + 1) { + mnemonic = "lsl"; + form = form_lsl; + } else if (s < r) { + mnemonic = "ubfiz"; + form = form_bfiz; + } + break; + } + case BFM_w: + case BFM_x: { + mnemonic = "bfxil"; + form = form_bfx; + if (s < r) { + if (instr->GetRn() == kZeroRegCode) { + mnemonic = "bfc"; + form = form_bfc; + } else { + mnemonic = "bfi"; + form = form_bfiz; + } + } + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitExtract(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'IExtract"; + + switch (instr->Mask(ExtractMask)) { + case EXTR_w: + case EXTR_x: { + if (instr->GetRn() == instr->GetRm()) { + mnemonic = "ror"; + form = "'Rd, 'Rn, 'IExtract"; + } else { + mnemonic = "extr"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitPCRelAddressing(const Instruction *instr) { + switch (instr->Mask(PCRelAddressingMask)) { + case ADR: + Format(instr, "adr", "'Xd, 'AddrPCRelByte"); + break; + case ADRP: + Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); + break; + default: + Format(instr, "unimplemented", "(PCRelAddressing)"); + } +} + + +void Disassembler::VisitConditionalBranch(const Instruction *instr) { + switch (instr->Mask(ConditionalBranchMask)) { + case B_cond: + Format(instr, "b.'CBrn", "'TImmCond"); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Disassembler::VisitUnconditionalBranchToRegister( + const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BR: + mnemonic = "br"; + form = "'Xn"; + break; + case BLR: + mnemonic = "blr"; + form = "'Xn"; + break; + case RET: { + mnemonic = "ret"; + if (instr->GetRn() == kLinkRegCode) { + form = NULL; + } else { + form = "'Xn"; + } + break; + } + case BRAAZ: + mnemonic = "braaz"; + form = "'Xn"; + break; + case BRABZ: + mnemonic = "brabz"; + form = "'Xn"; + break; + case BLRAAZ: + mnemonic = "blraaz"; + form = "'Xn"; + break; + case BLRABZ: + mnemonic = "blrabz"; + 
form = "'Xn"; + break; + case RETAA: + mnemonic = "retaa"; + form = NULL; + break; + case RETAB: + mnemonic = "retab"; + form = NULL; + break; + case BRAA: + mnemonic = "braa"; + form = "'Xn, 'Xds"; + break; + case BRAB: + mnemonic = "brab"; + form = "'Xn, 'Xds"; + break; + case BLRAA: + mnemonic = "blraa"; + form = "'Xn, 'Xds"; + break; + case BLRAB: + mnemonic = "blrab"; + form = "'Xn, 'Xds"; + break; + default: + form = "(UnconditionalBranchToRegister)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitUnconditionalBranch(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'TImmUncn"; + + switch (instr->Mask(UnconditionalBranchMask)) { + case B: + mnemonic = "b"; + break; + case BL: + mnemonic = "bl"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing1Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn"; + + switch (instr->Mask(DataProcessing1SourceMask)) { +#define FORMAT(A, B) \ + case A##_w: \ + case A##_x: \ + mnemonic = B; \ + break; + FORMAT(RBIT, "rbit"); + FORMAT(REV16, "rev16"); + FORMAT(REV, "rev"); + FORMAT(CLZ, "clz"); + FORMAT(CLS, "cls"); +#undef FORMAT + +#define PAUTH_VARIATIONS(V) \ + V(PACI, "paci") \ + V(PACD, "pacd") \ + V(AUTI, "auti") \ + V(AUTD, "autd") +#define PAUTH_CASE(NAME, MN) \ + case NAME##A: \ + mnemonic = MN "a"; \ + form = "'Xd, 'Xns"; \ + break; \ + case NAME##ZA: \ + mnemonic = MN "za"; \ + form = "'Xd"; \ + break; \ + case NAME##B: \ + mnemonic = MN "b"; \ + form = "'Xd, 'Xns"; \ + break; \ + case NAME##ZB: \ + mnemonic = MN "zb"; \ + form = "'Xd"; \ + break; + + PAUTH_VARIATIONS(PAUTH_CASE) +#undef PAUTH_CASE + + case XPACI: + mnemonic = "xpaci"; + form = "'Xd"; + break; + case XPACD: + mnemonic = "xpacd"; + form = "'Xd"; + break; + case REV32_x: + mnemonic = "rev32"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing2Source(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_wwx = "'Wd, 'Wn, 'Xm"; + + switch (instr->Mask(DataProcessing2SourceMask)) { +#define FORMAT(A, B) \ + case A##_w: \ + case A##_x: \ + mnemonic = B; \ + break; + FORMAT(UDIV, "udiv"); + FORMAT(SDIV, "sdiv"); + FORMAT(LSLV, "lsl"); + FORMAT(LSRV, "lsr"); + FORMAT(ASRV, "asr"); + FORMAT(RORV, "ror"); +#undef FORMAT + case PACGA: + mnemonic = "pacga"; + form = "'Xd, 'Xn, 'Xms"; + break; + case CRC32B: + mnemonic = "crc32b"; + break; + case CRC32H: + mnemonic = "crc32h"; + break; + case CRC32W: + mnemonic = "crc32w"; + break; + case CRC32X: + mnemonic = "crc32x"; + form = form_wwx; + break; + case CRC32CB: + mnemonic = "crc32cb"; + break; + case CRC32CH: + mnemonic = "crc32ch"; + break; + case CRC32CW: + mnemonic = "crc32cw"; + break; + case CRC32CX: + mnemonic = "crc32cx"; + form = form_wwx; + break; + default: + form = "(DataProcessing2Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing3Source(const Instruction *instr) { + bool ra_is_zr = RaIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Xd, 'Wn, 'Wm, 'Xa"; + const char *form_rrr = "'Rd, 'Rn, 'Rm"; + const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra"; + const char *form_xww = "'Xd, 'Wn, 'Wm"; + const char *form_xxx = "'Xd, 'Xn, 'Xm"; + + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: { + mnemonic = "madd"; + form = form_rrrr; + if 
(ra_is_zr) { + mnemonic = "mul"; + form = form_rrr; + } + break; + } + case MSUB_w: + case MSUB_x: { + mnemonic = "msub"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mneg"; + form = form_rrr; + } + break; + } + case SMADDL_x: { + mnemonic = "smaddl"; + if (ra_is_zr) { + mnemonic = "smull"; + form = form_xww; + } + break; + } + case SMSUBL_x: { + mnemonic = "smsubl"; + if (ra_is_zr) { + mnemonic = "smnegl"; + form = form_xww; + } + break; + } + case UMADDL_x: { + mnemonic = "umaddl"; + if (ra_is_zr) { + mnemonic = "umull"; + form = form_xww; + } + break; + } + case UMSUBL_x: { + mnemonic = "umsubl"; + if (ra_is_zr) { + mnemonic = "umnegl"; + form = form_xww; + } + break; + } + case SMULH_x: { + mnemonic = "smulh"; + form = form_xxx; + break; + } + case UMULH_x: { + mnemonic = "umulh"; + form = form_xxx; + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCompareBranch(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rt, 'TImmCmpa"; + + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + case CBZ_x: + mnemonic = "cbz"; + break; + case CBNZ_w: + case CBNZ_x: + mnemonic = "cbnz"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitTestBranch(const Instruction *instr) { + const char *mnemonic = ""; + // If the top bit of the immediate is clear, the tested register is + // disassembled as Wt, otherwise Xt. As the top bit of the immediate is + // encoded in bit 31 of the instruction, we can reuse the Rt form, which + // uses bit 31 (normally "sf") to choose the register size. + const char *form = "'Rt, 'IS, 'TImmTest"; + + switch (instr->Mask(TestBranchMask)) { + case TBZ: + mnemonic = "tbz"; + break; + case TBNZ: + mnemonic = "tbnz"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitMoveWideImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'IMoveImm"; + + // Print the shift separately for movk, to make it clear which half word will + // be overwritten. Movn and movz print the computed immediate, which includes + // shift calculation. 
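+  // For instance (illustrative encodings, not from the original source), a
+  // "movz x0, #0x1234, lsl #16" instruction prints as "mov x0, #0x12340000",
+  // while "movk x0, #0x1234, lsl #16" keeps its explicit shift.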
+ switch (instr->Mask(MoveWideImmediateMask)) { + case MOVN_w: + case MOVN_x: + if ((instr->GetImmMoveWide()) || (instr->GetShiftMoveWide() == 0)) { + if ((instr->GetSixtyFourBits() == 0) && + (instr->GetImmMoveWide() == 0xffff)) { + mnemonic = "movn"; + } else { + mnemonic = "mov"; + form = "'Rd, 'IMoveNeg"; + } + } else { + mnemonic = "movn"; + } + break; + case MOVZ_w: + case MOVZ_x: + if ((instr->GetImmMoveWide()) || (instr->GetShiftMoveWide() == 0)) + mnemonic = "mov"; + else + mnemonic = "movz"; + break; + case MOVK_w: + case MOVK_x: + mnemonic = "movk"; + form = "'Rd, 'IMoveLSL"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_LIST(V) \ + V(STRB_w, "strb", "'Wt") \ + V(STRH_w, "strh", "'Wt") \ + V(STR_w, "str", "'Wt") \ + V(STR_x, "str", "'Xt") \ + V(LDRB_w, "ldrb", "'Wt") \ + V(LDRH_w, "ldrh", "'Wt") \ + V(LDR_w, "ldr", "'Wt") \ + V(LDR_x, "ldr", "'Xt") \ + V(LDRSB_x, "ldrsb", "'Xt") \ + V(LDRSH_x, "ldrsh", "'Xt") \ + V(LDRSW_x, "ldrsw", "'Xt") \ + V(LDRSB_w, "ldrsb", "'Wt") \ + V(LDRSH_w, "ldrsh", "'Wt") \ + V(STR_b, "str", "'Bt") \ + V(STR_h, "str", "'Ht") \ + V(STR_s, "str", "'St") \ + V(STR_d, "str", "'Dt") \ + V(LDR_b, "ldr", "'Bt") \ + V(LDR_h, "ldr", "'Ht") \ + V(LDR_s, "ldr", "'St") \ + V(LDR_d, "ldr", "'Dt") \ + V(STR_q, "str", "'Qt") \ + V(LDR_q, "ldr", "'Qt") + +void Disassembler::VisitLoadStorePreIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePreIndex)"; + + switch (instr->Mask(LoadStorePreIndexMask)) { +#define LS_PREINDEX(A, B, C) \ + case A##_pre: \ + mnemonic = B; \ + form = C ", ['Xns'ILSi]!"; \ + break; + LOAD_STORE_LIST(LS_PREINDEX) +#undef LS_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePostIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePostIndex)"; + + switch (instr->Mask(LoadStorePostIndexMask)) { +#define LS_POSTINDEX(A, B, C) \ + case A##_post: \ + mnemonic = B; \ + form = C ", ['Xns]'ILSi"; \ + break; + LOAD_STORE_LIST(LS_POSTINDEX) +#undef LS_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreUnsignedOffset)"; + + switch (instr->Mask(LoadStoreUnsignedOffsetMask)) { +#define LS_UNSIGNEDOFFSET(A, B, C) \ + case A##_unsigned: \ + mnemonic = B; \ + form = C ", ['Xns'ILU]"; \ + break; + LOAD_STORE_LIST(LS_UNSIGNEDOFFSET) +#undef LS_UNSIGNEDOFFSET + case PRFM_unsigned: + mnemonic = "prfm"; + form = "'PrefOp, ['Xns'ILU]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreRegisterOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreRegisterOffset)"; + + switch (instr->Mask(LoadStoreRegisterOffsetMask)) { +#define LS_REGISTEROFFSET(A, B, C) \ + case A##_reg: \ + mnemonic = B; \ + form = C ", ['Xns, 'Offsetreg]"; \ + break; + LOAD_STORE_LIST(LS_REGISTEROFFSET) +#undef LS_REGISTEROFFSET + case PRFM_reg: + mnemonic = "prfm"; + form = "'PrefOp, ['Xns, 'Offsetreg]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Wt, ['Xns'ILS]"; + const char *form_x = "'Xt, ['Xns'ILS]"; + const char *form_b = "'Bt, ['Xns'ILS]"; + const char *form_h = "'Ht, ['Xns'ILS]"; + const char *form_s = "'St, 
['Xns'ILS]"; + const char *form_d = "'Dt, ['Xns'ILS]"; + const char *form_q = "'Qt, ['Xns'ILS]"; + const char *form_prefetch = "'PrefOp, ['Xns'ILS]"; + + switch (instr->Mask(LoadStoreUnscaledOffsetMask)) { + case STURB_w: + mnemonic = "sturb"; + break; + case STURH_w: + mnemonic = "sturh"; + break; + case STUR_w: + mnemonic = "stur"; + break; + case STUR_x: + mnemonic = "stur"; + form = form_x; + break; + case STUR_b: + mnemonic = "stur"; + form = form_b; + break; + case STUR_h: + mnemonic = "stur"; + form = form_h; + break; + case STUR_s: + mnemonic = "stur"; + form = form_s; + break; + case STUR_d: + mnemonic = "stur"; + form = form_d; + break; + case STUR_q: + mnemonic = "stur"; + form = form_q; + break; + case LDURB_w: + mnemonic = "ldurb"; + break; + case LDURH_w: + mnemonic = "ldurh"; + break; + case LDUR_w: + mnemonic = "ldur"; + break; + case LDUR_x: + mnemonic = "ldur"; + form = form_x; + break; + case LDUR_b: + mnemonic = "ldur"; + form = form_b; + break; + case LDUR_h: + mnemonic = "ldur"; + form = form_h; + break; + case LDUR_s: + mnemonic = "ldur"; + form = form_s; + break; + case LDUR_d: + mnemonic = "ldur"; + form = form_d; + break; + case LDUR_q: + mnemonic = "ldur"; + form = form_q; + break; + case LDURSB_x: + form = form_x; + VIXL_FALLTHROUGH(); + case LDURSB_w: + mnemonic = "ldursb"; + break; + case LDURSH_x: + form = form_x; + VIXL_FALLTHROUGH(); + case LDURSH_w: + mnemonic = "ldursh"; + break; + case LDURSW_x: + mnemonic = "ldursw"; + form = form_x; + break; + case PRFUM: + mnemonic = "prfum"; + form = form_prefetch; + break; + default: + form = "(LoadStoreUnscaledOffset)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadLiteral(const Instruction *instr) { + const char *mnemonic = "ldr"; + const char *form = "(LoadLiteral)"; + + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: + form = "'Wt, 'ILLiteral 'LValue"; + break; + case LDR_x_lit: + form = "'Xt, 'ILLiteral 'LValue"; + break; + case LDR_s_lit: + form = "'St, 'ILLiteral 'LValue"; + break; + case LDR_d_lit: + form = "'Dt, 'ILLiteral 'LValue"; + break; + case LDR_q_lit: + form = "'Qt, 'ILLiteral 'LValue"; + break; + case LDRSW_x_lit: { + mnemonic = "ldrsw"; + form = "'Xt, 'ILLiteral 'LValue"; + break; + } + case PRFM_lit: { + mnemonic = "prfm"; + form = "'PrefOp, 'ILLiteral 'LValue"; + break; + } + default: + mnemonic = "unimplemented"; + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_PAIR_LIST(V) \ + V(STP_w, "stp", "'Wt, 'Wt2", "2") \ + V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \ + V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \ + V(STP_x, "stp", "'Xt, 'Xt2", "3") \ + V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \ + V(STP_s, "stp", "'St, 'St2", "2") \ + V(LDP_s, "ldp", "'St, 'St2", "2") \ + V(STP_d, "stp", "'Dt, 'Dt2", "3") \ + V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \ + V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \ + V(STP_q, "stp", "'Qt, 'Qt2", "4") + +void Disassembler::VisitLoadStorePairPostIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPostIndex)"; + + switch (instr->Mask(LoadStorePairPostIndexMask)) { +#define LSP_POSTINDEX(A, B, C, D) \ + case A##_post: \ + mnemonic = B; \ + form = C ", ['Xns]'ILP" D "i"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_POSTINDEX) +#undef LSP_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairPreIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPreIndex)"; + + switch (instr->Mask(LoadStorePairPreIndexMask)) { 
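+// A worked illustration (not from the original source): the STP_x entry
+// above carries the digit "3", so the pre-index case expands to the form
+// "'Xt, 'Xt2, ['Xns'ILP3i]!" and prints output such as
+// "stp x0, x1, [sp, #-16]!"; the digit is presumably the log2 of the
+// access size, used to scale the 7-bit pair offset.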
+#define LSP_PREINDEX(A, B, C, D) \ + case A##_pre: \ + mnemonic = B; \ + form = C ", ['Xns'ILP" D "i]!"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_PREINDEX) +#undef LSP_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairOffset)"; + + switch (instr->Mask(LoadStorePairOffsetMask)) { +#define LSP_OFFSET(A, B, C, D) \ + case A##_off: \ + mnemonic = B; \ + form = C ", ['Xns'ILP" D "]"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_OFFSET) +#undef LSP_OFFSET + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairNonTemporal(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStorePairNonTemporalMask)) { + case STNP_w: + mnemonic = "stnp"; + form = "'Wt, 'Wt2, ['Xns'ILP2]"; + break; + case LDNP_w: + mnemonic = "ldnp"; + form = "'Wt, 'Wt2, ['Xns'ILP2]"; + break; + case STNP_x: + mnemonic = "stnp"; + form = "'Xt, 'Xt2, ['Xns'ILP3]"; + break; + case LDNP_x: + mnemonic = "ldnp"; + form = "'Xt, 'Xt2, ['Xns'ILP3]"; + break; + case STNP_s: + mnemonic = "stnp"; + form = "'St, 'St2, ['Xns'ILP2]"; + break; + case LDNP_s: + mnemonic = "ldnp"; + form = "'St, 'St2, ['Xns'ILP2]"; + break; + case STNP_d: + mnemonic = "stnp"; + form = "'Dt, 'Dt2, ['Xns'ILP3]"; + break; + case LDNP_d: + mnemonic = "ldnp"; + form = "'Dt, 'Dt2, ['Xns'ILP3]"; + break; + case STNP_q: + mnemonic = "stnp"; + form = "'Qt, 'Qt2, ['Xns'ILP4]"; + break; + case LDNP_q: + mnemonic = "ldnp"; + form = "'Qt, 'Qt2, ['Xns'ILP4]"; + break; + default: + form = "(LoadStorePairNonTemporal)"; + } + Format(instr, mnemonic, form); +} + +// clang-format off +#define LOAD_STORE_EXCLUSIVE_LIST(V) \ + V(STXRB_w, "stxrb", "'Ws, 'Wt") \ + V(STXRH_w, "stxrh", "'Ws, 'Wt") \ + V(STXR_w, "stxr", "'Ws, 'Wt") \ + V(STXR_x, "stxr", "'Ws, 'Xt") \ + V(LDXRB_w, "ldxrb", "'Wt") \ + V(LDXRH_w, "ldxrh", "'Wt") \ + V(LDXR_w, "ldxr", "'Wt") \ + V(LDXR_x, "ldxr", "'Xt") \ + V(STXP_w, "stxp", "'Ws, 'Wt, 'Wt2") \ + V(STXP_x, "stxp", "'Ws, 'Xt, 'Xt2") \ + V(LDXP_w, "ldxp", "'Wt, 'Wt2") \ + V(LDXP_x, "ldxp", "'Xt, 'Xt2") \ + V(STLXRB_w, "stlxrb", "'Ws, 'Wt") \ + V(STLXRH_w, "stlxrh", "'Ws, 'Wt") \ + V(STLXR_w, "stlxr", "'Ws, 'Wt") \ + V(STLXR_x, "stlxr", "'Ws, 'Xt") \ + V(LDAXRB_w, "ldaxrb", "'Wt") \ + V(LDAXRH_w, "ldaxrh", "'Wt") \ + V(LDAXR_w, "ldaxr", "'Wt") \ + V(LDAXR_x, "ldaxr", "'Xt") \ + V(STLXP_w, "stlxp", "'Ws, 'Wt, 'Wt2") \ + V(STLXP_x, "stlxp", "'Ws, 'Xt, 'Xt2") \ + V(LDAXP_w, "ldaxp", "'Wt, 'Wt2") \ + V(LDAXP_x, "ldaxp", "'Xt, 'Xt2") \ + V(STLRB_w, "stlrb", "'Wt") \ + V(STLRH_w, "stlrh", "'Wt") \ + V(STLR_w, "stlr", "'Wt") \ + V(STLR_x, "stlr", "'Xt") \ + V(LDARB_w, "ldarb", "'Wt") \ + V(LDARH_w, "ldarh", "'Wt") \ + V(LDAR_w, "ldar", "'Wt") \ + V(LDAR_x, "ldar", "'Xt") \ + V(STLLRB, "stllrb", "'Wt") \ + V(STLLRH, "stllrh", "'Wt") \ + V(STLLR_w, "stllr", "'Wt") \ + V(STLLR_x, "stllr", "'Xt") \ + V(LDLARB, "ldlarb", "'Wt") \ + V(LDLARH, "ldlarh", "'Wt") \ + V(LDLAR_w, "ldlar", "'Wt") \ + V(LDLAR_x, "ldlar", "'Xt") \ + V(CAS_w, "cas", "'Ws, 'Wt") \ + V(CAS_x, "cas", "'Xs, 'Xt") \ + V(CASA_w, "casa", "'Ws, 'Wt") \ + V(CASA_x, "casa", "'Xs, 'Xt") \ + V(CASL_w, "casl", "'Ws, 'Wt") \ + V(CASL_x, "casl", "'Xs, 'Xt") \ + V(CASAL_w, "casal", "'Ws, 'Wt") \ + V(CASAL_x, "casal", "'Xs, 'Xt") \ + V(CASB, "casb", "'Ws, 'Wt") \ + V(CASAB, "casab", "'Ws, 'Wt") \ + V(CASLB, "caslb", "'Ws, 'Wt") \ + V(CASALB, "casalb", "'Ws, 'Wt") \ + V(CASH, "cash", 
"'Ws, 'Wt") \ + V(CASAH, "casah", "'Ws, 'Wt") \ + V(CASLH, "caslh", "'Ws, 'Wt") \ + V(CASALH, "casalh", "'Ws, 'Wt") \ + V(CASP_w, "casp", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASP_x, "casp", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \ + V(CASPA_w, "caspa", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPA_x, "caspa", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \ + V(CASPL_w, "caspl", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPL_x, "caspl", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \ + V(CASPAL_w, "caspal", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPAL_x, "caspal", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") +// clang-format on + + +void Disassembler::VisitLoadStoreExclusive(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStoreExclusiveMask)) { +#define LSX(A, B, C) \ + case A: \ + mnemonic = B; \ + form = C ", ['Xns]"; \ + break; + LOAD_STORE_EXCLUSIVE_LIST(LSX) +#undef LSX + default: + form = "(LoadStoreExclusive)"; + } + + switch (instr->Mask(LoadStoreExclusiveMask)) { + case CASP_w: + case CASP_x: + case CASPA_w: + case CASPA_x: + case CASPL_w: + case CASPL_x: + case CASPAL_w: + case CASPAL_x: + if ((instr->GetRs() % 2 == 1) || (instr->GetRt() % 2 == 1)) { + mnemonic = "unallocated"; + form = "(LoadStoreExclusive)"; + } + break; + } + + Format(instr, mnemonic, form); +} + +#define ATOMIC_MEMORY_SIMPLE_LIST(V) \ + V(LDADD, "add") \ + V(LDCLR, "clr") \ + V(LDEOR, "eor") \ + V(LDSET, "set") \ + V(LDSMAX, "smax") \ + V(LDSMIN, "smin") \ + V(LDUMAX, "umax") \ + V(LDUMIN, "umin") + +void Disassembler::VisitAtomicMemory(const Instruction *instr) { + const int kMaxAtomicOpMnemonicLength = 16; + const char *mnemonic; + const char *form = "'Ws, 'Wt, ['Xns]"; + + switch (instr->Mask(AtomicMemoryMask)) { +#define AMS(A, MN) \ + case A##B: \ + mnemonic = MN "b"; \ + break; \ + case A##AB: \ + mnemonic = MN "ab"; \ + break; \ + case A##LB: \ + mnemonic = MN "lb"; \ + break; \ + case A##ALB: \ + mnemonic = MN "alb"; \ + break; \ + case A##H: \ + mnemonic = MN "h"; \ + break; \ + case A##AH: \ + mnemonic = MN "ah"; \ + break; \ + case A##LH: \ + mnemonic = MN "lh"; \ + break; \ + case A##ALH: \ + mnemonic = MN "alh"; \ + break; \ + case A##_w: \ + mnemonic = MN; \ + break; \ + case A##A_w: \ + mnemonic = MN "a"; \ + break; \ + case A##L_w: \ + mnemonic = MN "l"; \ + break; \ + case A##AL_w: \ + mnemonic = MN "al"; \ + break; \ + case A##_x: \ + mnemonic = MN; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##A_x: \ + mnemonic = MN "a"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##L_x: \ + mnemonic = MN "l"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##AL_x: \ + mnemonic = MN "al"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; + ATOMIC_MEMORY_SIMPLE_LIST(AMS) + + // SWP has the same semantics as ldadd etc but without the store aliases. 
+ AMS(SWP, "swp") +#undef AMS + + case LDAPRB: + mnemonic = "ldaprb"; + form = "'Wt, ['Xns]"; + break; + case LDAPRH: + mnemonic = "ldaprh"; + form = "'Wt, ['Xns]"; + break; + case LDAPR_w: + mnemonic = "ldapr"; + form = "'Wt, ['Xns]"; + break; + case LDAPR_x: + mnemonic = "ldapr"; + form = "'Xt, ['Xns]"; + break; + default: + mnemonic = "unimplemented"; + form = "(AtomicMemory)"; + } + + const char *prefix = ""; + switch (instr->Mask(AtomicMemoryMask)) { +#define AMS(A, MN) \ + case A##AB: \ + case A##ALB: \ + case A##AH: \ + case A##ALH: \ + case A##A_w: \ + case A##AL_w: \ + case A##A_x: \ + case A##AL_x: \ + prefix = "ld"; \ + break; \ + case A##B: \ + case A##LB: \ + case A##H: \ + case A##LH: \ + case A##_w: \ + case A##L_w: { \ + prefix = "ld"; \ + unsigned rt = instr->GetRt(); \ + if (Register(rt, 32).IsZero()) { \ + prefix = "st"; \ + form = "'Ws, ['Xns]"; \ + } \ + break; \ + } \ + case A##_x: \ + case A##L_x: { \ + prefix = "ld"; \ + unsigned rt = instr->GetRt(); \ + if (Register(rt, 64).IsZero()) { \ + prefix = "st"; \ + form = "'Xs, ['Xns]"; \ + } \ + break; \ + } + ATOMIC_MEMORY_SIMPLE_LIST(AMS) +#undef AMS + } + + char buffer[kMaxAtomicOpMnemonicLength]; + if (strlen(prefix) > 0) { + snprintf(buffer, kMaxAtomicOpMnemonicLength, "%s%s", prefix, mnemonic); + mnemonic = buffer; + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPCompare(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fn, 'Fm"; + const char *form_zero = "'Fn, #0.0"; + + switch (instr->Mask(FPCompareMask)) { + case FCMP_h_zero: + case FCMP_s_zero: + case FCMP_d_zero: + form = form_zero; + VIXL_FALLTHROUGH(); + case FCMP_h: + case FCMP_s: + case FCMP_d: + mnemonic = "fcmp"; + break; + case FCMPE_h_zero: + case FCMPE_s_zero: + case FCMPE_d_zero: + form = form_zero; + VIXL_FALLTHROUGH(); + case FCMPE_h: + case FCMPE_s: + case FCMPE_d: + mnemonic = "fcmpe"; + break; + default: + form = "(FPCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalCompare(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fn, 'Fm, 'INzcv, 'Cond"; + + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_h: + case FCCMP_s: + case FCCMP_d: + mnemonic = "fccmp"; + break; + case FCCMPE_h: + case FCCMPE_s: + case FCCMPE_d: + mnemonic = "fccmpe"; + break; + default: + form = "(FPConditionalCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalSelect(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Cond"; + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_h: + case FCSEL_s: + case FCSEL_d: + mnemonic = "fcsel"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing1Source(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fd, 'Fn"; + + switch (instr->Mask(FPDataProcessing1SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FMOV, "fmov"); + FORMAT(FABS, "fabs"); + FORMAT(FNEG, "fneg"); + FORMAT(FSQRT, "fsqrt"); + FORMAT(FRINTN, "frintn"); + FORMAT(FRINTP, "frintp"); + FORMAT(FRINTM, "frintm"); + FORMAT(FRINTZ, "frintz"); + FORMAT(FRINTA, "frinta"); + FORMAT(FRINTX, "frintx"); + FORMAT(FRINTI, "frinti"); +#undef FORMAT + case FCVT_ds: + mnemonic = "fcvt"; + form = "'Dd, 'Sn"; + break; + case FCVT_sd: + mnemonic =
"fcvt"; + form = "'Sd, 'Dn"; + break; + case FCVT_hs: + mnemonic = "fcvt"; + form = "'Hd, 'Sn"; + break; + case FCVT_sh: + mnemonic = "fcvt"; + form = "'Sd, 'Hn"; + break; + case FCVT_dh: + mnemonic = "fcvt"; + form = "'Dd, 'Hn"; + break; + case FCVT_hd: + mnemonic = "fcvt"; + form = "'Hd, 'Dn"; + break; + default: + form = "(FPDataProcessing1Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing2Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm"; + + switch (instr->Mask(FPDataProcessing2SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FADD, "fadd"); + FORMAT(FSUB, "fsub"); + FORMAT(FMUL, "fmul"); + FORMAT(FDIV, "fdiv"); + FORMAT(FMAX, "fmax"); + FORMAT(FMIN, "fmin"); + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FNMUL, "fnmul"); +#undef FORMAT + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing3Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Fa"; + + switch (instr->Mask(FPDataProcessing3SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FMADD, "fmadd"); + FORMAT(FMSUB, "fmsub"); + FORMAT(FNMADD, "fnmadd"); + FORMAT(FNMSUB, "fnmsub"); +#undef FORMAT + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "(FPImmediate)"; + switch (instr->Mask(FPImmediateMask)) { + case FMOV_h_imm: + mnemonic = "fmov"; + form = "'Hd, 'IFPHalf"; + break; + case FMOV_s_imm: + mnemonic = "fmov"; + form = "'Sd, 'IFPSingle"; + break; + case FMOV_d_imm: + mnemonic = "fmov"; + form = "'Dd, 'IFPDouble"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPIntegerConvert(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(FPIntegerConvert)"; + const char *form_rf = "'Rd, 'Fn"; + const char *form_fr = "'Fd, 'Rn"; + + switch (instr->Mask(FPIntegerConvertMask)) { + case FMOV_wh: + case FMOV_xh: + case FMOV_ws: + case FMOV_xd: + mnemonic = "fmov"; + form = form_rf; + break; + case FMOV_hw: + case FMOV_hx: + case FMOV_sw: + case FMOV_dx: + mnemonic = "fmov"; + form = form_fr; + break; + case FMOV_d1_x: + mnemonic = "fmov"; + form = "'Vd.D[1], 'Rn"; + break; + case FMOV_x_d1: + mnemonic = "fmov"; + form = "'Rd, 'Vn.D[1]"; + break; + case FCVTAS_wh: + case FCVTAS_xh: + case FCVTAS_ws: + case FCVTAS_xs: + case FCVTAS_wd: + case FCVTAS_xd: + mnemonic = "fcvtas"; + form = form_rf; + break; + case FCVTAU_wh: + case FCVTAU_xh: + case FCVTAU_ws: + case FCVTAU_xs: + case FCVTAU_wd: + case FCVTAU_xd: + mnemonic = "fcvtau"; + form = form_rf; + break; + case FCVTMS_wh: + case FCVTMS_xh: + case FCVTMS_ws: + case FCVTMS_xs: + case FCVTMS_wd: + case FCVTMS_xd: + mnemonic = "fcvtms"; + form = form_rf; + break; + case FCVTMU_wh: + case FCVTMU_xh: + case FCVTMU_ws: + case FCVTMU_xs: + case FCVTMU_wd: + case FCVTMU_xd: + mnemonic = "fcvtmu"; + form = form_rf; + break; + case FCVTNS_wh: + case FCVTNS_xh: + case FCVTNS_ws: + case FCVTNS_xs: + case FCVTNS_wd: + case FCVTNS_xd: + mnemonic = "fcvtns"; + form = form_rf; + break; + case FCVTNU_wh: + case FCVTNU_xh: + case FCVTNU_ws: + case FCVTNU_xs: + case FCVTNU_wd: + case FCVTNU_xd: + 
mnemonic = "fcvtnu"; + form = form_rf; + break; + case FCVTZU_wh: + case FCVTZU_xh: + case FCVTZU_ws: + case FCVTZU_xs: + case FCVTZU_wd: + case FCVTZU_xd: + mnemonic = "fcvtzu"; + form = form_rf; + break; + case FCVTZS_wh: + case FCVTZS_xh: + case FCVTZS_ws: + case FCVTZS_xs: + case FCVTZS_wd: + case FCVTZS_xd: + mnemonic = "fcvtzs"; + form = form_rf; + break; + case FCVTPU_wh: + case FCVTPU_xh: + case FCVTPU_xs: + case FCVTPU_wd: + case FCVTPU_ws: + case FCVTPU_xd: + mnemonic = "fcvtpu"; + form = form_rf; + break; + case FCVTPS_wh: + case FCVTPS_xh: + case FCVTPS_ws: + case FCVTPS_xs: + case FCVTPS_wd: + case FCVTPS_xd: + mnemonic = "fcvtps"; + form = form_rf; + break; + case SCVTF_hw: + case SCVTF_hx: + case SCVTF_sw: + case SCVTF_sx: + case SCVTF_dw: + case SCVTF_dx: + mnemonic = "scvtf"; + form = form_fr; + break; + case UCVTF_hw: + case UCVTF_hx: + case UCVTF_sw: + case UCVTF_sx: + case UCVTF_dw: + case UCVTF_dx: + mnemonic = "ucvtf"; + form = form_fr; + break; + case FJCVTZS: + mnemonic = "fjcvtzs"; + form = form_rf; + break; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPFixedPointConvert(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Fn, 'IFPFBits"; + const char *form_fr = "'Fd, 'Rn, 'IFPFBits"; + + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_wh_fixed: + case FCVTZS_xh_fixed: + case FCVTZS_ws_fixed: + case FCVTZS_xs_fixed: + case FCVTZS_wd_fixed: + case FCVTZS_xd_fixed: + mnemonic = "fcvtzs"; + break; + case FCVTZU_wh_fixed: + case FCVTZU_xh_fixed: + case FCVTZU_ws_fixed: + case FCVTZU_xs_fixed: + case FCVTZU_wd_fixed: + case FCVTZU_xd_fixed: + mnemonic = "fcvtzu"; + break; + case SCVTF_hw_fixed: + case SCVTF_hx_fixed: + case SCVTF_sw_fixed: + case SCVTF_sx_fixed: + case SCVTF_dw_fixed: + case SCVTF_dx_fixed: + mnemonic = "scvtf"; + form = form_fr; + break; + case UCVTF_hw_fixed: + case UCVTF_hx_fixed: + case UCVTF_sw_fixed: + case UCVTF_sx_fixed: + case UCVTF_dw_fixed: + case UCVTF_dx_fixed: + mnemonic = "ucvtf"; + form = form_fr; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + +// clang-format off +#define PAUTH_SYSTEM_MNEMONICS(V) \ + V(PACIA1716, "pacia1716") \ + V(PACIB1716, "pacib1716") \ + V(AUTIA1716, "autia1716") \ + V(AUTIB1716, "autib1716") \ + V(PACIAZ, "paciaz") \ + V(PACIASP, "paciasp") \ + V(PACIBZ, "pacibz") \ + V(PACIBSP, "pacibsp") \ + V(AUTIAZ, "autiaz") \ + V(AUTIASP, "autiasp") \ + V(AUTIBZ, "autibz") \ + V(AUTIBSP, "autibsp") +// clang-format on + +void Disassembler::VisitSystem(const Instruction *instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. + const char *mnemonic = "unimplemented"; + const char *form = "(System)"; + if (instr->GetInstructionBits() == XPACLRI) { + mnemonic = "xpaclri"; + form = NULL; + } else if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) { + switch (instr->Mask(SystemPAuthMask)) { +#define PAUTH_CASE(NAME, MN) \ + case NAME: \ + mnemonic = MN; \ + form = NULL; \ + break; + + PAUTH_SYSTEM_MNEMONICS(PAUTH_CASE) +#undef PAUTH_CASE + } + } else if (instr->Mask(SystemExclusiveMonitorFMask) == + SystemExclusiveMonitorFixed) { + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + mnemonic = "clrex"; + form = (instr->GetCRm() == 0xf) ? 
NULL : "'IX"; + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + mnemonic = "mrs"; + form = "'Xt, 'IY"; + break; + } + case MSR: { + mnemonic = "msr"; + form = "'IY, 'Xt"; + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + switch (instr->GetImmHint()) { + case NOP: { + form = NULL; + mnemonic = "nop"; + break; + } + case YIELD: { + form = NULL; + mnemonic = "yield"; + break; + } + case WFE: { + form = NULL; + mnemonic = "wfe"; + break; + } + case WFI: { + form = NULL; + mnemonic = "wfi"; + break; + } + case SEV: { + form = NULL; + mnemonic = "sev"; + break; + } + case SEVL: { + form = NULL; + mnemonic = "sevl"; + break; + } + case ESB: { + form = NULL; + mnemonic = "esb"; + break; + } + case CSDB: { + form = NULL; + mnemonic = "csdb"; + break; + } + default: { + // Fall back to 'hint #'. + form = "'IH"; + mnemonic = "hint"; + break; + } + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + switch (instr->Mask(MemBarrierMask)) { + case DMB: { + mnemonic = "dmb"; + form = "'M"; + break; + } + case DSB: { + mnemonic = "dsb"; + form = "'M"; + break; + } + case ISB: { + mnemonic = "isb"; + form = NULL; + break; + } + } + } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) { + switch (instr->GetSysOp()) { + case IVAU: + mnemonic = "ic"; + form = "ivau, 'Xt"; + break; + case CVAC: + mnemonic = "dc"; + form = "cvac, 'Xt"; + break; + case CVAU: + mnemonic = "dc"; + form = "cvau, 'Xt"; + break; + case CIVAC: + mnemonic = "dc"; + form = "civac, 'Xt"; + break; + case ZVA: + mnemonic = "dc"; + form = "zva, 'Xt"; + break; + default: + mnemonic = "sys"; + if (instr->GetRt() == 31) { + form = "'G1, 'Kn, 'Km, 'G2"; + } else { + form = "'G1, 'Kn, 'Km, 'G2, 'Xt"; + } + break; + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitException(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'IDebug"; + + switch (instr->Mask(ExceptionMask)) { + case HLT: + mnemonic = "hlt"; + break; + case BRK: + mnemonic = "brk"; + break; + case SVC: + mnemonic = "svc"; + break; + case HVC: + mnemonic = "hvc"; + break; + case SMC: + mnemonic = "smc"; + break; + case DCPS1: + mnemonic = "dcps1"; + form = "{'IDebug}"; + break; + case DCPS2: + mnemonic = "dcps2"; + form = "{'IDebug}"; + break; + case DCPS3: + mnemonic = "dcps3"; + form = "{'IDebug}"; + break; + default: + form = "(Exception)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCrypto2RegSHA(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCrypto3RegSHA(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCryptoAES(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitNEON2RegMisc(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s"; + const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0"; + const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0"; + NEONFormatDecoder nfd(instr); + + static const NEONFormatMap map_lp_ta = + {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; + + static const NEONFormatMap map_cvt_ta = {{22}, {NF_4S, NF_2D}}; + + static const NEONFormatMap map_cvt_tb = {{22, 30}, + {NF_4H, NF_8H, NF_2S, NF_4S}}; + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the 
field to encode the operation. + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: + mnemonic = "rev64"; + break; + case NEON_REV32: + mnemonic = "rev32"; + break; + case NEON_REV16: + mnemonic = "rev16"; + break; + case NEON_SADDLP: + mnemonic = "saddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADDLP: + mnemonic = "uaddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SUQADD: + mnemonic = "suqadd"; + break; + case NEON_USQADD: + mnemonic = "usqadd"; + break; + case NEON_CLS: + mnemonic = "cls"; + break; + case NEON_CLZ: + mnemonic = "clz"; + break; + case NEON_CNT: + mnemonic = "cnt"; + break; + case NEON_SADALP: + mnemonic = "sadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADALP: + mnemonic = "uadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SQABS: + mnemonic = "sqabs"; + break; + case NEON_SQNEG: + mnemonic = "sqneg"; + break; + case NEON_CMGT_zero: + mnemonic = "cmgt"; + form = form_cmp_zero; + break; + case NEON_CMGE_zero: + mnemonic = "cmge"; + form = form_cmp_zero; + break; + case NEON_CMEQ_zero: + mnemonic = "cmeq"; + form = form_cmp_zero; + break; + case NEON_CMLE_zero: + mnemonic = "cmle"; + form = form_cmp_zero; + break; + case NEON_CMLT_zero: + mnemonic = "cmlt"; + form = form_cmp_zero; + break; + case NEON_ABS: + mnemonic = "abs"; + break; + case NEON_NEG: + mnemonic = "neg"; + break; + case NEON_RBIT_NOT: + switch (instr->GetFPType()) { + case 0: + mnemonic = "mvn"; + break; + case 1: + mnemonic = "rbit"; + break; + default: + form = "(NEON2RegMisc)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + break; + } + } else { + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPFormatMap()); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + mnemonic = "fabs"; + break; + case NEON_FNEG: + mnemonic = "fneg"; + break; + case NEON_FCVTN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTXN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTL: + mnemonic = instr->Mask(NEON_Q) ? 
"fcvtl2" : "fcvtl"; + nfd.SetFormatMap(0, &map_cvt_ta); + nfd.SetFormatMap(1, &map_cvt_tb); + break; + case NEON_FRINTN: + mnemonic = "frintn"; + break; + case NEON_FRINTA: + mnemonic = "frinta"; + break; + case NEON_FRINTP: + mnemonic = "frintp"; + break; + case NEON_FRINTM: + mnemonic = "frintm"; + break; + case NEON_FRINTX: + mnemonic = "frintx"; + break; + case NEON_FRINTZ: + mnemonic = "frintz"; + break; + case NEON_FRINTI: + mnemonic = "frinti"; + break; + case NEON_FCVTNS: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU: + mnemonic = "fcvtpu"; + break; + case NEON_FCVTMS: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU: + mnemonic = "fcvtau"; + break; + case NEON_FSQRT: + mnemonic = "fsqrt"; + break; + case NEON_SCVTF: + mnemonic = "scvtf"; + break; + case NEON_UCVTF: + mnemonic = "ucvtf"; + break; + case NEON_URSQRTE: + mnemonic = "ursqrte"; + break; + case NEON_URECPE: + mnemonic = "urecpe"; + break; + case NEON_FRSQRTE: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE: + mnemonic = "frecpe"; + break; + case NEON_FCMGT_zero: + mnemonic = "fcmgt"; + form = form_fcmp_zero; + break; + case NEON_FCMGE_zero: + mnemonic = "fcmge"; + form = form_fcmp_zero; + break; + case NEON_FCMEQ_zero: + mnemonic = "fcmeq"; + form = form_fcmp_zero; + break; + case NEON_FCMLE_zero: + mnemonic = "fcmle"; + form = form_fcmp_zero; + break; + case NEON_FCMLT_zero: + mnemonic = "fcmlt"; + form = form_fcmp_zero; + break; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + mnemonic = "xtn"; + break; + case NEON_SQXTN: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN: + mnemonic = "sqxtun"; + break; + case NEON_SHLL: + mnemonic = "shll"; + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(1, nfd.IntegerFormatMap()); + switch (instr->GetNEONSize()) { + case 0: + form = "'Vd.%s, 'Vn.%s, #8"; + break; + case 1: + form = "'Vd.%s, 'Vn.%s, #16"; + break; + case 2: + form = "'Vd.%s, 'Vn.%s, #32"; + break; + default: + Format(instr, "unallocated", "(NEON2RegMisc)"); + return; + } + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + return; + } else { + form = "(NEON2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON2RegMiscFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s"; + const char *form_cmp = "'Vd.%s, 'Vn.%s, #0.0"; + + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + NEONFormatDecoder nfd(instr, &map_half); + + switch (instr->Mask(NEON2RegMiscFP16Mask)) { +// clang-format off +#define FORMAT(A, B) \ + case NEON_##A##_H: \ + mnemonic = B; \ + break; + FORMAT(FABS, "fabs") + FORMAT(FCVTAS, "fcvtas") + FORMAT(FCVTAU, "fcvtau") + FORMAT(FCVTMS, "fcvtms") + FORMAT(FCVTMU, "fcvtmu") + FORMAT(FCVTNS, "fcvtns") + FORMAT(FCVTNU, "fcvtnu") + FORMAT(FCVTPS, "fcvtps") + FORMAT(FCVTPU, "fcvtpu") + FORMAT(FCVTZS, "fcvtzs") + FORMAT(FCVTZU, "fcvtzu") + 
FORMAT(FNEG, "fneg") + FORMAT(FRECPE, "frecpe") + FORMAT(FRINTA, "frinta") + FORMAT(FRINTI, "frinti") + FORMAT(FRINTM, "frintm") + FORMAT(FRINTN, "frintn") + FORMAT(FRINTP, "frintp") + FORMAT(FRINTX, "frintx") + FORMAT(FRINTZ, "frintz") + FORMAT(FRSQRTE, "frsqrte") + FORMAT(FSQRT, "fsqrt") + FORMAT(SCVTF, "scvtf") + FORMAT(UCVTF, "ucvtf") +// clang-format on +#undef FORMAT + + case NEON_FCMEQ_H_zero: + mnemonic = "fcmeq"; + form = form_cmp; + break; + case NEON_FCMGT_H_zero: + mnemonic = "fcmgt"; + form = form_cmp; + break; + case NEON_FCMGE_H_zero: + mnemonic = "fcmge"; + form = form_cmp; + break; + case NEON_FCMLT_H_zero: + mnemonic = "fcmlt"; + form = form_cmp; + break; + case NEON_FCMLE_H_zero: + mnemonic = "fcmle"; + form = form_cmp; + break; + default: + form = "(NEON2RegMiscFP16)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Same(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: + mnemonic = "and"; + break; + case NEON_ORR: + mnemonic = "orr"; + if (instr->GetRm() == instr->GetRn()) { + mnemonic = "mov"; + form = "'Vd.%s, 'Vn.%s"; + } + break; + case NEON_ORN: + mnemonic = "orn"; + break; + case NEON_EOR: + mnemonic = "eor"; + break; + case NEON_BIC: + mnemonic = "bic"; + break; + case NEON_BIF: + mnemonic = "bif"; + break; + case NEON_BIT: + mnemonic = "bit"; + break; + case NEON_BSL: + mnemonic = "bsl"; + break; + default: + form = "(NEON3Same)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + } else { + static const char *mnemonics[] = {"shadd", + "uhadd", + "shadd", + "uhadd", + "sqadd", + "uqadd", + "sqadd", + "uqadd", + "srhadd", + "urhadd", + "srhadd", + "urhadd", + // Handled by logical cases above. + NULL, + NULL, + NULL, + NULL, + "shsub", + "uhsub", + "shsub", + "uhsub", + "sqsub", + "uqsub", + "sqsub", + "uqsub", + "cmgt", + "cmhi", + "cmgt", + "cmhi", + "cmge", + "cmhs", + "cmge", + "cmhs", + "sshl", + "ushl", + "sshl", + "ushl", + "sqshl", + "uqshl", + "sqshl", + "uqshl", + "srshl", + "urshl", + "srshl", + "urshl", + "sqrshl", + "uqrshl", + "sqrshl", + "uqrshl", + "smax", + "umax", + "smax", + "umax", + "smin", + "umin", + "smin", + "umin", + "sabd", + "uabd", + "sabd", + "uabd", + "saba", + "uaba", + "saba", + "uaba", + "add", + "sub", + "add", + "sub", + "cmtst", + "cmeq", + "cmtst", + "cmeq", + "mla", + "mls", + "mla", + "mls", + "mul", + "pmul", + "mul", + "pmul", + "smaxp", + "umaxp", + "smaxp", + "umaxp", + "sminp", + "uminp", + "sminp", + "uminp", + "sqdmulh", + "sqrdmulh", + "sqdmulh", + "sqrdmulh", + "addp", + "unallocated", + "addp", + "unallocated", + "fmaxnm", + "fmaxnmp", + "fminnm", + "fminnmp", + "fmla", + "unallocated", + "fmls", + "unallocated", + "fadd", + "faddp", + "fsub", + "fabd", + "fmulx", + "fmul", + "unallocated", + "unallocated", + "fcmeq", + "fcmge", + "unallocated", + "fcmgt", + "unallocated", + "facge", + "unallocated", + "facgt", + "fmax", + "fmaxp", + "fmin", + "fminp", + "frecps", + "fdiv", + "frsqrts", + "unallocated"}; + + // Operation is determined by the opcode bits (15-11), the top bit of + // size (23) and the U bit (29). 
+ unsigned index = (instr->ExtractBits(15, 11) << 2) | + (instr->ExtractBit(23) << 1) | instr->ExtractBit(29); + VIXL_ASSERT(index < ArrayLength(mnemonics)); + mnemonic = mnemonics[index]; + // Assert that index is not one of the previously handled logical + // instructions. + VIXL_ASSERT(mnemonic != NULL); + + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPFormatMap()); + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON3SameFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + + NEONFormatDecoder nfd(instr); + nfd.SetFormatMaps(nfd.FP16FormatMap()); + + switch (instr->Mask(NEON3SameFP16Mask)) { +#define FORMAT(A, B) \ + case NEON_##A##_H: \ + mnemonic = B; \ + break; + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMLA, "fmla"); + FORMAT(FADD, "fadd"); + FORMAT(FMULX, "fmulx"); + FORMAT(FCMEQ, "fcmeq"); + FORMAT(FMAX, "fmax"); + FORMAT(FRECPS, "frecps"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FMLS, "fmls"); + FORMAT(FSUB, "fsub"); + FORMAT(FMIN, "fmin"); + FORMAT(FRSQRTS, "frsqrts"); + FORMAT(FMAXNMP, "fmaxnmp"); + FORMAT(FADDP, "faddp"); + FORMAT(FMUL, "fmul"); + FORMAT(FCMGE, "fcmge"); + FORMAT(FACGE, "facge"); + FORMAT(FMAXP, "fmaxp"); + FORMAT(FDIV, "fdiv"); + FORMAT(FMINNMP, "fminnmp"); + FORMAT(FABD, "fabd"); + FORMAT(FCMGT, "fcmgt"); + FORMAT(FACGT, "facgt"); + FORMAT(FMINP, "fminp"); +#undef FORMAT + default: + form = "(NEON3SameFP16)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON3SameExtra(const Instruction *instr) { + static const NEONFormatMap map_usdot = {{30}, {NF_8B, NF_16B}}; + + const char *mnemonic = "unallocated"; + const char *form = "(NEON3SameExtra)"; + + NEONFormatDecoder nfd(instr); + + if (instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) { + mnemonic = "fcmla"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVFCNM"; + } else if (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD) { + mnemonic = "fcadd"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVFCNA"; + } else { + form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + mnemonic = "sdot"; + nfd.SetFormatMap(1, &map_usdot); + nfd.SetFormatMap(2, &map_usdot); + break; + case NEON_SQRDMLAH: + mnemonic = "sqrdmlah"; + break; + case NEON_UDOT: + mnemonic = "udot"; + nfd.SetFormatMap(1, &map_usdot); + nfd.SetFormatMap(2, &map_usdot); + break; + case NEON_SQRDMLSH: + mnemonic = "sqrdmlsh"; + break; + } + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Different(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + + NEONFormatDecoder nfd(instr); + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + + // Ignore the Q bit. Appending a "2" suffix is handled later. 
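+ // For instance, the same SMULL encoding prints as "smull v0.8h, v1.8b, + // v2.8b" when Q = 0 and as "smull2 v0.8h, v1.16b, v2.16b" when Q = 1.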
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) { + case NEON_PMULL: + mnemonic = "pmull"; + break; + case NEON_SABAL: + mnemonic = "sabal"; + break; + case NEON_SABDL: + mnemonic = "sabdl"; + break; + case NEON_SADDL: + mnemonic = "saddl"; + break; + case NEON_SMLAL: + mnemonic = "smlal"; + break; + case NEON_SMLSL: + mnemonic = "smlsl"; + break; + case NEON_SMULL: + mnemonic = "smull"; + break; + case NEON_SSUBL: + mnemonic = "ssubl"; + break; + case NEON_SQDMLAL: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL: + mnemonic = "sqdmull"; + break; + case NEON_UABAL: + mnemonic = "uabal"; + break; + case NEON_UABDL: + mnemonic = "uabdl"; + break; + case NEON_UADDL: + mnemonic = "uaddl"; + break; + case NEON_UMLAL: + mnemonic = "umlal"; + break; + case NEON_UMLSL: + mnemonic = "umlsl"; + break; + case NEON_UMULL: + mnemonic = "umull"; + break; + case NEON_USUBL: + mnemonic = "usubl"; + break; + case NEON_SADDW: + mnemonic = "saddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_SSUBW: + mnemonic = "ssubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_UADDW: + mnemonic = "uaddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_USUBW: + mnemonic = "usubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_ADDHN: + mnemonic = "addhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RADDHN: + mnemonic = "raddhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RSUBHN: + mnemonic = "rsubhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_SUBHN: + mnemonic = "subhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + default: + form = "(NEON3Different)"; + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONAcrossLanes(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + const char *form_half = "'Hd, 'Vn.%s"; + bool half_op = false; + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::ScalarFormatMap(), + NEONFormatDecoder::IntegerFormatMap()); + + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + half_op = true; + form = form_half; + nfd.SetFormatMaps(&map_half); + switch (instr->Mask(NEONAcrossLanesFP16Mask)) { + case NEON_FMAXV_H: + mnemonic = "fmaxv"; + break; + case NEON_FMINV_H: + mnemonic = "fminv"; + break; + case NEON_FMAXNMV_H: + mnemonic = "fmaxnmv"; + break; + case NEON_FMINNMV_H: + mnemonic = "fminnmv"; + break; + } + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + nfd.SetFormatMap(1, nfd.FPFormatMap()); + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + mnemonic = "fmaxv"; + break; + case NEON_FMINV: + mnemonic = "fminv"; + break; + case NEON_FMAXNMV: + mnemonic = "fmaxnmv"; + break; + case NEON_FMINNMV: + mnemonic = "fminnmv"; + break; + default: + form = "(NEONAcrossLanes)"; + break; + } + } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) { + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + mnemonic = "addv"; + break; + case NEON_SMAXV: + 
mnemonic = "smaxv"; + break; + case NEON_SMINV: + mnemonic = "sminv"; + break; + case NEON_UMAXV: + mnemonic = "umaxv"; + break; + case NEON_UMINV: + mnemonic = "uminv"; + break; + case NEON_SADDLV: + mnemonic = "saddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + case NEON_UADDLV: + mnemonic = "uaddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + default: + form = "(NEONAcrossLanes)"; + break; + } + } + + if (half_op) { + Format(instr, mnemonic, nfd.Substitute(form)); + } else { + Format(instr, + mnemonic, + nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); + } +} + + +void Disassembler::VisitNEONByIndexedElement(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + bool l_instr = false; + bool fp_instr = false; + bool cn_instr = false; + bool half_instr = false; + + const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]"; + + static const NEONFormatMap map_ta = {{23, 22}, {NF_UNDEF, NF_4S, NF_2D}}; + static const NEONFormatMap map_cn = + {{23, 22, 30}, + {NF_UNDEF, NF_UNDEF, NF_4H, NF_8H, NF_UNDEF, NF_4S, NF_UNDEF, NF_UNDEF}}; + static const NEONFormatMap map_usdot = {{30}, {NF_8B, NF_16B}}; + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + NEONFormatDecoder nfd(instr, + &map_ta, + NEONFormatDecoder::IntegerFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SMULL_byelement: + mnemonic = "smull"; + l_instr = true; + break; + case NEON_UMULL_byelement: + mnemonic = "umull"; + l_instr = true; + break; + case NEON_SMLAL_byelement: + mnemonic = "smlal"; + l_instr = true; + break; + case NEON_UMLAL_byelement: + mnemonic = "umlal"; + l_instr = true; + break; + case NEON_SMLSL_byelement: + mnemonic = "smlsl"; + l_instr = true; + break; + case NEON_UMLSL_byelement: + mnemonic = "umlsl"; + l_instr = true; + break; + case NEON_SQDMULL_byelement: + mnemonic = "sqdmull"; + l_instr = true; + break; + case NEON_SQDMLAL_byelement: + mnemonic = "sqdmlal"; + l_instr = true; + break; + case NEON_SQDMLSL_byelement: + mnemonic = "sqdmlsl"; + l_instr = true; + break; + case NEON_MUL_byelement: + mnemonic = "mul"; + break; + case NEON_MLA_byelement: + mnemonic = "mla"; + break; + case NEON_MLS_byelement: + mnemonic = "mls"; + break; + case NEON_SQDMULH_byelement: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement: + mnemonic = "sqrdmulh"; + break; + case NEON_SDOT_byelement: + mnemonic = "sdot"; + form = "'Vd.%s, 'Vn.%s, 'Ve.4b['IVByElemIndex]"; + nfd.SetFormatMap(1, &map_usdot); + break; + case NEON_SQRDMLAH_byelement: + mnemonic = "sqrdmlah"; + break; + case NEON_UDOT_byelement: + mnemonic = "udot"; + form = "'Vd.%s, 'Vn.%s, 'Ve.4b['IVByElemIndex]"; + nfd.SetFormatMap(1, &map_usdot); + break; + case NEON_SQRDMLSH_byelement: + mnemonic = "sqrdmlsh"; + break; + default: + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: + mnemonic = "fmul"; + fp_instr = true; + break; + case NEON_FMLA_byelement: + mnemonic = "fmla"; + fp_instr = true; + break; + case NEON_FMLS_byelement: + mnemonic = "fmls"; + fp_instr = true; + break; + case NEON_FMULX_byelement: + mnemonic = "fmulx"; + fp_instr = true; + break; + case NEON_FMLA_H_byelement: + mnemonic = "fmla"; + half_instr = true; + break; + case NEON_FMLS_H_byelement: + mnemonic = "fmls"; + half_instr = true; + break; + case NEON_FMUL_H_byelement: + mnemonic = "fmul"; + half_instr = true; + break; + case NEON_FMULX_H_byelement: + mnemonic = "fmulx"; + 
half_instr = true; + break; + default: + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + mnemonic = "fcmla"; + cn_instr = true; + form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndexRot], 'ILFCNR"; + break; + } + } + } + + if (half_instr) { + form = "'Vd.%s, 'Vn.%s, 'Ve.h['IVByElemIndex]"; + nfd.SetFormatMaps(&map_half, &map_half); + Format(instr, mnemonic, nfd.Substitute(form)); + } else if (l_instr) { + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + } else if (fp_instr) { + nfd.SetFormatMap(0, nfd.FPFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } else if (cn_instr) { + nfd.SetFormatMap(0, &map_cn); + nfd.SetFormatMap(1, &map_cn); + Format(instr, mnemonic, nfd.Substitute(form)); + } else { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } +} + + +void Disassembler::VisitNEONCopy(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONCopy)"; + + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::TriangularFormatMap(), + NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]"; + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Vd.%s['IVInsIndex1], 'Xn"; + } else { + form = "'Vd.%s['IVInsIndex1], 'Wn"; + } + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + if (instr->Mask(NEON_Q) || ((instr->GetImmNEON5() & 7) == 4)) { + mnemonic = "mov"; + } else { + mnemonic = "umov"; + } + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Xd, 'Vn.%s['IVInsIndex1]"; + } else { + form = "'Wd, 'Vn.%s['IVInsIndex1]"; + } + } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) { + mnemonic = "smov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Rdq, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + mnemonic = "dup"; + form = "'Vd.%s, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + mnemonic = "dup"; + if (nfd.GetVectorFormat() == kFormat2D) { + form = "'Vd.%s, 'Xn"; + } else { + form = "'Vd.%s, 'Wn"; + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONExtract(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONExtract)"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + mnemonic = "ext"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + const char *form_1v = "{'Vt.%1$s}, ['Xns]"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]"; + const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD1_1v: + mnemonic = "ld1"; + form = form_1v; + break; + case 
NEON_LD1_2v: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4: + mnemonic = "ld4"; + form = form_4v; + break; + case NEON_ST1_1v: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v: + mnemonic = "st1"; + form = form_3v; + break; + case NEON_ST1_4v: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD2: + case NEON_LD3: + case NEON_LD4: + case NEON_ST2: + case NEON_ST3: + case NEON_ST4: + // LD[2-4] and ST[2-4] cannot use .1d format. + allocated = (instr->GetNEONQ() != 0) || (instr->GetNEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStruct)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3"; + const char *form_4v = + "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_1v_post: + mnemonic = "ld1"; + form = form_1v; + break; + case NEON_LD1_2v_post: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v_post: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v_post: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2_post: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3_post: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4_post: + mnemonic = "ld4"; + form = form_4v; + break; + case NEON_ST1_1v_post: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v_post: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v_post: + mnemonic = "st1"; + form = form_3v; + break; + case NEON_ST1_4v_post: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2_post: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3_post: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4_post: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD2_post: + case NEON_LD3_post: + case NEON_LD4_post: + case NEON_ST2_post: + case NEON_ST3_post: + case NEON_ST4_post: + // LD[2-4] and ST[2-4] cannot use .1d format. 
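+ // For example, an LD2 encoding with Q = 0 and size = 3 (which would name + // "ld2 {v0.1d, v1.1d}, ...") is reported as unallocated below, while the + // .2d form (Q = 1, size = 3) is accepted.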
+ allocated = (instr->GetNEONQ() != 0) || (instr->GetNEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_b: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_ST1_b: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns]"; + break; + case NEON_LD2_b: + case NEON_ST2_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD2_h: + case NEON_ST2_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD2_s: + case NEON_ST2_s: + VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d); + VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d); + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD2R: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns]"; + break; + case NEON_LD3_b: + case NEON_ST3_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD3_h: + case NEON_ST3_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD3_s: + case NEON_ST3_s: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD3R: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]"; + break; + case NEON_LD4_b: + case NEON_ST4_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD4_h: + case NEON_ST4_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? 
"ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD4_s: + case NEON_ST4_s: + VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d); + VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d); + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD4R: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_h: + case NEON_LD2_h: + case NEON_LD3_h: + case NEON_LD4_h: + case NEON_ST1_h: + case NEON_ST2_h: + case NEON_ST3_h: + case NEON_ST4_h: + VIXL_ASSERT(allocated); + allocated = ((instr->GetNEONLSSize() & 1) == 0); + break; + case NEON_LD1_s: + case NEON_LD2_s: + case NEON_LD3_s: + case NEON_LD4_s: + case NEON_ST1_s: + case NEON_ST2_s: + case NEON_ST3_s: + case NEON_ST4_s: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONLSSize() <= 1) && + ((instr->GetNEONLSSize() == 0) || (instr->GetNEONS() == 0)); + break; + case NEON_LD1R: + case NEON_LD2R: + case NEON_LD3R: + case NEON_LD4R: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONS() == 0); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStruct)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b_post: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h_post: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s_post: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_ST1_b_post: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h_post: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s_post: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R_post: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns], 'Xmz1"; + break; + case NEON_LD2_b_post: + case NEON_ST2_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2"; + break; + case NEON_ST2_h_post: + case NEON_LD2_h_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4"; + break; + case NEON_LD2_s_post: + case NEON_ST2_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? 
"ld2" : "st2"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8"; + else + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16"; + break; + case NEON_LD2R_post: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2"; + break; + case NEON_LD3_b_post: + case NEON_ST3_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3"; + break; + case NEON_LD3_h_post: + case NEON_ST3_h_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6"; + break; + case NEON_LD3_s_post: + case NEON_ST3_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24"; + break; + case NEON_LD3R_post: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3"; + break; + case NEON_LD4_b_post: + case NEON_ST4_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4"; + break; + case NEON_LD4_h_post: + case NEON_ST4_h_post: + mnemonic = (instr->GetLdStXLoad()) == 1 ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8"; + break; + case NEON_LD4_s_post: + case NEON_ST4_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32"; + break; + case NEON_LD4R_post: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4"; + break; + default: + break; + } + + // Work out unallocated encodings. 
+ bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_h_post: + case NEON_LD2_h_post: + case NEON_LD3_h_post: + case NEON_LD4_h_post: + case NEON_ST1_h_post: + case NEON_ST2_h_post: + case NEON_ST3_h_post: + case NEON_ST4_h_post: + VIXL_ASSERT(allocated); + allocated = ((instr->GetNEONLSSize() & 1) == 0); + break; + case NEON_LD1_s_post: + case NEON_LD2_s_post: + case NEON_LD3_s_post: + case NEON_LD4_s_post: + case NEON_ST1_s_post: + case NEON_ST2_s_post: + case NEON_ST3_s_post: + case NEON_ST4_s_post: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONLSSize() <= 1) && + ((instr->GetNEONLSSize() == 0) || (instr->GetNEONS() == 0)); + break; + case NEON_LD1R_post: + case NEON_LD2R_post: + case NEON_LD3R_post: + case NEON_LD4R_post: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONS() == 0); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONModifiedImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1"; + + int half_enc = instr->ExtractBit(11); + int cmode = instr->GetNEONCmode(); + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->GetNEONQ(); + int op = instr->GetNEONModImmOp(); + + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + static const NEONFormatMap map_h = {{30}, {NF_4H, NF_8H}}; + static const NEONFormatMap map_s = {{30}, {NF_2S, NF_4S}}; + NEONFormatDecoder nfd(instr, &map_b); + if (cmode_3 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_s); + } else { // cmode<3> == '1'. + if (cmode_2 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_h); + } else { // cmode<2> == '1'. + if (cmode_1 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2"; + nfd.SetFormatMap(0, &map_s); + } else { // cmode<1> == '1'. + if (cmode_0 == 0) { + mnemonic = "movi"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImm8"; + } else { + form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm"; + } + } else { // cmode<0> == '1' + mnemonic = "fmov"; + if (half_enc == 1) { + form = "'Vt.%s, 'IVMIImmFPHalf"; + nfd.SetFormatMap(0, &map_h); + } else if (op == 0) { + form = "'Vt.%s, 'IVMIImmFPSingle"; + nfd.SetFormatMap(0, &map_s); + } else { + if (q == 1) { + form = "'Vt.2d, 'IVMIImmFPDouble"; + } else { + mnemonic = "unallocated"; + form = "(NEONModifiedImmediate)"; + } + } + } + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONScalar2RegMisc(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn"; + const char *form_0 = "%sd, %sn, #0"; + const char *form_fp0 = "%sd, %sn, #0.0"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
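+ // (Here, e.g. cmgt/neg/sqabs take their element size directly from bits + // 23:22, while the FP cases in the else-branch below use only bit 22 to + // choose between S and D registers.)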
+ switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMGT_zero_scalar: + mnemonic = "cmgt"; + form = form_0; + break; + case NEON_CMGE_zero_scalar: + mnemonic = "cmge"; + form = form_0; + break; + case NEON_CMLE_zero_scalar: + mnemonic = "cmle"; + form = form_0; + break; + case NEON_CMLT_zero_scalar: + mnemonic = "cmlt"; + form = form_0; + break; + case NEON_CMEQ_zero_scalar: + mnemonic = "cmeq"; + form = form_0; + break; + case NEON_NEG_scalar: + mnemonic = "neg"; + break; + case NEON_SQNEG_scalar: + mnemonic = "sqneg"; + break; + case NEON_ABS_scalar: + mnemonic = "abs"; + break; + case NEON_SQABS_scalar: + mnemonic = "sqabs"; + break; + case NEON_SUQADD_scalar: + mnemonic = "suqadd"; + break; + case NEON_USQADD_scalar: + mnemonic = "usqadd"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } else { + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRSQRTE_scalar: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE_scalar: + mnemonic = "frecpe"; + break; + case NEON_SCVTF_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_scalar: + mnemonic = "ucvtf"; + break; + case NEON_FCMGT_zero_scalar: + mnemonic = "fcmgt"; + form = form_fp0; + break; + case NEON_FCMGE_zero_scalar: + mnemonic = "fcmge"; + form = form_fp0; + break; + case NEON_FCMLE_zero_scalar: + mnemonic = "fcmle"; + form = form_fp0; + break; + case NEON_FCMLT_zero_scalar: + mnemonic = "fcmlt"; + form = form_fp0; + break; + case NEON_FCMEQ_zero_scalar: + mnemonic = "fcmeq"; + form = form_fp0; + break; + case NEON_FRECPX_scalar: + mnemonic = "frecpx"; + break; + case NEON_FCVTNS_scalar: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU_scalar: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS_scalar: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU_scalar: + mnemonic = "fcvtpu"; + break; + case NEON_FCVTMS_scalar: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU_scalar: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS_scalar: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU_scalar: + mnemonic = "fcvtau"; + break; + case NEON_FCVTXN_scalar: + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + mnemonic = "fcvtxn"; + break; + default: + nfd.SetFormatMap(0, nfd.ScalarFormatMap()); + nfd.SetFormatMap(1, nfd.LongScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN_scalar: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN_scalar: + mnemonic = "sqxtun"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void Disassembler::VisitNEONScalar2RegMiscFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Hd, 'Hn"; + const char *form_fp0 = "'Hd, 'Hn, #0.0"; + + switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) { +#define FORMAT(A, B) \ + case NEON_##A##_H_scalar: \ + mnemonic = B; \ + break; + // clang-format off + FORMAT(FCVTNS, "fcvtns") + FORMAT(FCVTMS, "fcvtms") + FORMAT(FCVTAS, "fcvtas") + FORMAT(SCVTF, "scvtf") + FORMAT(FCVTPS, "fcvtps") + FORMAT(FCVTZS, "fcvtzs") + FORMAT(FRECPE, "frecpe") + FORMAT(FRECPX, "frecpx") + FORMAT(FCVTNU, "fcvtnu") + FORMAT(FCVTMU, "fcvtmu") + FORMAT(FCVTAU, "fcvtau") + 
FORMAT(UCVTF, "ucvtf") + FORMAT(FCVTPU, "fcvtpu") + FORMAT(FCVTZU, "fcvtzu") + FORMAT(FRSQRTE, "frsqrte") +// clang-format on +#undef FORMAT +#define FORMAT(A, B) \ + case NEON_##A##_H_zero_scalar: \ + mnemonic = B; \ + form = form_fp0; \ + break; + FORMAT(FCMGT, "fcmgt") + FORMAT(FCMEQ, "fcmeq") + FORMAT(FCMLT, "fcmlt") + FORMAT(FCMGE, "fcmge") + FORMAT(FCMLE, "fcmle") +#undef FORMAT + + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitNEONScalar3Diff(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::LongScalarFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL_scalar: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL_scalar: + mnemonic = "sqdmull"; + break; + default: + form = "(NEONScalar3Diff)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalar3Same(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FACGE_scalar: + mnemonic = "facge"; + break; + case NEON_FACGT_scalar: + mnemonic = "facgt"; + break; + case NEON_FCMEQ_scalar: + mnemonic = "fcmeq"; + break; + case NEON_FCMGE_scalar: + mnemonic = "fcmge"; + break; + case NEON_FCMGT_scalar: + mnemonic = "fcmgt"; + break; + case NEON_FMULX_scalar: + mnemonic = "fmulx"; + break; + case NEON_FRECPS_scalar: + mnemonic = "frecps"; + break; + case NEON_FRSQRTS_scalar: + mnemonic = "frsqrts"; + break; + case NEON_FABD_scalar: + mnemonic = "fabd"; + break; + default: + form = "(NEONScalar3Same)"; + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + mnemonic = "add"; + break; + case NEON_SUB_scalar: + mnemonic = "sub"; + break; + case NEON_CMEQ_scalar: + mnemonic = "cmeq"; + break; + case NEON_CMGE_scalar: + mnemonic = "cmge"; + break; + case NEON_CMGT_scalar: + mnemonic = "cmgt"; + break; + case NEON_CMHI_scalar: + mnemonic = "cmhi"; + break; + case NEON_CMHS_scalar: + mnemonic = "cmhs"; + break; + case NEON_CMTST_scalar: + mnemonic = "cmtst"; + break; + case NEON_UQADD_scalar: + mnemonic = "uqadd"; + break; + case NEON_SQADD_scalar: + mnemonic = "sqadd"; + break; + case NEON_UQSUB_scalar: + mnemonic = "uqsub"; + break; + case NEON_SQSUB_scalar: + mnemonic = "sqsub"; + break; + case NEON_USHL_scalar: + mnemonic = "ushl"; + break; + case NEON_SSHL_scalar: + mnemonic = "sshl"; + break; + case NEON_UQSHL_scalar: + mnemonic = "uqshl"; + break; + case NEON_SQSHL_scalar: + mnemonic = "sqshl"; + break; + case NEON_URSHL_scalar: + mnemonic = "urshl"; + break; + case NEON_SRSHL_scalar: + mnemonic = "srshl"; + break; + case NEON_UQRSHL_scalar: + mnemonic = "uqrshl"; + break; + case NEON_SQRSHL_scalar: + mnemonic = "sqrshl"; + break; + case NEON_SQDMULH_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_scalar: + mnemonic = "sqrdmulh"; + break; + default: + form = "(NEONScalar3Same)"; + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void Disassembler::VisitNEONScalar3SameFP16(const Instruction *instr) { + const char *mnemonic = NULL; + const 
char *form = "'Hd, 'Hn, 'Hm"; + + switch (instr->Mask(NEONScalar3SameFP16Mask)) { + case NEON_FABD_H_scalar: + mnemonic = "fabd"; + break; + case NEON_FMULX_H_scalar: + mnemonic = "fmulx"; + break; + case NEON_FCMEQ_H_scalar: + mnemonic = "fcmeq"; + break; + case NEON_FCMGE_H_scalar: + mnemonic = "fcmge"; + break; + case NEON_FCMGT_H_scalar: + mnemonic = "fcmgt"; + break; + case NEON_FACGE_H_scalar: + mnemonic = "facge"; + break; + case NEON_FACGT_H_scalar: + mnemonic = "facgt"; + break; + case NEON_FRECPS_H_scalar: + mnemonic = "frecps"; + break; + case NEON_FRSQRTS_H_scalar: + mnemonic = "frsqrts"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + +void Disassembler::VisitNEONScalar3SameExtra(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3SameExtraMask)) { + case NEON_SQRDMLAH_scalar: + mnemonic = "sqrdmlah"; + break; + case NEON_SQRDMLSH_scalar: + mnemonic = "sqrdmlsh"; + break; + default: + form = "(NEONScalar3SameExtra)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalarByIndexedElement(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]"; + const char *form_half = "'Hd, 'Hn, 'Ve.h['IVByElemIndex]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + bool long_instr = false; + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + mnemonic = "sqdmull"; + long_instr = true; + break; + case NEON_SQDMLAL_byelement_scalar: + mnemonic = "sqdmlal"; + long_instr = true; + break; + case NEON_SQDMLSL_byelement_scalar: + mnemonic = "sqdmlsl"; + long_instr = true; + break; + case NEON_SQDMULH_byelement_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement_scalar: + mnemonic = "sqrdmulh"; + break; + case NEON_SQRDMLAH_byelement_scalar: + mnemonic = "sqrdmlah"; + break; + case NEON_SQRDMLSH_byelement_scalar: + mnemonic = "sqrdmlsh"; + break; + default: + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: + mnemonic = "fmul"; + break; + case NEON_FMLA_byelement_scalar: + mnemonic = "fmla"; + break; + case NEON_FMLS_byelement_scalar: + mnemonic = "fmls"; + break; + case NEON_FMULX_byelement_scalar: + mnemonic = "fmulx"; + break; + case NEON_FMLA_H_byelement_scalar: + mnemonic = "fmla"; + form = form_half; + break; + case NEON_FMLS_H_byelement_scalar: + mnemonic = "fmls"; + form = form_half; + break; + case NEON_FMUL_H_byelement_scalar: + mnemonic = "fmul"; + form = form_half; + break; + case NEON_FMULX_H_byelement_scalar: + mnemonic = "fmulx"; + form = form_half; + break; + default: + form = "(NEONScalarByIndexedElement)"; + } + } + + if (long_instr) { + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + } + + Format(instr, + mnemonic, + nfd.Substitute(form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarCopy(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONScalarCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + mnemonic = "mov"; + form = "%sd, 'Vn.%s['IVInsIndex1]"; + } + + Format(instr, mnemonic, 
nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarPairwise(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + NEONFormatMap map = {{22}, {NF_2S, NF_2D}}; + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::FPScalarPairwiseFormatMap(), + &map); + + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: + // All pairwise operations except ADDP use bit U to differentiate FP16 + // from FP32/FP64 variations. + nfd.SetFormatMap(0, NEONFormatDecoder::FPScalarFormatMap()); + mnemonic = "addp"; + break; + case NEON_FADDP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FADDP_scalar: + mnemonic = "faddp"; + break; + case NEON_FMAXP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMAXP_scalar: + mnemonic = "fmaxp"; + break; + case NEON_FMAXNMP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMAXNMP_scalar: + mnemonic = "fmaxnmp"; + break; + case NEON_FMINP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMINP_scalar: + mnemonic = "fminp"; + break; + case NEON_FMINNMP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMINNMP_scalar: + mnemonic = "fminnmp"; + break; + default: + form = "(NEONScalarPairwise)"; + } + Format(instr, + mnemonic, + nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); +} + + +void Disassembler::VisitNEONScalarShiftImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Is1"; + const char *form_2 = "%sd, %sn, 'Is2"; + + static const NEONFormatMap map_shift = {{22, 21, 20, 19}, + {NF_UNDEF, + NF_B, + NF_H, + NF_H, + NF_S, + NF_S, + NF_S, + NF_S, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D}}; + static const NEONFormatMap map_shift_narrow = + {{21, 20, 19}, {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}}; + NEONFormatDecoder nfd(instr, &map_shift); + + if (instr->GetImmNEONImmh()) { // immh has to be non-zero. 
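+ // The leading set bit of immh selects the element size (0001 -> B, + // 001x -> H, 01xx -> S, 1xxx -> D, per map_shift above); 'Is1/'Is2 are + // substituted later with the decoded shift amount: right shifts encode + // (2 * esize) - immh:immb and left shifts encode immh:immb - esize.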
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZU_imm_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_SCVTF_imm_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm_scalar: + mnemonic = "ucvtf"; + break; + case NEON_SRI_scalar: + mnemonic = "sri"; + break; + case NEON_SSHR_scalar: + mnemonic = "sshr"; + break; + case NEON_USHR_scalar: + mnemonic = "ushr"; + break; + case NEON_SRSHR_scalar: + mnemonic = "srshr"; + break; + case NEON_URSHR_scalar: + mnemonic = "urshr"; + break; + case NEON_SSRA_scalar: + mnemonic = "ssra"; + break; + case NEON_USRA_scalar: + mnemonic = "usra"; + break; + case NEON_SRSRA_scalar: + mnemonic = "srsra"; + break; + case NEON_URSRA_scalar: + mnemonic = "ursra"; + break; + case NEON_SHL_scalar: + mnemonic = "shl"; + form = form_2; + break; + case NEON_SLI_scalar: + mnemonic = "sli"; + form = form_2; + break; + case NEON_SQSHLU_scalar: + mnemonic = "sqshlu"; + form = form_2; + break; + case NEON_SQSHL_imm_scalar: + mnemonic = "sqshl"; + form = form_2; + break; + case NEON_UQSHL_imm_scalar: + mnemonic = "uqshl"; + form = form_2; + break; + case NEON_UQSHRN_scalar: + mnemonic = "uqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_UQRSHRN_scalar: + mnemonic = "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRN_scalar: + mnemonic = "sqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRN_scalar: + mnemonic = "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRUN_scalar: + mnemonic = "sqshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRUN_scalar: + mnemonic = "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + default: + form = "(NEONScalarShiftImmediate)"; + } + } else { + form = "(NEONScalarShiftImmediate)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONShiftImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Is1"; + const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2"; + const char *form_xtl = "'Vd.%s, 'Vn.%s"; + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. + static const NEONFormatMap map_shift_ta = + {{22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. + static const NEONFormatMap map_shift_tb = + {{22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, + NF_8H, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_2S, NF_4S, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, + NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}}; + + NEONFormatDecoder nfd(instr, &map_shift_tb); + + if (instr->GetImmNEONImmh()) { // immh has to be non-zero. 
+ switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SQSHLU: + mnemonic = "sqshlu"; + form = form_shift_2; + break; + case NEON_SQSHL_imm: + mnemonic = "sqshl"; + form = form_shift_2; + break; + case NEON_UQSHL_imm: + mnemonic = "uqshl"; + form = form_shift_2; + break; + case NEON_SHL: + mnemonic = "shl"; + form = form_shift_2; + break; + case NEON_SLI: + mnemonic = "sli"; + form = form_shift_2; + break; + case NEON_SCVTF_imm: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm: + mnemonic = "ucvtf"; + break; + case NEON_FCVTZU_imm: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm: + mnemonic = "fcvtzs"; + break; + case NEON_SRI: + mnemonic = "sri"; + break; + case NEON_SSHR: + mnemonic = "sshr"; + break; + case NEON_USHR: + mnemonic = "ushr"; + break; + case NEON_SRSHR: + mnemonic = "srshr"; + break; + case NEON_URSHR: + mnemonic = "urshr"; + break; + case NEON_SSRA: + mnemonic = "ssra"; + break; + case NEON_USRA: + mnemonic = "usra"; + break; + case NEON_SRSRA: + mnemonic = "srsra"; + break; + case NEON_URSRA: + mnemonic = "ursra"; + break; + case NEON_SHRN: + mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_RSHRN: + mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SSHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->GetImmNEONImmb() == 0 && + CountSetBits(instr->GetImmNEONImmh(), 32) == 1) { // sxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl"; + } else { // sshll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll"; + } + break; + case NEON_USHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->GetImmNEONImmb() == 0 && + CountSetBits(instr->GetImmNEONImmh(), 32) == 1) { // uxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl"; + } else { // ushll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? 
"ushll2" : "ushll"; + } + break; + default: + form = "(NEONShiftImmediate)"; + } + } else { + form = "(NEONShiftImmediate)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONTable(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONTable)"; + const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s"; + const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s"; + const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + const char form_4v[] = + "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + NEONFormatDecoder nfd(instr, &map_b); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: + mnemonic = "tbl"; + form = form_1v; + break; + case NEON_TBL_2v: + mnemonic = "tbl"; + form = form_2v; + break; + case NEON_TBL_3v: + mnemonic = "tbl"; + form = form_3v; + break; + case NEON_TBL_4v: + mnemonic = "tbl"; + form = form_4v; + break; + case NEON_TBX_1v: + mnemonic = "tbx"; + form = form_1v; + break; + case NEON_TBX_2v: + mnemonic = "tbx"; + form = form_2v; + break; + case NEON_TBX_3v: + mnemonic = "tbx"; + form = form_3v; + break; + case NEON_TBX_4v: + mnemonic = "tbx"; + form = form_4v; + break; + default: + break; + } + + char re_form[sizeof(form_4v) + 6]; + int reg_num = instr->GetRn(); + snprintf(re_form, + sizeof(re_form), + form, + (reg_num + 1) % kNumberOfVRegisters, + (reg_num + 2) % kNumberOfVRegisters, + (reg_num + 3) % kNumberOfVRegisters); + + Format(instr, mnemonic, nfd.Substitute(re_form)); +} + + +void Disassembler::VisitNEONPerm(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: + mnemonic = "trn1"; + break; + case NEON_TRN2: + mnemonic = "trn2"; + break; + case NEON_UZP1: + mnemonic = "uzp1"; + break; + case NEON_UZP2: + mnemonic = "uzp2"; + break; + case NEON_ZIP1: + mnemonic = "zip1"; + break; + case NEON_ZIP2: + mnemonic = "zip2"; + break; + default: + form = "(NEONPerm)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitUnimplemented(const Instruction *instr) { + Format(instr, "unimplemented", "(Unimplemented)"); +} + + +void Disassembler::VisitUnallocated(const Instruction *instr) { + Format(instr, "unallocated", "(Unallocated)"); +} + + +void Disassembler::ProcessOutput(const Instruction * /*instr*/) { + // The base disasm does nothing more than disassembling into a buffer. +} + + +void Disassembler::AppendRegisterNameToOutput(const Instruction *instr, + const CPURegister ®) { + USE(instr); + VIXL_ASSERT(reg.IsValid()); + char reg_char; + + if (reg.IsRegister()) { + reg_char = reg.Is64Bits() ? 'x' : 'w'; + } else { + VIXL_ASSERT(reg.IsVRegister()); + switch (reg.GetSizeInBits()) { + case kBRegSize: + reg_char = 'b'; + break; + case kHRegSize: + reg_char = 'h'; + break; + case kSRegSize: + reg_char = 's'; + break; + case kDRegSize: + reg_char = 'd'; + break; + default: + VIXL_ASSERT(reg.Is128Bits()); + reg_char = 'q'; + } + } + + if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) { + // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31. + AppendToOutput("%c%d", reg_char, reg.GetCode()); + } else if (reg.Aliases(sp)) { + // Disassemble w31/x31 as stack pointer wsp/sp. + AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp"); + } else { + // Disassemble w31/x31 as zero register wzr/xzr. 
+ AppendToOutput("%czr", reg_char); + } +} + + +void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction *instr, + int64_t offset) { + USE(instr); + if (offset < 0) { + // Cast to uint64_t so that INT64_MIN is handled in a well-defined way. + uint64_t abs_offset = -static_cast(offset); + AppendToOutput("#-0x%" PRIx64, abs_offset); + } else { + AppendToOutput("#+0x%" PRIx64, offset); + } +} + + +void Disassembler::AppendAddressToOutput(const Instruction *instr, + const void *addr) { + USE(instr); + AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast(addr)); +} + + +void Disassembler::AppendCodeAddressToOutput(const Instruction *instr, + const void *addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendDataAddressToOutput(const Instruction *instr, + const void *addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction *instr, + const void *addr) { + USE(instr); + int64_t rel_addr = CodeRelativeAddress(addr); + if (rel_addr >= 0) { + AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr); + } else { + AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr); + } +} + + +void Disassembler::AppendCodeRelativeCodeAddressToOutput( + const Instruction *instr, const void *addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeDataAddressToOutput( + const Instruction *instr, const void *addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::MapCodeAddress(int64_t base_address, + const Instruction *instr_address) { + set_code_address_offset(base_address - + reinterpret_cast(instr_address)); +} +int64_t Disassembler::CodeRelativeAddress(const void *addr) { + return reinterpret_cast(addr) + code_address_offset(); +} + + +void Disassembler::Format(const Instruction *instr, + const char *mnemonic, + const char *format) { + VIXL_ASSERT(mnemonic != NULL); + ResetOutput(); + Substitute(instr, mnemonic); + if (format != NULL) { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = ' '; + Substitute(instr, format); + } + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_] = 0; + ProcessOutput(instr); +} + + +void Disassembler::Substitute(const Instruction *instr, const char *string) { + char chr = *string++; + while (chr != '\0') { + if (chr == '\'') { + string += SubstituteField(instr, string); + } else { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = chr; + } + chr = *string++; + } +} + + +int Disassembler::SubstituteField(const Instruction *instr, + const char *format) { + switch (format[0]) { + // NB. The remaining substitution prefix characters are: GJKUZ. + case 'R': // Register. X or W, selected by sf bit. + case 'F': // FP register. S or D, selected by type field. + case 'V': // Vector register, V, vector format. 
+ case 'W': + case 'X': + case 'B': + case 'H': + case 'S': + case 'D': + case 'Q': + return SubstituteRegisterField(instr, format); + case 'I': + return SubstituteImmediateField(instr, format); + case 'L': + return SubstituteLiteralField(instr, format); + case 'N': + return SubstituteShiftField(instr, format); + case 'P': + return SubstitutePrefetchField(instr, format); + case 'C': + return SubstituteConditionField(instr, format); + case 'E': + return SubstituteExtendField(instr, format); + case 'A': + return SubstitutePCRelAddressField(instr, format); + case 'T': + return SubstituteBranchTargetField(instr, format); + case 'O': + return SubstituteLSRegOffsetField(instr, format); + case 'M': + return SubstituteBarrierField(instr, format); + case 'K': + return SubstituteCrField(instr, format); + case 'G': + return SubstituteSysOpField(instr, format); + default: { + VIXL_UNREACHABLE(); + return 1; + } + } +} + + +int Disassembler::SubstituteRegisterField(const Instruction *instr, + const char *format) { + char reg_prefix = format[0]; + unsigned reg_num = 0; + unsigned field_len = 2; + + switch (format[1]) { + case 'd': + reg_num = instr->GetRd(); + if (format[2] == 'q') { + reg_prefix = instr->GetNEONQ() ? 'X' : 'W'; + field_len = 3; + } + break; + case 'n': + reg_num = instr->GetRn(); + break; + case 'm': + reg_num = instr->GetRm(); + switch (format[2]) { + // Handle registers tagged with b (bytes), z (instruction), or + // r (registers), used for address updates in + // NEON load/store instructions. + case 'r': + case 'b': + case 'z': { + field_len = 3; + char *eimm; + int imm = static_cast(strtol(&format[3], &eimm, 10)); + field_len += eimm - &format[3]; + if (reg_num == 31) { + switch (format[2]) { + case 'z': + imm *= (1 << instr->GetNEONLSSize()); + break; + case 'r': + imm *= (instr->GetNEONQ() == 0) ? kDRegSizeInBytes + : kQRegSizeInBytes; + break; + case 'b': + break; + } + AppendToOutput("#%d", imm); + return field_len; + } + break; + } + } + break; + case 'e': + // This is register Rm, but using a 4-bit specifier. Used in NEON + // by-element instructions. + reg_num = (instr->GetRm() & 0xf); + break; + case 'a': + reg_num = instr->GetRa(); + break; + case 's': + reg_num = instr->GetRs(); + break; + case 't': + reg_num = instr->GetRt(); + if (format[0] == 'V') { + if ((format[2] >= '2') && (format[2] <= '4')) { + // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4. + reg_num = (reg_num + format[2] - '1') % 32; + field_len = 3; + } + } else { + if (format[2] == '2') { + // Handle register specifier Rt2. + reg_num = instr->GetRt2(); + field_len = 3; + } + } + break; + case '(': { + switch (format[2]) { + case 's': + reg_num = instr->GetRs(); + break; + case 't': + reg_num = instr->GetRt(); + break; + default: + VIXL_UNREACHABLE(); + } + + VIXL_ASSERT(format[3] == '+'); + int i = 4; + int addition = 0; + while (format[i] != ')') { + VIXL_ASSERT((format[i] >= '0') && (format[i] <= '9')); + addition *= 10; + addition += format[i] - '0'; + ++i; + } + reg_num += addition; + field_len = i + 1; + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Increase field length for registers tagged as stack. + if (format[1] != '(' && format[2] == 's') { + field_len = 3; + } + + CPURegister::RegisterType reg_type = CPURegister::kRegister; + unsigned reg_size = kXRegSize; + + switch (reg_prefix) { + case 'R': + reg_prefix = instr->GetSixtyFourBits() ? 
'X' : 'W'; + break; + case 'F': + switch (instr->GetFPType()) { + case 3: + reg_prefix = 'H'; + break; + case 0: + reg_prefix = 'S'; + break; + default: + reg_prefix = 'D'; + } + } + + switch (reg_prefix) { + case 'W': + reg_type = CPURegister::kRegister; + reg_size = kWRegSize; + break; + case 'X': + reg_type = CPURegister::kRegister; + reg_size = kXRegSize; + break; + case 'B': + reg_type = CPURegister::kVRegister; + reg_size = kBRegSize; + break; + case 'H': + reg_type = CPURegister::kVRegister; + reg_size = kHRegSize; + break; + case 'S': + reg_type = CPURegister::kVRegister; + reg_size = kSRegSize; + break; + case 'D': + reg_type = CPURegister::kVRegister; + reg_size = kDRegSize; + break; + case 'Q': + reg_type = CPURegister::kVRegister; + reg_size = kQRegSize; + break; + case 'V': + AppendToOutput("v%d", reg_num); + return field_len; + default: + VIXL_UNREACHABLE(); + } + + if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) && + (format[2] == 's')) { + reg_num = kSPRegInternalCode; + } + + AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type)); + + return field_len; +} + + +int Disassembler::SubstituteImmediateField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'I'); + + switch (format[1]) { + case 'M': { // IMoveImm, IMoveNeg or IMoveLSL. + if (format[5] == 'L') { + AppendToOutput("#0x%" PRIx32, instr->GetImmMoveWide()); + if (instr->GetShiftMoveWide() > 0) { + AppendToOutput(", lsl #%" PRId32, 16 * instr->GetShiftMoveWide()); + } + } else { + VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N')); + uint64_t imm = static_cast(instr->GetImmMoveWide()) + << (16 * instr->GetShiftMoveWide()); + if (format[5] == 'N') imm = ~imm; + if (!instr->GetSixtyFourBits()) imm &= UINT64_C(0xffffffff); + AppendToOutput("#0x%" PRIx64, imm); + } + return 8; + } + case 'L': { + switch (format[2]) { + case 'L': { // ILLiteral - Immediate Load Literal. + AppendToOutput("pc%+" PRId32, + instr->GetImmLLiteral() * + static_cast(kLiteralEntrySize)); + return 9; + } + case 'S': { // ILS - Immediate Load/Store. + // ILSi - As above, but an index field which must not be + // omitted even if it is zero. + bool is_index = format[3] == 'i'; + if (is_index || (instr->GetImmLS() != 0)) { + AppendToOutput(", #%" PRId32, instr->GetImmLS()); + } + return is_index ? 4 : 3; + } + case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. + // ILPxi - As above, but an index field which must not be + // omitted even if it is zero. + VIXL_ASSERT((format[3] >= '0') && (format[3] <= '9')); + bool is_index = format[4] == 'i'; + if (is_index || (instr->GetImmLSPair() != 0)) { + // format[3] is the scale value. Convert to a number. + int scale = 1 << (format[3] - '0'); + AppendToOutput(", #%" PRId32, instr->GetImmLSPair() * scale); + } + return is_index ? 5 : 4; + } + case 'U': { // ILU - Immediate Load/Store Unsigned. + if (instr->GetImmLSUnsigned() != 0) { + int shift = instr->GetSizeLS(); + AppendToOutput(", #%" PRId32, instr->GetImmLSUnsigned() << shift); + } + return 3; + } + case 'F': { // ILF(CNR) - Immediate Rotation Value for Complex Numbers + AppendToOutput("#%" PRId32, instr->GetImmRotFcmlaSca() * 90); + return strlen("ILFCNR"); + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'C': { // ICondB - Immediate Conditional Branch. + int64_t offset = instr->GetImmCondBranch() << 2; + AppendPCRelativeOffsetToOutput(instr, offset); + return 6; + } + case 'A': { // IAddSub. 
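+      // NOTE (annotation added in this port, not upstream VIXL): 'IAddSub'
+      // expands the 12-bit unsigned immediate of ADD/SUB (immediate),
+      // optionally shifted left by 12; e.g. imm12=0x123 with sh=1 prints
+      // "#0x123000 (1191936)".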
+ VIXL_ASSERT(instr->GetShiftAddSub() <= 1); + int64_t imm = instr->GetImmAddSub() << (12 * instr->GetShiftAddSub()); + AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); + return 7; + } + case 'F': { // IFPHalf, IFPSingle, IFPDouble, or IFPFBits. + if (format[3] == 'F') { // IFPFbits. + AppendToOutput("#%" PRId32, 64 - instr->GetFPScale()); + return 8; + } else { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmFP(), + format[3] == 'H' + ? FPToFloat(instr->GetImmFP16(), kIgnoreDefaultNaN) + : (format[3] == 'S') ? instr->GetImmFP32() + : instr->GetImmFP64()); + if (format[3] == 'H') { + return 7; + } else { + return 9; + } + } + } + case 'H': { // IH - ImmHint + AppendToOutput("#%" PRId32, instr->GetImmHint()); + return 2; + } + case 'T': { // ITri - Immediate Triangular Encoded. + AppendToOutput("#0x%" PRIx64, instr->GetImmLogical()); + return 4; + } + case 'N': { // INzcv. + int nzcv = (instr->GetNzcv() << Flags_offset); + AppendToOutput("#%c%c%c%c", + ((nzcv & NFlag) == 0) ? 'n' : 'N', + ((nzcv & ZFlag) == 0) ? 'z' : 'Z', + ((nzcv & CFlag) == 0) ? 'c' : 'C', + ((nzcv & VFlag) == 0) ? 'v' : 'V'); + return 5; + } + case 'P': { // IP - Conditional compare. + AppendToOutput("#%" PRId32, instr->GetImmCondCmp()); + return 2; + } + case 'B': { // Bitfields. + return SubstituteBitfieldImmediateField(instr, format); + } + case 'E': { // IExtract. + AppendToOutput("#%" PRId32, instr->GetImmS()); + return 8; + } + case 'S': { // IS - Test and branch bit. + AppendToOutput("#%" PRId32, + (instr->GetImmTestBranchBit5() << 5) | + instr->GetImmTestBranchBit40()); + return 2; + } + case 's': { // Is - Shift (immediate). + switch (format[2]) { + case '1': { // Is1 - SSHR. + int shift = 16 << HighestSetBitPosition(instr->GetImmNEONImmh()); + shift -= instr->GetImmNEONImmhImmb(); + AppendToOutput("#%d", shift); + return 3; + } + case '2': { // Is2 - SLI. + int shift = instr->GetImmNEONImmhImmb(); + shift -= 8 << HighestSetBitPosition(instr->GetImmNEONImmh()); + AppendToOutput("#%d", shift); + return 3; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'D': { // IDebug - HLT and BRK instructions. + AppendToOutput("#0x%" PRIx32, instr->GetImmException()); + return 6; + } + case 'V': { // Immediate Vector. + switch (format[2]) { + case 'F': { + switch (format[5]) { + // Convert 'rot' bit encodings into equivalent angle rotation + case 'A': + AppendToOutput("#%" PRId32, + instr->GetImmRotFcadd() == 1 ? 270 : 90); + break; + case 'M': + AppendToOutput("#%" PRId32, instr->GetImmRotFcmlaVec() * 90); + break; + } + return strlen("IVFCN") + 1; + } + case 'E': { // IVExtract. + AppendToOutput("#%" PRId32, instr->GetImmNEONExt()); + return 9; + } + case 'B': { // IVByElemIndex. + int ret = strlen("IVByElemIndex"); + int vm_index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + if ((strncmp(format, + "IVByElemIndexRot", + strlen("IVByElemIndexRot")) == 0)) { + // FCMLA uses 'H' bit index when SIZE is 2, else H:L + if (instr->GetNEONSize() == 2) { + vm_index = instr->GetNEONH(); + } + ret += 3; + } else if (instr->GetNEONSize() == 1) { + vm_index = (vm_index << 1) | instr->GetNEONM(); + } else if (instr->GetNEONSize() == 0) { + // Half-precision FP ops use H:L:M bit index + vm_index = (instr->GetNEONH() << 2) | (instr->GetNEONL() << 1) | + instr->GetNEONM(); + } + AppendToOutput("%d", vm_index); + return ret; + } + case 'I': { // INS element. 
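+          // NOTE (annotation added in this port, not upstream VIXL): the INS
+          // lane index is packed into imm5; the trailing-zero count selects
+          // the lane size (B, H, S or D) and the remaining bits the index,
+          // e.g. imm5=0b01000 decodes as lane size D, index 0.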
+ if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) { + unsigned rd_index, rn_index; + unsigned imm5 = instr->GetImmNEON5(); + unsigned imm4 = instr->GetImmNEON4(); + int tz = CountTrailingZeros(imm5, 32); + if (tz <= 3) { // Defined for tz = 0 to 3 only. + rd_index = imm5 >> (tz + 1); + rn_index = imm4 >> tz; + if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) { + AppendToOutput("%d", rd_index); + return strlen("IVInsIndex1"); + } else if (strncmp(format, + "IVInsIndex2", + strlen("IVInsIndex2")) == 0) { + AppendToOutput("%d", rn_index); + return strlen("IVInsIndex2"); + } + } + return 0; + } + VIXL_FALLTHROUGH(); + } + case 'L': { // IVLSLane[0123] - suffix indicates access size shift. + AppendToOutput("%d", instr->GetNEONLSIndex(format[8] - '0')); + return 9; + } + case 'M': { // Modified Immediate cases. + if (strncmp(format, "IVMIImmFPHalf", strlen("IVMIImmFPHalf")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + FPToFloat(instr->GetImmNEONFP16(), + kIgnoreDefaultNaN)); + return strlen("IVMIImmFPHalf"); + } else if (strncmp(format, + "IVMIImmFPSingle", + strlen("IVMIImmFPSingle")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + instr->GetImmNEONFP32()); + return strlen("IVMIImmFPSingle"); + } else if (strncmp(format, + "IVMIImmFPDouble", + strlen("IVMIImmFPDouble")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + instr->GetImmNEONFP64()); + return strlen("IVMIImmFPDouble"); + } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) { + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + AppendToOutput("#0x%" PRIx64, imm8); + return strlen("IVMIImm8"); + } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) { + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + uint64_t imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + AppendToOutput("#0x%" PRIx64, imm); + return strlen("IVMIImm"); + } else if (strncmp(format, + "IVMIShiftAmt1", + strlen("IVMIShiftAmt1")) == 0) { + int cmode = instr->GetNEONCmode(); + int shift_amount = 8 * ((cmode >> 1) & 3); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt1"); + } else if (strncmp(format, + "IVMIShiftAmt2", + strlen("IVMIShiftAmt2")) == 0) { + int cmode = instr->GetNEONCmode(); + int shift_amount = 8 << (cmode & 1); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt2"); + } else { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'X': { // IX - CLREX instruction. + AppendToOutput("#0x%" PRIx32, instr->GetCRm()); + return 2; + } + case 'Y': { // IY - system register immediate. + switch (instr->GetImmSystemRegister()) { + case NZCV: + AppendToOutput("nzcv"); + break; + case FPCR: + AppendToOutput("fpcr"); + break; + default: + AppendToOutput("S%d_%d_c%d_c%d_%d", + instr->GetSysOp0(), + instr->GetSysOp1(), + instr->GetCRn(), + instr->GetCRm(), + instr->GetSysOp2()); + break; + } + return 2; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } +} + + +int Disassembler::SubstituteBitfieldImmediateField(const Instruction *instr, + const char *format) { + VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B')); + unsigned r = instr->GetImmR(); + unsigned s = instr->GetImmS(); + + switch (format[2]) { + case 'r': { // IBr. + AppendToOutput("#%d", r); + return 3; + } + case 's': { // IBs+1 or IBs-r+1. 
+      if (format[3] == '+') {
+        AppendToOutput("#%d", s + 1);
+        return 5;
+      } else {
+        VIXL_ASSERT(format[3] == '-');
+        AppendToOutput("#%d", s - r + 1);
+        return 7;
+      }
+    }
+    case 'Z': {  // IBZ-r.
+      VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
+      unsigned reg_size =
+          (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+      AppendToOutput("#%d", reg_size - r);
+      return 5;
+    }
+    default: {
+      VIXL_UNREACHABLE();
+      return 0;
+    }
+  }
+}
+
+
+int Disassembler::SubstituteLiteralField(const Instruction *instr,
+                                         const char *format) {
+  VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
+  USE(format);
+
+  const void *address = instr->GetLiteralAddress();
+  switch (instr->Mask(LoadLiteralMask)) {
+    case LDR_w_lit:
+    case LDR_x_lit:
+    case LDRSW_x_lit:
+    case LDR_s_lit:
+    case LDR_d_lit:
+    case LDR_q_lit:
+      AppendCodeRelativeDataAddressToOutput(instr, address);
+      break;
+    case PRFM_lit: {
+      // Use the prefetch hint to decide how to print the address.
+      switch (instr->GetPrefetchHint()) {
+        case 0x0:  // PLD: prefetch for load.
+        case 0x2:  // PST: prepare for store.
+          AppendCodeRelativeDataAddressToOutput(instr, address);
+          break;
+        case 0x1:  // PLI: preload instructions.
+          AppendCodeRelativeCodeAddressToOutput(instr, address);
+          break;
+        case 0x3:  // Unallocated hint.
+          AppendCodeRelativeAddressToOutput(instr, address);
+          break;
+      }
+      break;
+    }
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(const Instruction *instr,
+                                       const char *format) {
+  VIXL_ASSERT(format[0] == 'N');
+  VIXL_ASSERT(instr->GetShiftDP() <= 0x3);
+
+  switch (format[1]) {
+    case 'D': {  // NDP.
+      VIXL_ASSERT(instr->GetShiftDP() != ROR);
+      VIXL_FALLTHROUGH();
+    }
+    case 'L': {  // NLo.
+      if (instr->GetImmDPShift() != 0) {
+        const char *shift_type[] = {"lsl", "lsr", "asr", "ror"};
+        AppendToOutput(", %s #%" PRId32,
+                       shift_type[instr->GetShiftDP()],
+                       instr->GetImmDPShift());
+      }
+      return 3;
+    }
+    default:
+      VIXL_UNIMPLEMENTED();
+      return 0;
+  }
+}
+
+
+int Disassembler::SubstituteConditionField(const Instruction *instr,
+                                           const char *format) {
+  VIXL_ASSERT(format[0] == 'C');
+  const char *condition_code[] = {"eq",
+                                  "ne",
+                                  "hs",
+                                  "lo",
+                                  "mi",
+                                  "pl",
+                                  "vs",
+                                  "vc",
+                                  "hi",
+                                  "ls",
+                                  "ge",
+                                  "lt",
+                                  "gt",
+                                  "le",
+                                  "al",
+                                  "nv"};
+  int cond;
+  switch (format[1]) {
+    case 'B':
+      cond = instr->GetConditionBranch();
+      break;
+    case 'I': {
+      cond = InvertCondition(static_cast<Condition>(instr->GetCondition()));
+      break;
+    }
+    default:
+      cond = instr->GetCondition();
+  }
+  AppendToOutput("%s", condition_code[cond]);
+  return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(const Instruction *instr,
+                                              const char *format) {
+  VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) ||  // Used by `adr`.
+              (strcmp(format, "AddrPCRelPage") == 0));   // Used by `adrp`.
+
+  int64_t offset = instr->GetImmPCRel();
+
+  // Compute the target address based on the effective address (after applying
+  // code_address_offset). This is required for correct behaviour of adrp.
+  const Instruction *base = instr + code_address_offset();
+  if (format[9] == 'P') {
+    offset *= kPageSize;
+    base = AlignDown(base, kPageSize);
+  }
+  // Strip code_address_offset before printing, so we can use the
+  // semantically-correct AppendCodeRelativeAddressToOutput.
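+  // NOTE (annotation added in this port, not upstream VIXL): for adrp the
+  // offset is scaled by the 4KB page size and the base is page-aligned, so
+  // an adrp at 0x4320 with imm21=1 prints "#+0x1000" and targets 0x5000.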
+  const void *target =
+      reinterpret_cast<const void *>(base + offset - code_address_offset());
+
+  AppendPCRelativeOffsetToOutput(instr, offset);
+  AppendToOutput(" ");
+  AppendCodeRelativeAddressToOutput(instr, target);
+  return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(const Instruction *instr,
+                                              const char *format) {
+  VIXL_ASSERT(strncmp(format, "TImm", 4) == 0);
+
+  int64_t offset = 0;
+  switch (format[5]) {
+    // BImmUncn - unconditional branch immediate.
+    case 'n':
+      offset = instr->GetImmUncondBranch();
+      break;
+    // BImmCond - conditional branch immediate.
+    case 'o':
+      offset = instr->GetImmCondBranch();
+      break;
+    // BImmCmpa - compare and branch immediate.
+    case 'm':
+      offset = instr->GetImmCmpBranch();
+      break;
+    // BImmTest - test and branch immediate.
+    case 'e':
+      offset = instr->GetImmTestBranch();
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+  offset *= static_cast<int>(kInstructionSize);
+  const void *target_address = reinterpret_cast<const void *>(instr + offset);
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+
+  AppendPCRelativeOffsetToOutput(instr, offset);
+  AppendToOutput(" ");
+  AppendCodeRelativeCodeAddressToOutput(instr, target_address);
+
+  return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(const Instruction *instr,
+                                        const char *format) {
+  VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+  VIXL_ASSERT(instr->GetExtendMode() <= 7);
+  USE(format);
+
+  const char *extend_mode[] =
+      {"uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx"};
+
+  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+  // registers becomes lsl.
+  if (((instr->GetRd() == kZeroRegCode) || (instr->GetRn() == kZeroRegCode)) &&
+      (((instr->GetExtendMode() == UXTW) && (instr->GetSixtyFourBits() == 0)) ||
+       (instr->GetExtendMode() == UXTX))) {
+    if (instr->GetImmExtendShift() > 0) {
+      AppendToOutput(", lsl #%" PRId32, instr->GetImmExtendShift());
+    }
+  } else {
+    AppendToOutput(", %s", extend_mode[instr->GetExtendMode()]);
+    if (instr->GetImmExtendShift() > 0) {
+      AppendToOutput(" #%" PRId32, instr->GetImmExtendShift());
+    }
+  }
+  return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(const Instruction *instr,
+                                             const char *format) {
+  VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+  const char *extend_mode[] = {"undefined",
+                               "undefined",
+                               "uxtw",
+                               "lsl",
+                               "undefined",
+                               "undefined",
+                               "sxtw",
+                               "sxtx"};
+  USE(format);
+
+  unsigned shift = instr->GetImmShiftLS();
+  Extend ext = static_cast<Extend>(instr->GetExtendMode());
+  char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+  unsigned rm = instr->GetRm();
+  if (rm == kZeroRegCode) {
+    AppendToOutput("%czr", reg_type);
+  } else {
+    AppendToOutput("%c%d", reg_type, rm);
+  }
+
+  // Extend mode UXTX is an alias for shift mode LSL here.
+  if (!((ext == UXTX) && (shift == 0))) {
+    AppendToOutput(", %s", extend_mode[ext]);
+    if (shift != 0) {
+      AppendToOutput(" #%d", instr->GetSizeLS());
+    }
+  }
+  return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(const Instruction *instr,
+                                          const char *format) {
+  VIXL_ASSERT(format[0] == 'P');
+  USE(format);
+
+  static const char *hints[] = {"ld", "li", "st"};
+  static const char *stream_options[] = {"keep", "strm"};
+
+  unsigned hint = instr->GetPrefetchHint();
+  unsigned target = instr->GetPrefetchTarget() + 1;
+  unsigned stream = instr->GetPrefetchStream();
+
+  if ((hint >= ArrayLength(hints)) || (target > 3)) {
+    // Unallocated prefetch operations.
+    int prefetch_mode = instr->GetImmPrefetchOperation();
+    AppendToOutput("#0b%c%c%c%c%c",
+                   (prefetch_mode & (1 << 4)) ?
'1' : '0', + (prefetch_mode & (1 << 3)) ? '1' : '0', + (prefetch_mode & (1 << 2)) ? '1' : '0', + (prefetch_mode & (1 << 1)) ? '1' : '0', + (prefetch_mode & (1 << 0)) ? '1' : '0'); + } else { + VIXL_ASSERT(stream < ArrayLength(stream_options)); + AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]); + } + return 6; +} + +int Disassembler::SubstituteBarrierField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'M'); + USE(format); + + static const char *options[4][4] = {{"sy (0b0000)", "oshld", "oshst", "osh"}, + {"sy (0b0100)", "nshld", "nshst", "nsh"}, + {"sy (0b1000)", "ishld", "ishst", "ish"}, + {"sy (0b1100)", "ld", "st", "sy"}}; + int domain = instr->GetImmBarrierDomain(); + int type = instr->GetImmBarrierType(); + + AppendToOutput("%s", options[domain][type]); + return 1; +} + +int Disassembler::SubstituteSysOpField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'G'); + int op = -1; + switch (format[1]) { + case '1': + op = instr->GetSysOp1(); + break; + case '2': + op = instr->GetSysOp2(); + break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("#%d", op); + return 2; +} + +int Disassembler::SubstituteCrField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'K'); + int cr = -1; + switch (format[1]) { + case 'n': + cr = instr->GetCRn(); + break; + case 'm': + cr = instr->GetCRm(); + break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("C%d", cr); + return 2; +} + +void Disassembler::ResetOutput() { + buffer_pos_ = 0; + buffer_[buffer_pos_] = 0; +} + + +void Disassembler::AppendToOutput(const char *format, ...) { + va_list args; + va_start(args, format); + buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], + buffer_size_ - buffer_pos_, + format, + args); + va_end(args); +} + + +void PrintDisassembler::Disassemble(const Instruction *instr) { + Decoder decoder; + if (cpu_features_auditor_ != NULL) { + decoder.AppendVisitor(cpu_features_auditor_); + } + decoder.AppendVisitor(this); + decoder.Decode(instr); +} + +void PrintDisassembler::DisassembleBuffer(const Instruction *start, + const Instruction *end) { + Decoder decoder; + if (cpu_features_auditor_ != NULL) { + decoder.AppendVisitor(cpu_features_auditor_); + } + decoder.AppendVisitor(this); + decoder.Decode(start, end); +} + +void PrintDisassembler::DisassembleBuffer(const Instruction *start, + uint64_t size) { + DisassembleBuffer(start, start + size); +} + + +void PrintDisassembler::ProcessOutput(const Instruction *instr) { + int bytes_printed = fprintf(stream_, + "0x%016" PRIx64 " %08" PRIx32 "\t\t%s", + reinterpret_cast(instr), + instr->GetInstructionBits(), + GetOutput()); + if (cpu_features_auditor_ != NULL) { + CPUFeatures needs = cpu_features_auditor_->GetInstructionFeatures(); + needs.Remove(cpu_features_auditor_->GetAvailableFeatures()); + if (needs != CPUFeatures::None()) { + // Try to align annotations. This value is arbitrary, but based on looking + // good with most instructions. Note that, for historical reasons, the + // disassembly itself is printed with tab characters, so bytes_printed is + // _not_ equivalent to the number of occupied screen columns. However, the + // prefix before the tabs is always the same length, so the annotation + // indentation does not change from one line to the next. + const int indent_to = 70; + // Always allow some space between the instruction and the annotation. 
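+  // NOTE (annotation added in this port, not upstream VIXL): a minimal
+  // usage sketch: PrintDisassembler(stdout).Disassemble(instr) prints one
+  // line of the form "0x<address> <raw bits>\t\t<disassembly>"; when a
+  // CPUFeaturesAuditor is attached, a feature annotation (e.g. FPHalf for
+  // half-precision FCVT) is appended, padded to the column below.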
+ const int min_pad = 2; + + int pad = std::max(min_pad, (indent_to - bytes_printed)); + fprintf(stream_, "%*s", pad, ""); + + std::stringstream features; + features << needs; + fprintf(stream_, + "%s%s%s", + cpu_features_prefix_, + features.str().c_str(), + cpu_features_suffix_); + } + } + fprintf(stream_, "\n"); +} + +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/instructions-aarch64.cc b/dep/vixl/src/aarch64/instructions-aarch64.cc new file mode 100644 index 000000000..a99a0459d --- /dev/null +++ b/dep/vixl/src/aarch64/instructions-aarch64.cc @@ -0,0 +1,713 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "instructions-aarch64.h" +#include "assembler-aarch64.h" + +namespace vixl { +namespace aarch64 { + +static uint64_t RepeatBitsAcrossReg(unsigned reg_size, + uint64_t value, + unsigned width) { + VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || + (width == 32)); + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + uint64_t result = value & ((UINT64_C(1) << width) - 1); + for (unsigned i = width; i < reg_size; i *= 2) { + result |= (result << i); + } + return result; +} + + +bool Instruction::IsLoad() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) != 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); + switch (op) { + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_w: + case LDRSB_x: + case LDRSH_w: + case LDRSH_x: + case LDRSW_x: + case LDR_b: + case LDR_h: + case LDR_s: + case LDR_d: + case LDR_q: + return true; + default: + return false; + } + } +} + + +bool Instruction::IsStore() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) == 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); + switch (op) { + case STRB_w: + case STRH_w: + case STR_w: + case STR_x: + case STR_b: + case STR_h: + case STR_s: + case STR_d: + case STR_q: + return true; + default: + return false; + } + } +} + + +// Logical immediates can't encode zero, so a return value of zero is used to +// indicate a failure case. Specifically, where the constraints on imm_s are +// not met. +uint64_t Instruction::GetImmLogical() const { + unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize; + int32_t n = GetBitN(); + int32_t imm_s = GetImmSetBits(); + int32_t imm_r = GetImmRotate(); + + // An integer is constructed from the n, imm_s and imm_r bits according to + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 + // bits are set. The pattern is rotated right by R, and repeated across a + // 32 or 64-bit value, depending on destination register width. 
+ // + + if (n == 1) { + if (imm_s == 0x3f) { + return 0; + } + uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1; + return RotateRight(bits, imm_r, 64); + } else { + if ((imm_s >> 1) == 0x1f) { + return 0; + } + for (int width = 0x20; width >= 0x2; width >>= 1) { + if ((imm_s & width) == 0) { + int mask = width - 1; + if ((imm_s & mask) == mask) { + return 0; + } + uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1; + return RepeatBitsAcrossReg(reg_size, + RotateRight(bits, imm_r & mask, width), + width); + } + } + } + VIXL_UNREACHABLE(); + return 0; +} + + +uint32_t Instruction::GetImmNEONabcdefgh() const { + return GetImmNEONabc() << 5 | GetImmNEONdefgh(); +} + + +Float16 Instruction::Imm8ToFloat16(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Half: aBbb.cdef.gh00.0000 (16 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint16_t bit7 = (bits >> 7) & 0x1; + uint16_t bit6 = (bits >> 6) & 0x1; + uint16_t bit5_to_0 = bits & 0x3f; + uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6); + return RawbitsToFloat16(result); +} + + +float Instruction::Imm8ToFP32(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint32_t bit7 = (bits >> 7) & 0x1; + uint32_t bit6 = (bits >> 6) & 0x1; + uint32_t bit5_to_0 = bits & 0x3f; + uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); + + return RawbitsToFloat(result); +} + + +Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); } + + +float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); } + + +double Instruction::Imm8ToFP64(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint64_t bit7 = (bits >> 7) & 0x1; + uint64_t bit6 = (bits >> 6) & 0x1; + uint64_t bit5_to_0 = bits & 0x3f; + uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); + + return RawbitsToDouble(result); +} + + +double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); } + + +Float16 Instruction::GetImmNEONFP16() const { + return Imm8ToFloat16(GetImmNEONabcdefgh()); +} + + +float Instruction::GetImmNEONFP32() const { + return Imm8ToFP32(GetImmNEONabcdefgh()); +} + + +double Instruction::GetImmNEONFP64() const { + return Imm8ToFP64(GetImmNEONabcdefgh()); +} + + +unsigned CalcLSDataSize(LoadStoreOp op) { + VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8)); + unsigned size = static_cast(op) >> LSSize_offset; + if ((op & LSVector_mask) != 0) { + // Vector register memory operations encode the access size in the "size" + // and "opc" fields. 
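+    // NOTE (annotation added in this port, not upstream VIXL): e.g.
+    // LDR q0, [x0] encodes size=0b00 with opc=0b11, which is remapped here
+    // to a 16-byte (Q register) access.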
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { + size = kQRegSizeInBytesLog2; + } + } + return size; +} + + +unsigned CalcLSPairDataSize(LoadStorePairOp op) { + VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); + VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); + switch (op) { + case STP_q: + case LDP_q: + return kQRegSizeInBytesLog2; + case STP_x: + case LDP_x: + case STP_d: + case LDP_d: + return kXRegSizeInBytesLog2; + default: + return kWRegSizeInBytesLog2; + } +} + + +int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) { + switch (branch_type) { + case UncondBranchType: + return ImmUncondBranch_width; + case CondBranchType: + return ImmCondBranch_width; + case CompareBranchType: + return ImmCmpBranch_width; + case TestBranchType: + return ImmTestBranch_width; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) { + int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1); + return encoded_max * kInstructionSize; +} + + +bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, + int64_t offset) { + return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset); +} + + +const Instruction* Instruction::GetImmPCOffsetTarget() const { + const Instruction* base = this; + ptrdiff_t offset; + if (IsPCRelAddressing()) { + // ADR and ADRP. + offset = GetImmPCRel(); + if (Mask(PCRelAddressingMask) == ADRP) { + base = AlignDown(base, kPageSize); + offset *= kPageSize; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); + } + } else { + // All PC-relative branches. + VIXL_ASSERT(GetBranchType() != UnknownBranchType); + // Relative branch offsets are instruction-size-aligned. + offset = GetImmBranch() * static_cast(kInstructionSize); + } + return base + offset; +} + + +int Instruction::GetImmBranch() const { + switch (GetBranchType()) { + case CondBranchType: + return GetImmCondBranch(); + case UncondBranchType: + return GetImmUncondBranch(); + case CompareBranchType: + return GetImmCmpBranch(); + case TestBranchType: + return GetImmTestBranch(); + default: + VIXL_UNREACHABLE(); + } + return 0; +} + + +void Instruction::SetImmPCOffsetTarget(const Instruction* target) { + if (IsPCRelAddressing()) { + SetPCRelImmTarget(target); + } else { + SetBranchImmTarget(target); + } +} + + +void Instruction::SetPCRelImmTarget(const Instruction* target) { + ptrdiff_t imm21; + if ((Mask(PCRelAddressingMask) == ADR)) { + imm21 = target - this; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP); + uintptr_t this_page = reinterpret_cast(this) / kPageSize; + uintptr_t target_page = reinterpret_cast(target) / kPageSize; + imm21 = target_page - this_page; + } + Instr imm = Assembler::ImmPCRelAddress(static_cast(imm21)); + + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); +} + + +void Instruction::SetBranchImmTarget(const Instruction* target) { + VIXL_ASSERT(((target - this) & 3) == 0); + Instr branch_imm = 0; + uint32_t imm_mask = 0; + int offset = static_cast((target - this) >> kInstructionSizeLog2); + switch (GetBranchType()) { + case CondBranchType: { + branch_imm = Assembler::ImmCondBranch(offset); + imm_mask = ImmCondBranch_mask; + break; + } + case UncondBranchType: { + branch_imm = Assembler::ImmUncondBranch(offset); + imm_mask = ImmUncondBranch_mask; + break; + } + case CompareBranchType: { + branch_imm = Assembler::ImmCmpBranch(offset); + imm_mask = ImmCmpBranch_mask; + break; + } + case TestBranchType: { + branch_imm = 
Assembler::ImmTestBranch(offset); + imm_mask = ImmTestBranch_mask; + break; + } + default: + VIXL_UNREACHABLE(); + } + SetInstructionBits(Mask(~imm_mask) | branch_imm); +} + + +void Instruction::SetImmLLiteral(const Instruction* source) { + VIXL_ASSERT(IsWordAligned(source)); + ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2; + Instr imm = Assembler::ImmLLiteral(static_cast(offset)); + Instr mask = ImmLLiteral_mask; + + SetInstructionBits(Mask(~mask) | imm); +} + + +VectorFormat VectorFormatHalfWidth(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D || + vform == kFormatH || vform == kFormatS || vform == kFormatD); + switch (vform) { + case kFormat8H: + return kFormat8B; + case kFormat4S: + return kFormat4H; + case kFormat2D: + return kFormat2S; + case kFormatH: + return kFormatB; + case kFormatS: + return kFormatH; + case kFormatD: + return kFormatS; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatDoubleWidth(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S || + vform == kFormatB || vform == kFormatH || vform == kFormatS); + switch (vform) { + case kFormat8B: + return kFormat8H; + case kFormat4H: + return kFormat4S; + case kFormat2S: + return kFormat2D; + case kFormatB: + return kFormatH; + case kFormatH: + return kFormatS; + case kFormatS: + return kFormatD; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatFillQ(VectorFormat vform) { + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return kFormat16B; + case kFormatH: + case kFormat4H: + case kFormat8H: + return kFormat8H; + case kFormatS: + case kFormat2S: + case kFormat4S: + return kFormat4S; + case kFormatD: + case kFormat1D: + case kFormat2D: + return kFormat2D; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + +VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) { + switch (vform) { + case kFormat4H: + return kFormat8B; + case kFormat8H: + return kFormat16B; + case kFormat2S: + return kFormat4H; + case kFormat4S: + return kFormat8H; + case kFormat1D: + return kFormat2S; + case kFormat2D: + return kFormat4S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + +VectorFormat VectorFormatDoubleLanes(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S); + switch (vform) { + case kFormat8B: + return kFormat16B; + case kFormat4H: + return kFormat8H; + case kFormat2S: + return kFormat4S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatHalfLanes(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S); + switch (vform) { + case kFormat16B: + return kFormat8B; + case kFormat8H: + return kFormat4H; + case kFormat4S: + return kFormat2S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat ScalarFormatFromLaneSize(int laneSize) { + switch (laneSize) { + case 8: + return kFormatB; + case 16: + return kFormatH; + case 32: + return kFormatS; + case 64: + return kFormatD; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat ScalarFormatFromFormat(VectorFormat vform) { + return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); +} + + +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case 
kFormatB: + return kBRegSize; + case kFormatH: + return kHRegSize; + case kFormatS: + case kFormat2H: + return kSRegSize; + case kFormatD: + return kDRegSize; + case kFormat8B: + case kFormat4H: + case kFormat2S: + case kFormat1D: + return kDRegSize; + default: + return kQRegSize; + } +} + + +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) { + return RegisterSizeInBitsFromFormat(vform) / 8; +} + + +unsigned LaneSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 8; + case kFormatH: + case kFormat2H: + case kFormat4H: + case kFormat8H: + return 16; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 32; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 64; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int LaneSizeInBytesFromFormat(VectorFormat vform) { + return LaneSizeInBitsFromFormat(vform) / 8; +} + + +int LaneSizeInBytesLog2FromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 0; + case kFormatH: + case kFormat2H: + case kFormat4H: + case kFormat8H: + return 1; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 2; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 3; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int LaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormat16B: + return 16; + case kFormat8B: + case kFormat8H: + return 8; + case kFormat4H: + case kFormat4S: + return 4; + case kFormat2H: + case kFormat2S: + case kFormat2D: + return 2; + case kFormat1D: + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return 1; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int MaxLaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 16; + case kFormatH: + case kFormat4H: + case kFormat8H: + return 8; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 4; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 2; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +// Does 'vform' indicate a vector format or a scalar format? +bool IsVectorFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return false; + default: + return true; + } +} + + +int64_t MaxIntFromFormat(VectorFormat vform) { + return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +int64_t MinIntFromFormat(VectorFormat vform) { + return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +uint64_t MaxUintFromFormat(VectorFormat vform) { + return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/instrument-aarch64.cc b/dep/vixl/src/aarch64/instrument-aarch64.cc new file mode 100644 index 000000000..c3097efd4 --- /dev/null +++ b/dep/vixl/src/aarch64/instrument-aarch64.cc @@ -0,0 +1,916 @@ +// Copyright 2014, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "instrument-aarch64.h" + +namespace vixl { +namespace aarch64 { + +Counter::Counter(const char* name, CounterType type) + : count_(0), enabled_(false), type_(type) { + VIXL_ASSERT(name != NULL); + strncpy(name_, name, kCounterNameMaxLength); + // Make sure `name_` is always NULL-terminated, even if the source's length is + // higher. + name_[kCounterNameMaxLength - 1] = '\0'; +} + + +void Counter::Enable() { enabled_ = true; } + + +void Counter::Disable() { enabled_ = false; } + + +bool Counter::IsEnabled() { return enabled_; } + + +void Counter::Increment() { + if (enabled_) { + count_++; + } +} + + +uint64_t Counter::GetCount() { + uint64_t result = count_; + if (type_ == Gauge) { + // If the counter is a Gauge, reset the count after reading. + count_ = 0; + } + return result; +} + + +const char* Counter::GetName() { return name_; } + + +CounterType Counter::GetType() { return type_; } + + +struct CounterDescriptor { + const char* name; + CounterType type; +}; + + +static const CounterDescriptor kCounterList[] = + {{"Instruction", Cumulative}, + + {"Move Immediate", Gauge}, + {"Add/Sub DP", Gauge}, + {"Logical DP", Gauge}, + {"Other Int DP", Gauge}, + {"FP DP", Gauge}, + + {"Conditional Select", Gauge}, + {"Conditional Compare", Gauge}, + + {"Unconditional Branch", Gauge}, + {"Compare and Branch", Gauge}, + {"Test and Branch", Gauge}, + {"Conditional Branch", Gauge}, + + {"Load Integer", Gauge}, + {"Load FP", Gauge}, + {"Load Pair", Gauge}, + {"Load Literal", Gauge}, + + {"Store Integer", Gauge}, + {"Store FP", Gauge}, + {"Store Pair", Gauge}, + + {"PC Addressing", Gauge}, + {"Other", Gauge}, + {"NEON", Gauge}, + {"Crypto", Gauge}}; + + +Instrument::Instrument(const char* datafile, uint64_t sample_period) + : output_stream_(stdout), sample_period_(sample_period) { + // Set up the output stream. If datafile is non-NULL, use that file. If it + // can't be opened, or datafile is NULL, use stdout. 
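+  // NOTE (annotation added in this port, not upstream VIXL): the resulting
+  // file begins with "# counters=23" and "# sample_period=..." header
+  // lines, followed by one CSV row of counter names and one row of counts
+  // per sample.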
+  if (datafile != NULL) {
+    output_stream_ = fopen(datafile, "w");
+    if (output_stream_ == NULL) {
+      printf("Can't open output file %s. Using stdout.\n", datafile);
+      output_stream_ = stdout;
+    }
+  }
+
+  static const int num_counters =
+      sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+  // Dump an instrumentation description comment at the top of the file.
+  fprintf(output_stream_, "# counters=%d\n", num_counters);
+  fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+  // Construct Counter objects from counter description array.
+  for (int i = 0; i < num_counters; i++) {
+    Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+    counters_.push_back(counter);
+  }
+
+  DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+  // Dump any remaining instruction data to the output file.
+  DumpCounters();
+
+  // Free all the counter objects.
+  std::list<Counter*>::iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    delete *it;
+  }
+
+  if (output_stream_ != stdout) {
+    fclose(output_stream_);
+  }
+}
+
+
+void Instrument::Update() {
+  // Increment the instruction counter, and dump all counters if a sample period
+  // has elapsed.
+  static Counter* counter = GetCounter("Instruction");
+  VIXL_ASSERT(counter->GetType() == Cumulative);
+  counter->Increment();
+
+  if ((sample_period_ != 0) && counter->IsEnabled() &&
+      (counter->GetCount() % sample_period_) == 0) {
+    DumpCounters();
+  }
+}
+
+
+void Instrument::DumpCounters() {
+  // Iterate through the counter objects, dumping their values to the output
+  // stream.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    fprintf(output_stream_, "%" PRIu64 ",", (*it)->GetCount());
+  }
+  fprintf(output_stream_, "\n");
+  fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+  // Iterate through the counter objects, dumping the counter names to the
+  // output stream.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    fprintf(output_stream_, "%s,", (*it)->GetName());
+  }
+  fprintf(output_stream_, "\n");
+  fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+  switch (event) {
+    case InstrumentStateEnable:
+      Enable();
+      break;
+    case InstrumentStateDisable:
+      Disable();
+      break;
+    default:
+      DumpEventMarker(event);
+  }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted comment
+  // line.
+  static Counter* counter = GetCounter("Instruction");
+
+  fprintf(output_stream_,
+          "# %c%c @ %" PRId64 "\n",
+          marker & 0xff,
+          (marker >> 8) & 0xff,
+          counter->GetCount());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+  // Get a Counter object by name from the counter list.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    if (strcmp((*it)->GetName(), name) == 0) {
+      return *it;
+    }
+  }
+
+  // A Counter by that name does not exist: print an error message to stderr
+  // and the output file, and exit.
+  static const char* error_message =
+      "# Error: Unknown counter \"%s\".
Exiting.\n"; + fprintf(stderr, error_message, name); + fprintf(output_stream_, error_message, name); + exit(1); +} + + +void Instrument::Enable() { + std::list::iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + (*it)->Enable(); + } +} + + +void Instrument::Disable() { + std::list::iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + (*it)->Disable(); + } +} + + +void Instrument::VisitPCRelAddressing(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("PC Addressing"); + counter->Increment(); +} + + +void Instrument::VisitAddSubImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitLogicalImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitMoveWideImmediate(const Instruction* instr) { + Update(); + static Counter* counter = GetCounter("Move Immediate"); + + if (instr->IsMovn() && (instr->GetRd() == kZeroRegCode)) { + unsigned imm = instr->GetImmMoveWide(); + HandleInstrumentationEvent(imm); + } else { + counter->Increment(); + } +} + + +void Instrument::VisitBitfield(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitCompareBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Compare and Branch"); + counter->Increment(); +} + + +void Instrument::VisitTestBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Test and Branch"); + counter->Increment(); +} + + +void Instrument::VisitConditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitSystem(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitException(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStorePair(const Instruction* instr) { + static Counter* load_pair_counter = GetCounter("Load Pair"); + static Counter* store_pair_counter = GetCounter("Store Pair"); + + if (instr->Mask(LoadStorePairLBit) != 0) { + load_pair_counter->Increment(); + } else { + store_pair_counter->Increment(); + } +} + + +void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairOffset(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairPreIndex(const Instruction* 
instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStoreExclusive(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitAtomicMemory(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitLoadLiteral(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Load Literal"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStore(const Instruction* instr) { + static Counter* load_int_counter = GetCounter("Load Integer"); + static Counter* store_int_counter = GetCounter("Store Integer"); + static Counter* load_fp_counter = GetCounter("Load FP"); + static Counter* store_fp_counter = GetCounter("Store FP"); + + switch (instr->Mask(LoadStoreMask)) { + case STRB_w: + case STRH_w: + case STR_w: + VIXL_FALLTHROUGH(); + case STR_x: + store_int_counter->Increment(); + break; + case STR_s: + VIXL_FALLTHROUGH(); + case STR_d: + store_fp_counter->Increment(); + break; + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_x: + case LDRSH_x: + case LDRSW_x: + case LDRSB_w: + VIXL_FALLTHROUGH(); + case LDRSH_w: + load_int_counter->Increment(); + break; + case LDR_s: + VIXL_FALLTHROUGH(); + case LDR_d: + load_fp_counter->Increment(); + break; + } +} + + +void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePostIndex(const Instruction* instr) { + USE(instr); + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePreIndex(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLogicalShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubExtended(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubWithCarry(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void 
Instrument::VisitDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitFPCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void Instrument::VisitFPImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPIntegerConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPFixedPointConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitCrypto2RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCrypto3RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCryptoAES(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitNEON2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON2RegMiscFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3SameFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3SameExtra(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void 
Instrument::VisitNEON3Different(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONAcrossLanes(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar2RegMiscFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Diff(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3SameFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3SameExtra(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarPairwise(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void 
Instrument::VisitNEONShiftImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONTable(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONPerm(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitUnallocated(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitUnimplemented(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +} // namespace aarch64 +} // namespace vixl diff --git a/dep/vixl/src/aarch64/logic-aarch64.cc b/dep/vixl/src/aarch64/logic-aarch64.cc new file mode 100644 index 000000000..aebd2270a --- /dev/null +++ b/dep/vixl/src/aarch64/logic-aarch64.cc @@ -0,0 +1,5340 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
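Every Visit* handler in the instrument file above follows the same idiom: look a named counter up once, cache it in a function-local static, and bump it on each visit. Below is a minimal standalone sketch of that pattern; Counter, GetCounter and VisitExample here are illustrative stand-ins, not the VIXL API.

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct Counter {
  uint64_t count = 0;
  void Increment() { count++; }
};

// The registry owns every counter; operator[] default-constructs a counter
// the first time a name is seen.
Counter* GetCounter(const std::string& name) {
  static std::map<std::string, Counter> counters;
  return &counters[name];
}

void VisitExample() {
  // The function-local static means the name lookup runs only on the first
  // call; later calls reuse the cached pointer.
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}

int main() {
  for (int i = 0; i < 3; i++) VisitExample();
  std::printf("NEON: %llu\n",
              (unsigned long long)GetCounter("NEON")->count);  // NEON: 3
  return 0;
}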
+ +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +#include <cmath> + +#include "simulator-aarch64.h" + +namespace vixl { +namespace aarch64 { + +using vixl::internal::SimFloat16; + +template <typename T> +bool IsFloat64() { + return false; +} +template <> +bool IsFloat64<double>() { + return true; +} + +template <typename T> +bool IsFloat32() { + return false; +} +template <> +bool IsFloat32<float>() { + return true; +} + +template <typename T> +bool IsFloat16() { + return false; +} +template <> +bool IsFloat16<Float16>() { + return true; +} +template <> +bool IsFloat16<SimFloat16>() { + return true; +} + +template <> +double Simulator::FPDefaultNaN<double>() { + return kFP64DefaultNaN; +} + + +template <> +float Simulator::FPDefaultNaN<float>() { + return kFP32DefaultNaN; +} + + +template <> +SimFloat16 Simulator::FPDefaultNaN<SimFloat16>() { + return SimFloat16(kFP16DefaultNaN); +} + + +double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToDouble(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToDouble(src, fbits, round); + } else { + return -UFixedToDouble(-src, fbits, round); + } +} + + +double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int64_t exponent = highest_significant_bit - fbits; + + return FPRoundToDouble(0, exponent, src, round); +} + + +float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToFloat(src, fbits, round); + } else { + return -UFixedToFloat(-src, fbits, round); + } +} + + +float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int32_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat(0, exponent, src, round); +} + + +SimFloat16 Simulator::FixedToFloat16(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat16(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToFloat16(src, fbits, round); + } else { + return -UFixedToFloat16(-src, fbits, round); + } +} + + +SimFloat16 Simulator::UFixedToFloat16(uint64_t src, + int fbits, + FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. 
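// Worked example (illustrative): src = 0b110 with fbits = 1 represents
// 6 / 2 = 3.0. The highest set bit is bit 2, so exponent = 2 - 1 = 1 and
// FPRoundToFloat16 packs src as 1.1b * 2^1 = 3.0.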
+ const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int16_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat16(0, exponent, src, round); +} + + +void Simulator::ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::ld1(VectorFormat vform, + LogicVRegister dst, + int index, + uint64_t addr) { + dst.ReadUintFromMem(vform, index, addr); +} + + +void Simulator::ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + addr1 += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); +} + + +void Simulator::ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + } +} + + +void Simulator::ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + addr1 += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); +} + + +void Simulator::ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, 
addr3); + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + addr1 += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); + dst4.ReadUintFromMem(vform, index, addr4); +} + + +void Simulator::ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + } +} + + +void Simulator::st1(VectorFormat vform, LogicVRegister src, uint64_t addr) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + src.WriteUintToMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::st1(VectorFormat vform, + LogicVRegister src, + int index, + uint64_t addr) { + src.WriteUintToMem(vform, index, addr); +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + addr += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + addr += 3 * esize; + 
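// The three cursors advance by 3 * esize per iteration because st3
// interleaves the registers: for a 4H arrangement (esize = 2) the stored
// element order is a0 b0 c0 a1 b1 c1 a2 b2 c2 a3 b3 c3.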
addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + dst4.WriteUintToMem(vform, i, addr4); + addr += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); + dst4.WriteUintToMem(vform, index, addr + 3 * esize); +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = src1.Int(vform, i); + int64_t sb = src2.Int(vform, i); + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + bool result = false; + switch (cond) { + case eq: + result = (ua == ub); + break; + case ge: + result = (sa >= sb); + break; + case gt: + result = (sa > sb); + break; + case hi: + result = (ua > ub); + break; + case hs: + result = (ua >= ub); + break; + case lt: + result = (sa < sb); + break; + case le: + result = (sa <= sb); + break; + default: + VIXL_UNREACHABLE(); + break; + } + dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond) { + SimVRegister temp; + LogicVRegister imm_reg = dup_immediate(vform, temp, imm); + return cmp(vform, dst, src1, imm_reg, cond); +} + + +LogicVRegister Simulator::cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua + ub; + if (ur < ua) { + dst.SetUnsignedSat(i, true); + } + + // Test for signed saturation. 
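// Both operands were left-justified to bit 63, so lane-size overflow is
// visible in the top bit regardless of the format: signed overflow occurred
// iff the operands share a sign and the result's sign differs from it.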
+ bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are the same, but different from the result, + // there was an overflow. + if ((pos_a == pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uzp1(vform, temp1, src1, src2); + uzp2(vform, temp2, src1, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + sub(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mul(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mla(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mls(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; 
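// As in the other by-element forms, the indexed lane is broadcast across a
// temporary with dup_element and the plain vector-by-vector helper is
// reused; the full-width indexform is needed because a widening
// instruction's index addresses the narrower source layout.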
+ VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return 
sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sdot(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmlah(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return udot(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmlsh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) const { + uint16_t result = 0; + uint16_t extended_op2 = op2; + for (int i = 0; i < 8; ++i) { + if ((op1 >> i) & 1) { + result = result ^ (extended_op2 
<< i); + } + } + return result; +} + + +LogicVRegister Simulator::pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidth(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform_src, i), + src2.Uint(vform_src, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform); + dst.ClearForWrite(vform); + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform_src, lane_count + i), + src2.Uint(vform_src, lane_count + i))); + } + return dst; +} + + +LogicVRegister Simulator::sub(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua - ub; + if (ub > ua) { + dst.SetUnsignedSat(i, false); + } + + // Test for signed saturation. + bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are different, and the sign of the first + // operand doesn't match the result, there was an overflow. 
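// Worked example: in 8-bit terms, 0x80 - 0x01 (-128 - 1) yields 0x7F; the
// operand signs differ and the result sign no longer matches the first
// operand, so the lane is flagged to saturate towards the negative limit.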
+ if ((pos_a != pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + + +LogicVRegister Simulator::and_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::eor(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) & ~imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::bif(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = ~src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = src2.Uint(vform, i); + uint64_t operand2 = dst.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + 
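bif, bit and bsl above all compute the same bitwise select, keep ^ ((keep ^ take) & mask); they differ only in which of dst, src1 and src2 supplies the mask and which supplies the fallback bits. A small self-contained sketch of that identity follows (BitwiseSelect is a hypothetical name, not part of VIXL):

#include <cstdint>
#include <cstdio>

// Take bits from 'take' where the mask bit is 1, keep 'keep' where it is 0.
static uint64_t BitwiseSelect(uint64_t keep, uint64_t take, uint64_t mask) {
  return keep ^ ((keep ^ take) & mask);
}

int main() {
  // Mask 0xF0: high nibble from 'take' (0xAB), low nibble from 'keep' (0x12).
  std::printf("%02llx\n",
              (unsigned long long)BitwiseSelect(0x12, 0xAB, 0xF0));  // a2
  return 0;
}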
+LogicVRegister Simulator::sminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src1_val = src1.Int(vform, i); + int64_t src2_val = src2.Int(vform, i); + int64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetInt(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::smax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::smin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::sminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + int lanes = LaneCountFromFormat(vform); + int64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < lanes; i += 2) { + int64_t first_val = src->Int(vform, i); + int64_t second_val = src->Int(vform, i + 1); + int64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? first_val : second_val; + } + VIXL_ASSERT(((i >> 1) + (j * lanes / 2)) < kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetIntArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::smaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmaxp(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::sminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmaxp(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VIXL_ASSERT(vform == kFormatD); + + uint64_t dst_val = src.Uint(kFormat2D, 0) + src.Uint(kFormat2D, 1); + dst.ClearForWrite(vform); + dst.SetUint(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::addv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); + + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::saddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::uaddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + uint64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Uint(vform, i); + } + + 
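// As with saddlv, the scalar destination lane is twice the source lane
// width, so the across-lanes sum cannot wrap: e.g. uaddlv on 16B sums at
// most 16 * 255 = 4080 into an H-sized (16-bit) result.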
dst.ClearForWrite(vform_dst); + dst.SetUint(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::sminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + int64_t dst_val = max ? INT64_MIN : INT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src_val = src.Int(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetInt(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::sminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src1_val = src1.Uint(vform, i); + uint64_t src2_val = src2.Uint(vform, i); + uint64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetUint(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::uminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + int lanes = LaneCountFromFormat(vform); + uint64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < LaneCountFromFormat(vform); i += 2) { + uint64_t first_val = src->Uint(vform, i); + uint64_t second_val = src->Uint(vform, i + 1); + uint64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? first_val : second_val; + } + VIXL_ASSERT(((i >> 1) + (j * lanes / 2)) < kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetUintArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmaxp(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmaxp(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + uint64_t dst_val = max ? 0 : UINT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src_val = src.Uint(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? 
src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetUint(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sshll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl2(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll(vform, dst, src, shift); +} + + +LogicVRegister Simulator::shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll2(vform, dst, src, shift); +} + + +LogicVRegister Simulator::ushll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl2(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted = src_lane << shift; + uint64_t mask = MaxUintFromFormat(vform) << shift; + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).SignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = 
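// Each immediate-shift helper here reduces to sshl/ushl with the count
// broadcast into a register; ushr/sshr below pass a negated count, since a
// negative per-lane count encodes a right shift and keeps rounding and
// saturation on one shared path.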
dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + VIXL_ASSERT((shift > 0) && + (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform)))); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted; + uint64_t mask; + if (shift == 64) { + shifted = 0; + mask = 0; + } else { + shifted = src_lane >> shift; + mask = MaxUintFromFormat(vform) >> shift; + } + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return sshl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::usra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::cls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::clz(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + 
dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::cnt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t value = src.Uint(vform, i); + result[i] = 0; + for (int j = 0; j < laneSizeInBits; j++) { + result[i] += (value & 1); + value >>= 1; + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::sshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + int64_t lj_src_val = src1.IntLeftJustified(vform, i); + + // Set signed saturation state. + if ((shift_val > CountLeadingSignBits(lj_src_val)) && (lj_src_val != 0)) { + dst.SetSignedSat(i, lj_src_val >= 0); + } + + // Set unsigned saturation state. + if (lj_src_val < 0) { + dst.SetUnsignedSat(i, false); + } else if ((shift_val > CountLeadingZeros(lj_src_val)) && + (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + int64_t src_val = src1.Int(vform, i); + bool src_is_negative = src_val < 0; + if (shift_val > 63) { + dst.SetInt(vform, i, 0); + } else if (shift_val < -63) { + dst.SetRounding(i, src_is_negative); + dst.SetInt(vform, i, src_is_negative ? -1 : 0); + } else { + // Use unsigned types for shifts, as behaviour is undefined for signed + // lhs. + uint64_t usrc_val = static_cast<uint64_t>(src_val); + + if (shift_val < 0) { + // Convert to right shift. + shift_val = -shift_val; + + // Set rounding state by testing most-significant bit shifted out. + // Rounding only needed on right shifts. + if (((usrc_val >> (shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + usrc_val >>= shift_val; + + if (src_is_negative) { + // Simulate sign-extension. + usrc_val |= (~UINT64_C(0) << (64 - shift_val)); + } + } else { + usrc_val <<= shift_val; + } + dst.SetUint(vform, i, usrc_val); + } + } + return dst; +} + + +LogicVRegister Simulator::ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + uint64_t lj_src_val = src1.UintLeftJustified(vform, i); + + // Set saturation state. + if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + uint64_t src_val = src1.Uint(vform, i); + if ((shift_val > 63) || (shift_val < -64)) { + dst.SetUint(vform, i, 0); + } else { + if (shift_val < 0) { + // Set rounding state. Rounding only needed on right shifts. + if (((src_val >> (-shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + if (shift_val == -64) { + src_val = 0; + } else { + src_val >>= -shift_val; + } + } else { + src_val <<= shift_val; + } + dst.SetUint(vform, i, src_val); + } + } + return dst; +} + + +LogicVRegister Simulator::neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + dst.SetInt(vform, i, (sa == INT64_MIN) ? 
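// INT64_MIN has no positive two's-complement counterpart, so it is left
// unchanged here (the saturation flag set above lets saturating callers
// clamp the lane afterwards).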
sa : -sa); + } + return dst; +} + + +LogicVRegister Simulator::suqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = dst.IntLeftJustified(vform, i); + uint64_t ub = src.UintLeftJustified(vform, i); + uint64_t ur = sa + ub; + + int64_t sr; + memcpy(&sr, &ur, sizeof(sr)); + if (sr < sa) { // Test for signed positive saturation. + dst.SetInt(vform, i, MaxIntFromFormat(vform)); + } else { + dst.SetUint(vform, i, dst.Int(vform, i) + src.Uint(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = dst.UintLeftJustified(vform, i); + int64_t sb = src.IntLeftJustified(vform, i); + uint64_t ur = ua + sb; + + if ((sb > 0) && (ur <= ua)) { + dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation. + } else if ((sb < 0) && (ur >= ua)) { + dst.SetUint(vform, i, 0); // Negative saturation. + } else { + dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + if (sa < 0) { + dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa); + } else { + dst.SetInt(vform, i, sa); + } + } + return dst; +} + + +LogicVRegister Simulator::extractnarrow(VectorFormat dstform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned) { + bool upperhalf = false; + VectorFormat srcform = kFormatUndefined; + int64_t ssrc[8]; + uint64_t usrc[8]; + + switch (dstform) { + case kFormat8B: + upperhalf = false; + srcform = kFormat8H; + break; + case kFormat16B: + upperhalf = true; + srcform = kFormat8H; + break; + case kFormat4H: + upperhalf = false; + srcform = kFormat4S; + break; + case kFormat8H: + upperhalf = true; + srcform = kFormat4S; + break; + case kFormat2S: + upperhalf = false; + srcform = kFormat2D; + break; + case kFormat4S: + upperhalf = true; + srcform = kFormat2D; + break; + case kFormatB: + upperhalf = false; + srcform = kFormatH; + break; + case kFormatH: + upperhalf = false; + srcform = kFormatS; + break; + case kFormatS: + upperhalf = false; + srcform = kFormatD; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + ssrc[i] = src.Int(srcform, i); + usrc[i] = src.Uint(srcform, i); + } + + int offset; + if (upperhalf) { + offset = LaneCountFromFormat(dstform) / 2; + } else { + offset = 0; + dst.ClearForWrite(dstform); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + // Test for signed saturation + if (ssrc[i] > MaxIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, true); + } else if (ssrc[i] < MinIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, false); + } + + // Test for unsigned saturation + if (srcIsSigned) { + if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) { + dst.SetUnsignedSat(offset + i, true); + } else if (ssrc[i] < 0) { + dst.SetUnsignedSat(offset + i, false); + } + } else { + if (usrc[i] > MaxUintFromFormat(dstform)) { + dst.SetUnsignedSat(offset + i, true); + } + } + + int64_t result; + if 
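// Narrowing keeps only the destination-sized low bits; the saturation
// flags recorded above are applied later by the sqxtn/sqxtun/uqxtn
// wrappers, while plain xtn simply truncates.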
(srcIsSigned) { + result = ssrc[i] & MaxUintFromFormat(dstform); + } else { + result = usrc[i] & MaxUintFromFormat(dstform); + } + + if (dstIsSigned) { + dst.SetInt(dstform, offset + i, result); + } else { + dst.SetUint(dstform, offset + i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true); +} + + +LogicVRegister Simulator::sqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (issigned) { + int64_t sr = src1.Int(vform, i) - src2.Int(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetInt(vform, i, sr); + } else { + int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetUint(vform, i, sr); + } + } + return dst; +} + + +LogicVRegister Simulator::saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, true); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, false); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::not_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, ~src.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::rbit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + uint64_t reversed_value; + uint64_t value; + for (int i = 0; i < laneCount; i++) { + value = src.Uint(vform, i); + reversed_value = 0; + for (int j = 0; j < laneSizeInBits; j++) { + reversed_value = (reversed_value << 1) | (value & 1); + value >>= 1; + } + result[i] = reversed_value; + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int revSize) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSize = LaneSizeInBytesFromFormat(vform); + int lanesPerLoop = revSize / laneSize; + for (int i = 0; i < laneCount; i += lanesPerLoop) { + for (int j = 0; j < lanesPerLoop; j++) { + result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j); + } + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, 
result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev16(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 2); +} + + +LogicVRegister Simulator::rev32(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 4); +} + + +LogicVRegister Simulator::rev64(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 8); +} + + +LogicVRegister Simulator::addlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool is_signed, + bool do_accumulate) { + VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform); + VIXL_ASSERT(LaneSizeInBitsFromFormat(vformsrc) <= 32); + VIXL_ASSERT(LaneCountFromFormat(vform) <= 8); + + uint64_t result[8]; + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + if (is_signed) { + result[i] = static_cast<uint64_t>(src.Int(vformsrc, 2 * i) + + src.Int(vformsrc, 2 * i + 1)); + } else { + result[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1); + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; ++i) { + if (do_accumulate) { + result[i] += dst.Uint(vform, i); + } + dst.SetUint(vform, i, result[i]); + } + + return dst; +} + + +LogicVRegister Simulator::saddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, false); +} + + +LogicVRegister Simulator::uaddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, false); +} + + +LogicVRegister Simulator::sadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, true); +} + + +LogicVRegister Simulator::uadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, true); +} + + +LogicVRegister Simulator::ext(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + uint8_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount - index; ++i) { + result[i] = src1.Uint(vform, i + index); + } + for (int i = 0; i < index; ++i) { + result[laneCount - index + i] = src2.Uint(vform, i); + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + +template <typename T> +LogicVRegister Simulator::fcadd(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element3; + rot = (rot == 1) ?
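+// Editor's note (illustrative; not upstream code): the instruction encodes
+// the rotation as one bit, so rot == 0 selects 90 degrees and rot == 1
+// selects 270 degrees. With complex lanes laid out as (r, i), fcadd #90
+// computes (r1 - i2, i1 + r2) and fcadd #270 computes (r1 + i2, i1 - r2).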
270 : 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 90: + element1 = FPNeg(src2.Float<T>(e * 2 + 1)); + element3 = src2.Float<T>(e * 2); + break; + case 270: + element1 = src2.Float<T>(e * 2 + 1); + element3 = FPNeg(src2.Float<T>(e * 2)); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be uninitialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPAdd(src1.Float<T>(e * 2), element1)); + dst.SetFloat(e * 2 + 1, FPAdd(src1.Float<T>(e * 2 + 1), element3)); + } + return dst; +} + + +LogicVRegister Simulator::fcadd(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcadd<float>(vform, dst, src1, src2, rot); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fcadd<double>(vform, dst, src1, src2, rot); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int index, + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element2, element3, element4; + rot *= 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 0: + element1 = src2.Float<T>(index * 2); + element2 = src1.Float<T>(e * 2); + element3 = src2.Float<T>(index * 2 + 1); + element4 = src1.Float<T>(e * 2); + break; + case 90: + element1 = FPNeg(src2.Float<T>(index * 2 + 1)); + element2 = src1.Float<T>(e * 2 + 1); + element3 = src2.Float<T>(index * 2); + element4 = src1.Float<T>(e * 2 + 1); + break; + case 180: + element1 = FPNeg(src2.Float<T>(index * 2)); + element2 = src1.Float<T>(e * 2); + element3 = FPNeg(src2.Float<T>(index * 2 + 1)); + element4 = src1.Float<T>(e * 2); + break; + case 270: + element1 = src2.Float<T>(index * 2 + 1); + element2 = src1.Float<T>(e * 2 + 1); + element3 = FPNeg(src2.Float<T>(index * 2)); + element4 = src1.Float<T>(e * 2 + 1); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be uninitialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPMulAdd(dst.Float<T>(e * 2), element2, element1)); + dst.SetFloat(e * 2 + 1, + FPMulAdd(dst.Float<T>(e * 2 + 1), element4, element3)); + } + return dst; +} + + +template <typename T> +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element2, element3, element4; + rot *= 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 0: + element1 = src2.Float<T>(e * 2); + element2 = src1.Float<T>(e * 2); + element3 = src2.Float<T>(e * 2 + 1); + element4 = src1.Float<T>(e * 2); + break; + case 90: + element1 = FPNeg(src2.Float<T>(e * 2 + 1)); + element2 = src1.Float<T>(e * 2 + 1); + element3 = src2.Float<T>(e * 2); + element4 = src1.Float<T>(e * 2 + 1); +
break; + case 180: + element1 = FPNeg(src2.Float<T>(e * 2)); + element2 = src1.Float<T>(e * 2); + element3 = FPNeg(src2.Float<T>(e * 2 + 1)); + element4 = src1.Float<T>(e * 2); + break; + case 270: + element1 = src2.Float<T>(e * 2 + 1); + element2 = src1.Float<T>(e * 2 + 1); + element3 = FPNeg(src2.Float<T>(e * 2)); + element4 = src1.Float<T>(e * 2 + 1); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be uninitialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPMulAdd(dst.Float<T>(e * 2), element2, element1)); + dst.SetFloat(e * 2 + 1, + FPMulAdd(dst.Float<T>(e * 2 + 1), element4, element3)); + } + return dst; +} + + +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmla<float>(vform, dst, src1, src2, rot); + } else { + fcmla<double>(vform, dst, src1, src2, rot); + } + return dst; +} + + +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int index, + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmla<float>(vform, dst, src1, src2, index, rot); + } else { + fcmla<double>(vform, dst, src1, src2, index, rot); + } + return dst; +} + + +LogicVRegister Simulator::dup_element(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int src_index) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = src.Uint(vform, src_index); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::dup_immediate(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = imm & MaxUintFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::ins_element(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + int src_index) { + dst.SetUint(vform, dst_index, src.Uint(vform, src_index)); + return dst; +} + + +LogicVRegister Simulator::ins_immediate(VectorFormat vform, + LogicVRegister dst, + int dst_index, + uint64_t imm) { + uint64_t value = imm & MaxUintFromFormat(vform); + dst.SetUint(vform, dst_index, value); + return dst; +} + + +LogicVRegister Simulator::movi(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, imm); + } + return dst; +} + + +LogicVRegister Simulator::mvni(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, ~imm); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) | imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +}
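+// Editor's note (illustrative; not upstream code): movi/mvni/orr above
+// take an immediate that the caller has already expanded to the lane
+// pattern. For example, movi of 0xFF00 into 4H lanes replicates 0xFF00
+// into each 16-bit lane, and mvni stores ~0xFF00, which SetUint truncates
+// to the lane width as 0x00FF.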
+ + +LogicVRegister Simulator::uxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src.Uint(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetInt(vform, i, src.Int(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::uxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetInt(vform, i, src.Int(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vform_src = VectorFormatDoubleWidth(vform); + VectorFormat vform_dst = vform; + LogicVRegister shifted_src = ushr(vform_src, temp, src, shift); + return extractnarrow(vform_dst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::Table(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& ind, + bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2, + const LogicVRegister* tab3, + const LogicVRegister* tab4) { + VIXL_ASSERT(tab1 != NULL); + const LogicVRegister* tab[4] = {tab1, tab2, tab3, tab4}; + uint64_t result[kMaxLanesPerVector]; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + result[i] = zero_out_of_bounds ? 
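+// Editor's note on Table below (illustrative; not upstream code): indices
+// are byte offsets into up to four concatenated 16-byte table registers,
+// so index >> 4 selects the register and index & 15 the byte within it.
+// tbl zeroes out-of-range lanes (zero_out_of_bounds == true) while tbx
+// leaves the destination lane unchanged, which is why the result array is
+// seeded from dst first.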
0 : dst.Uint(kFormat16B, i); + } + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + int tab_idx = static_cast<int>(j >> 4); + int j_idx = static_cast<int>(j & 15); + if ((tab_idx < 4) && (tab[tab_idx] != NULL)) { + result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx); + } + } + dst.SetUintArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3, &tab4); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3, &tab4); +} + + +LogicVRegister Simulator::uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; +
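+// Editor's note (illustrative; not upstream code): the narrowing shifts
+// above compose a full-width shift with extractnarrow plus a saturation
+// pass. E.g. uqshrn of the 32-bit lane 0x12345678 by 16 yields 0x1234
+// exactly, but by 8 yields 0x123456, which exceeds 16 bits and saturates
+// to 0xFFFF.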
VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddl(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::ssubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + uaba(vform, 
dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::uabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::sabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::sabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + 
SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull2(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. 
+ // To avoid this, we use (src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as (2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? (1 << (esize - 2)) : 0; + int64_t product; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + product = src1.Int(vform, i) * src2.Int(vform, i); + product += round_const; + product = product >> (esize - 1); + + if (product > MaxIntFromFormat(vform)) { + product = MaxIntFromFormat(vform); + } else if (product < MinIntFromFormat(vform)) { + product = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, product); + } + return dst; +} + + +LogicVRegister Simulator::dot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_signed) { + VectorFormat quarter_vform = + VectorFormatHalfWidthDoubleLanes(VectorFormatHalfWidthDoubleLanes(vform)); + + dst.ClearForWrite(vform); + for (int e = 0; e < LaneCountFromFormat(vform); e++) { + int64_t result = 0; + int64_t element1, element2; + for (int i = 0; i < 4; i++) { + int index = 4 * e + i; + if (is_signed) { + element1 = src1.Int(quarter_vform, index); + element2 = src2.Int(quarter_vform, index); + } else { + element1 = src1.Uint(quarter_vform, index); + element2 = src2.Uint(quarter_vform, index); + } + result += element1 * element2; + } + + result += dst.Int(vform, e); + dst.SetInt(vform, e, result); + } + return dst; +} + + +LogicVRegister Simulator::sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return dot(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return dot(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::sqrdmlash(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round, + bool sub_op) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. + // To avoid this, we use: + // (dst << (esize - 1) + src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as: + // (dst << esize + 2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? 
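+// Editor's note (illustrative; not upstream code): in Q15 terms, sqdmulh
+// of 0x4000 (0.5) with itself is (0x4000 * 0x4000) >> 15 = 0x2000 (0.25);
+// sqrdmulh adds the 1 << (esize - 2) rounding constant before the shift.
+// Only 0x8000 * 0x8000 (-1.0 * -1.0) can overflow the lane, and it
+// saturates to 0x7FFF.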
(1 << (esize - 2)) : 0; + int64_t accum; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + accum = dst.Int(vform, i) << (esize - 1); + if (sub_op) { + accum -= src1.Int(vform, i) * src2.Int(vform, i); + } else { + accum += src1.Int(vform, i) * src2.Int(vform, i); + } + accum += round_const; + accum = accum >> (esize - 1); + + if (accum > MaxIntFromFormat(vform)) { + accum = MaxIntFromFormat(vform); + } else if (accum < MinIntFromFormat(vform)) { + accum = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, accum); + } + return dst; +} + + +LogicVRegister Simulator::sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + return sqrdmlash(vform, dst, src1, src2, round, false); +} + + +LogicVRegister Simulator::sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + return sqrdmlash(vform, dst, src1, src2, round, true); +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sqrdmulh(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::addhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::addhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + 
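+// Editor's note (illustrative; not upstream code): the *hn forms compute a
+// full-width result and keep only its high half via shrn by the destination
+// lane size. E.g. addhn on 32-bit lanes 0x80000000 + 0x80000000 wraps to
+// 0x00000000, so the narrowed 16-bit high half is 0x0000; raddhn rounds,
+// effectively adding 0x8000 before the shift.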
sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, 2 * i); + result[(2 * i) + 1] = src2.Uint(vform, 2 * i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, (2 * i) + 1); + result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, i); + result[(2 * i) + 1] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, pairs + i); + result[(2 * i) + 1] = src2.Uint(vform, pairs + i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[2 * i]); + } + return dst; +} + + +LogicVRegister Simulator::uzp2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[(2 * i) + 1]); + } + return dst; +} + + +template <typename T> +T Simulator::FPNeg(T op) { + return -op; +} + +template <typename T> +T Simulator::FPAdd(T op1, T op2) { + T result = FPProcessNaNs(op1, op2); + if (IsNaN(result)) { + return result; + } + + if (IsInf(op1) && IsInf(op2) && (op1 != op2)) { + // inf + -inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic.
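+// Editor's note on the permutes above (illustrative; not upstream code):
+// with 4H lanes a = [a3 a2 a1 a0] and b = [b3 b2 b1 b0], zip1 produces
+// [b1 a1 b0 a0] and zip2 [b3 a3 b2 a2]; uzp1 keeps the even-indexed lanes
+// [b2 b0 a2 a0] and uzp2 the odd ones; trn1 pairs the even-indexed lanes
+// of both sources and trn2 the odd-indexed ones.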
+ return op1 + op2; + } +} + + +template <typename T> +T Simulator::FPSub(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if (IsInf(op1) && IsInf(op2) && (op1 == op2)) { + // inf - inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 - op2; + } +} + + +template <typename T> +T Simulator::FPMul(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if ((IsInf(op1) && (op2 == 0.0)) || (IsInf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 * op2; + } +} + + +template <typename T> +T Simulator::FPMulx(T op1, T op2) { + if ((IsInf(op1) && (op2 == 0.0)) || (IsInf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns +/-2.0. + T two = 2.0; + return copysign(1.0, op1) * copysign(1.0, op2) * two; + } + return FPMul(op1, op2); +} + + +template <typename T> +T Simulator::FPMulAdd(T a, T op1, T op2) { + T result = FPProcessNaNs3(a, op1, op2); + + T sign_a = copysign(1.0, a); + T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); + bool isinf_prod = IsInf(op1) || IsInf(op2); + bool operation_generates_nan = + (IsInf(op1) && (op2 == 0.0)) || // inf * 0.0 + (IsInf(op2) && (op1 == 0.0)) || // 0.0 * inf + (IsInf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf + + if (IsNaN(result)) { + // Generated NaNs override quiet NaNs propagated from a. + if (operation_generates_nan && IsQuietNaN(a)) { + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + return result; + } + } + + // If the operation would produce a NaN, return the default NaN. + if (operation_generates_nan) { + FPProcessException(); + return FPDefaultNaN<T>(); + } + + // Work around broken fma implementations for exact zero results: The sign of + // exact 0.0 results is positive unless both a and op1 * op2 are negative. + if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { + return ((sign_a < T(0.0)) && (sign_prod < T(0.0))) ? -0.0 : 0.0; + } + + result = FusedMultiplyAdd(op1, op2, a); + VIXL_ASSERT(!IsNaN(result)); + + // Work around broken fma implementations for rounded zero results: If a is + // 0.0, the sign of the result is the sign of op1 * op2 before rounding. + if ((a == 0.0) && (result == 0.0)) { + return copysign(0.0, sign_prod); + } + + return result; +} + + +template <typename T> +T Simulator::FPDiv(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if ((IsInf(op1) && IsInf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { + // inf / inf and 0.0 / 0.0 return the default NaN. + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + if (op2 == 0.0) { + FPProcessException(); + if (!IsNaN(op1)) { + double op1_sign = copysign(1.0, op1); + double op2_sign = copysign(1.0, op2); + return static_cast<T>(op1_sign * op2_sign * kFP64PositiveInfinity); + } + } + + // Other cases should be handled by standard arithmetic. + return op1 / op2; + } +} + + +template <typename T> +T Simulator::FPSqrt(T op) { + if (IsNaN(op)) { + return FPProcessNaN(op); + } else if (op < T(0.0)) { + FPProcessException(); + return FPDefaultNaN<T>(); + } else { + return sqrt(op); + } +} + + +template <typename T> +T Simulator::FPMax(T a, T b) { + T result = FPProcessNaNs(a, b); + if (IsNaN(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return +0.0.
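+// Editor's note on FPMulAdd above (illustrative; not upstream code): the
+// explicit zero-sign handling exists because some libc fma implementations
+// mis-sign exact zero results. IEEE 754 fusedMultiplyAdd(-0.0, 5.0, +0.0)
+// must return +0.0; a -0.0 result only arises when the addend and the
+// product are both negative zeros.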
+ return 0.0; + } else { + return (a > b) ? a : b; + } +} + + +template <typename T> +T Simulator::FPMaxNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64NegativeInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64NegativeInfinity; + } + + T result = FPProcessNaNs(a, b); + return IsNaN(result) ? result : FPMax(a, b); +} + + +template <typename T> +T Simulator::FPMin(T a, T b) { + T result = FPProcessNaNs(a, b); + if (IsNaN(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return -0.0. + return -0.0; + } else { + return (a < b) ? a : b; + } +} + + +template <typename T> +T Simulator::FPMinNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64PositiveInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64PositiveInfinity; + } + + T result = FPProcessNaNs(a, b); + return IsNaN(result) ? result : FPMin(a, b); +} + + +template <typename T> +T Simulator::FPRecipStepFused(T op1, T op2) { + const T two = 2.0; + if ((IsInf(op1) && (op2 == 0.0)) || ((op1 == 0.0) && (IsInf(op2)))) { + return two; + } else if (IsInf(op1) || IsInf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + return FusedMultiplyAdd(op1, op2, two); + } +} + +template <typename T> +bool IsNormal(T value) { + return std::isnormal(value); +} + +template <> +bool IsNormal(SimFloat16 value) { + uint16_t rawbits = Float16ToRawbits(value); + uint16_t exp_mask = 0x7c00; + // Check that the exponent is neither all zeroes nor all ones. + return ((rawbits & exp_mask) != 0) && ((~rawbits & exp_mask) != 0); +} + + +template <typename T> +T Simulator::FPRSqrtStepFused(T op1, T op2) { + const T one_point_five = 1.5; + const T two = 2.0; + + if ((IsInf(op1) && (op2 == 0.0)) || ((op1 == 0.0) && (IsInf(op2)))) { + return one_point_five; + } else if (IsInf(op1) || IsInf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + // The multiply-add-halve operation must be fully fused, so avoid interim + // rounding by checking which operand can be losslessly divided by two + // before doing the multiply-add. + if (IsNormal(op1 / two)) { + return FusedMultiplyAdd(op1 / two, op2, one_point_five); + } else if (IsNormal(op2 / two)) { + return FusedMultiplyAdd(op1, op2 / two, one_point_five); + } else { + // Neither operand is normal after halving: the result is dominated by + // the addition term, so just return that. + return one_point_five; + } + } +} + +int32_t Simulator::FPToFixedJS(double value) { + // The Z-flag is set when the conversion from double precision floating-point + // to 32-bit integer is exact. If the source value is +/-Infinity, -0.0, NaN, + // outside the bounds of a 32-bit integer, or isn't an exact integer then the + // Z-flag is unset. + int Z = 1; + int32_t result; + + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + // +/- zero and infinity all return zero, however -0 and +/- Infinity also + // unset the Z-flag. + result = 0.0; + if ((value != 0.0) || std::signbit(value)) { + Z = 0; + } + } else if (std::isnan(value)) { + // NaN values unset the Z-flag and set the result to 0. + FPProcessNaN(value); + result = 0; + Z = 0; + } else { + // All other values are converted to an integer representation, rounded + // toward zero.
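+// Editor's note on FPRSqrtStepFused above (illustrative; not upstream
+// code): frsqrts negates one operand before calling it, so the fused
+// fma(op1 / 2, op2, 1.5) evaluates (3 - a * b) / 2, the Newton-Raphson
+// step for 1/sqrt. Halving whichever operand remains normal avoids the
+// intermediate rounding an unfused multiply-add-halve would introduce.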
+ double int_result = std::floor(value); + double error = value - int_result; + + if ((error != 0.0) && (int_result < 0.0)) { + int_result++; + } + + // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost + // write a one-liner with std::round, but the behaviour on ties is incorrect + // for our purposes. + double mod_const = static_cast<double>(UINT64_C(1) << 32); + double mod_error = + (int_result / mod_const) - std::floor(int_result / mod_const); + double constrained; + if (mod_error == 0.5) { + constrained = INT32_MIN; + } else { + constrained = int_result - mod_const * round(int_result / mod_const); + } + + VIXL_ASSERT(std::floor(constrained) == constrained); + VIXL_ASSERT(constrained >= INT32_MIN); + VIXL_ASSERT(constrained <= INT32_MAX); + + // Take the bottom 32 bits of the result as a 32-bit integer. + result = static_cast<int32_t>(constrained); + + if ((int_result < INT32_MIN) || (int_result > INT32_MAX) || + (error != 0.0)) { + // If the integer result is out of range or the conversion isn't exact, + // take exception and unset the Z-flag. + FPProcessException(); + Z = 0; + } + } + + ReadNzcv().SetN(0); + ReadNzcv().SetZ(Z); + ReadNzcv().SetC(0); + ReadNzcv().SetV(0); + + return result; +} + + +double Simulator::FPRoundInt(double value, FPRounding round_mode) { + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + return value; + } else if (IsNaN(value)) { + return FPProcessNaN(value); + } + + double int_result = std::floor(value); + double error = value - int_result; + switch (round_mode) { + case FPTieAway: { + // Take care of correctly handling the range ]-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 < value) && (value < 0.0)) { + int_result = -0.0; + + } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is positive, round up. + int_result++; + } + break; + } + case FPTieEven: { + // Take care of correctly handling the range [-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 <= value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is odd, round up. + } else if ((error > 0.5) || + ((error == 0.5) && (std::fmod(int_result, 2) != 0))) { + int_result++; + } + break; + } + case FPZero: { + // If value>0 then we take floor(value) + // otherwise, ceil(value). + if (value < 0) { + int_result = ceil(value); + } + break; + } + case FPNegativeInfinity: { + // We always use floor(value). + break; + } + case FPPositiveInfinity: { + // Take care of correctly handling the range ]-1.0, -0.0], which must + // yield -0.0. + if ((-1.0 < value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is non-zero, round up. + } else if (error > 0.0) { + int_result++; + } + break; + } + default: + VIXL_UNIMPLEMENTED(); + } + return int_result; +} + + +int16_t Simulator::FPToInt16(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kHMaxInt) { + return kHMaxInt; + } else if (value < kHMinInt) { + return kHMinInt; + } + return IsNaN(value) ? 0 : static_cast<int16_t>(value); +} + + +int32_t Simulator::FPToInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxInt) { + return kWMaxInt; + } else if (value < kWMinInt) { + return kWMinInt; + } + return IsNaN(value) ?
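+// Editor's note on FPRoundInt above (illustrative; not upstream code):
+// starting from floor(value) plus an error term keeps the tie handling
+// explicit: 2.5 rounds to 3 under FPTieAway but to 2 under FPTieEven
+// (floor = 2, error = 0.5, and 2 is even), while -2.5 rounds to -3 and
+// -2 respectively.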
0 : static_cast<int32_t>(value); +} + + +int64_t Simulator::FPToInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxInt) { + return kXMaxInt; + } else if (value < kXMinInt) { + return kXMinInt; + } + return IsNaN(value) ? 0 : static_cast<int64_t>(value); +} + + +uint16_t Simulator::FPToUInt16(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kHMaxUInt) { + return kHMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 0 : static_cast<uint16_t>(value); +} + + +uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxUInt) { + return kWMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 0 : static_cast<uint32_t>(value); +} + + +uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxUInt) { + return kXMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 0 : static_cast<uint64_t>(value); +} + + +#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template <typename T> \ + LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + dst.ClearForWrite(vform); \ + for (int i = 0; i < LaneCountFromFormat(vform); i++) { \ + T op1 = src1.Float<T>(i); \ + T op2 = src2.Float<T>(i); \ + T result; \ + if (PROCNAN) { \ + result = FPProcessNaNs(op1, op2); \ + if (!IsNaN(result)) { \ + result = OP(op1, op2); \ + } \ + } else { \ + result = OP(op1, op2); \ + } \ + dst.SetFloat(i, result); \ + } \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { \ + FN<SimFloat16>(vform, dst, src1, src2); \ + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \ + FN<float>(vform, dst, src1, src2); \ + } else { \ + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \ + FN<double>(vform, dst, src1, src2); \ + } \ + return dst; \ + } +NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP) +#undef DEFINE_NEON_FP_VECTOR_OP + + +LogicVRegister Simulator::fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = fmul(vform, temp, src1, src2); + return fneg(vform, dst, product); +} + + +template <typename T> +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float<T>(i); + T op2 = src2.Float<T>(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, IsNaN(result) ?
result : FPRecipStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frecps(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecps(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecps(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, IsNaN(result) ? result : FPRSqrtStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frsqrts(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frsqrts(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frsqrts(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + bool result = false; + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T nan_result = FPProcessNaNs(op1, op2); + if (!IsNaN(nan_result)) { + switch (cond) { + case eq: + result = (op1 == op2); + break; + case ge: + result = (op1 >= op2); + break; + case gt: + result = (op1 > op2); + break; + case le: + result = (op1 <= op2); + break; + case lt: + result = (op1 < op2); + break; + default: + VIXL_UNREACHABLE(); + break; + } + } + dst.SetUint(vform, i, result ? 
MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fcmp(vform, dst, src1, src2, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmp(vform, dst, src1, src2, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fcmp(vform, dst, src1, src2, cond); + } + return dst; +} + + +LogicVRegister Simulator::fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond) { + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister zero_reg = + dup_immediate(vform, temp, Float16ToRawbits(SimFloat16(0.0))); + fcmp(vform, dst, src, zero_reg, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister zero_reg = dup_immediate(vform, temp, FloatToRawbits(0.0)); + fcmp(vform, dst, src, zero_reg, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister zero_reg = dup_immediate(vform, temp, DoubleToRawbits(0.0)); + fcmp(vform, dst, src, zero_reg, cond); + } + return dst; +} + + +LogicVRegister Simulator::fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + SimVRegister temp1, temp2; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } + return dst; +} + + +template +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fmla(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmla(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmla(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == 
kHRegSize) { + fmls(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmls(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmls(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + op = -op; + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fneg(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fneg(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fneg(vform, dst, src); + } + return dst; +} + + +template +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + if (copysign(1.0, op) < 0.0) { + op = -op; + } + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fabs_(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fabs_(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fabs_(vform, dst, src); + } + return dst; +} + + +LogicVRegister Simulator::fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + fsub(vform, temp, src1, src2); + fabs_(vform, dst, temp); + return dst; +} + + +LogicVRegister Simulator::fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } + return dst; +} + + +#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + SimVRegister temp1, temp2; \ + uzp1(vform, temp1, src1, src2); \ + uzp2(vform, temp2, src1, src2); \ + FN(vform, dst, temp1, temp2); \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src) { \ + if (vform == kFormatH) { \ + SimFloat16 result(OP(SimFloat16(RawbitsToFloat16(src.Uint(vform, 0))), \ + SimFloat16(RawbitsToFloat16(src.Uint(vform, 1))))); \ + dst.SetUint(vform, 0, Float16ToRawbits(result)); \ + } else if (vform == kFormatS) { \ + float result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ + } else { \ + VIXL_ASSERT(vform == kFormatD); \ + double result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ 
+ } \ + dst.ClearForWrite(vform); \ + return dst; \ + } +NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP) +#undef DEFINE_NEON_FP_PAIR_OP + +template +LogicVRegister Simulator::fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPMinMaxOp::type Op) { + VIXL_ASSERT((vform == kFormat4H) || (vform == kFormat8H) || + (vform == kFormat4S)); + USE(vform); + T result1 = (this->*Op)(src.Float(0), src.Float(1)); + T result2 = (this->*Op)(src.Float(2), src.Float(3)); + if (vform == kFormat8H) { + T result3 = (this->*Op)(src.Float(4), src.Float(5)); + T result4 = (this->*Op)(src.Float(6), src.Float(7)); + result1 = (this->*Op)(result1, result3); + result2 = (this->*Op)(result2, result4); + } + T result = (this->*Op)(result1, result2); + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetFloat(0, result); + return dst; +} + + +LogicVRegister Simulator::fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, dst, src, &Simulator::FPMax); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMax); + } +} + + +LogicVRegister Simulator::fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, dst, src, &Simulator::FPMin); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMin); + } +} + + +LogicVRegister Simulator::fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, + dst, + src, + &Simulator::FPMaxNM); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMaxNM); + } +} + + +LogicVRegister Simulator::fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, + dst, + src, + &Simulator::FPMinNM); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMinNM); + } +} + + +LogicVRegister Simulator::fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister 
Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmulx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 input = src.Float(i); + SimFloat16 rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + float rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + double rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 op = + static_cast(src.Float(i)) * std::pow(2.0, fbits); + dst.SetInt(vform, i, FPToInt16(op, rounding_mode)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float(i) * std::pow(2.0f, fbits); + dst.SetInt(vform, i, FPToInt32(op, rounding_mode)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float(i) * std::pow(2.0, fbits); + dst.SetInt(vform, i, FPToInt64(op, rounding_mode)); + } + } + 
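+  // Each lane above was scaled by 2^fbits before the saturating conversion;
+  // this models the fixed-point forms of the convert instructions, and with
+  // fbits == 0 it reduces to a plain float-to-int conversion.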
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtu(VectorFormat vform,
+                                LogicVRegister dst,
+                                const LogicVRegister& src,
+                                FPRounding rounding_mode,
+                                int fbits) {
+  dst.ClearForWrite(vform);
+  if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      SimFloat16 op =
+          static_cast<double>(src.Float<SimFloat16>(i)) * std::pow(2.0, fbits);
+      dst.SetUint(vform, i, FPToUInt16(op, rounding_mode));
+    }
+  } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+      dst.SetUint(vform, i, FPToUInt32(op, rounding_mode));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      double op = src.Float<double>(i) * std::pow(2.0, fbits);
+      dst.SetUint(vform, i, FPToUInt64(op, rounding_mode));
+    }
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl(VectorFormat vform,
+                                LogicVRegister dst,
+                                const LogicVRegister& src) {
+  if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+    for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+      // TODO: Full support for SimFloat16 in SimRegister(s).
+      dst.SetFloat(i,
+                   FPToFloat(RawbitsToFloat16(src.Float<uint16_t>(i)),
+                             ReadDN()));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+    for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+      dst.SetFloat(i, FPToDouble(src.Float<float>(i), ReadDN()));
+    }
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl2(VectorFormat vform,
+                                 LogicVRegister dst,
+                                 const LogicVRegister& src) {
+  int lane_count = LaneCountFromFormat(vform);
+  if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+    for (int i = 0; i < lane_count; i++) {
+      // TODO: Full support for SimFloat16 in SimRegister(s).
+      dst.SetFloat(i,
+                   FPToFloat(RawbitsToFloat16(
+                                 src.Float<uint16_t>(i + lane_count)),
+                             ReadDN()));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+    for (int i = 0; i < lane_count; i++) {
+      dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count), ReadDN()));
+    }
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn(VectorFormat vform,
+                                LogicVRegister dst,
+                                const LogicVRegister& src) {
+  if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      dst.SetFloat(i,
+                   Float16ToRawbits(
+                       FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
+    }
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn2(VectorFormat vform,
+                                 LogicVRegister dst,
+                                 const LogicVRegister& src) {
+  int lane_count = LaneCountFromFormat(vform) / 2;
+  if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+    for (int i = lane_count - 1; i >= 0; i--) {
+      dst.SetFloat(i + lane_count,
+                   Float16ToRawbits(
+                       FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+    for (int i = lane_count - 1; i >= 0; i--) {
+      dst.SetFloat(i + lane_count,
+                   FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
+    }
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn(VectorFormat vform,
+                                 LogicVRegister dst,
+                                 const LogicVRegister& src) {
+  dst.ClearForWrite(vform);
+  VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+  for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+    dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
+  }
+  return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
+                                  LogicVRegister dst,
+                                  const LogicVRegister& src) {
+  VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+  int lane_count = LaneCountFromFormat(vform) / 2;
+  for (int i = lane_count - 1; i >= 0; i--) {
+    dst.SetFloat(i + lane_count,
+                 FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
+  }
+  return dst;
+}
+
+
+// Based on reference C function recip_sqrt_estimate from ARM ARM.
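+// The input is normalised to [0.25, 1.0) and quantised to a bucket index, and
+// the reciprocal square root of the bucket midpoint is then rounded to an
+// 8-bit fraction, mirroring the hardware's table lookup. For example,
+// recip_sqrt_estimate(0.25) returns 511/256 = 1.99609375 rather than 2.0.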
+double Simulator::recip_sqrt_estimate(double a) {
+  int q0, q1, s;
+  double r;
+  if (a < 0.5) {
+    q0 = static_cast<int>(a * 512.0);
+    r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
+  } else {
+    q1 = static_cast<int>(a * 256.0);
+    r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
+  }
+  s = static_cast<int>(256.0 * r + 0.5);
+  return static_cast<double>(s) / 256.0;
+}
+
+
+static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
+  return ExtractUnsignedBitfield64(start_bit, end_bit, val);
+}
+
+
+template <typename T>
+T Simulator::FPRecipSqrtEstimate(T op) {
+  if (IsNaN(op)) {
+    return FPProcessNaN(op);
+  } else if (op == 0.0) {
+    if (copysign(1.0, op) < 0.0) {
+      return kFP64NegativeInfinity;
+    } else {
+      return kFP64PositiveInfinity;
+    }
+  } else if (copysign(1.0, op) < 0.0) {
+    FPProcessException();
+    return FPDefaultNaN<T>();
+  } else if (IsInf(op)) {
+    return 0.0;
+  } else {
+    uint64_t fraction;
+    int exp, result_exp;
+
+    if (IsFloat16<T>()) {
+      exp = Float16Exp(op);
+      fraction = Float16Mantissa(op);
+      fraction <<= 42;
+    } else if (IsFloat32<T>()) {
+      exp = FloatExp(op);
+      fraction = FloatMantissa(op);
+      fraction <<= 29;
+    } else {
+      VIXL_ASSERT(IsFloat64<T>());
+      exp = DoubleExp(op);
+      fraction = DoubleMantissa(op);
+    }
+
+    if (exp == 0) {
+      while (Bits(fraction, 51, 51) == 0) {
+        fraction = Bits(fraction, 50, 0) << 1;
+        exp -= 1;
+      }
+      fraction = Bits(fraction, 50, 0) << 1;
+    }
+
+    double scaled;
+    if (Bits(exp, 0, 0) == 0) {
+      scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
+    } else {
+      scaled = DoublePack(0, 1021, Bits(fraction, 51, 44) << 44);
+    }
+
+    if (IsFloat16<T>()) {
+      result_exp = (44 - exp) / 2;
+    } else if (IsFloat32<T>()) {
+      result_exp = (380 - exp) / 2;
+    } else {
+      VIXL_ASSERT(IsFloat64<T>());
+      result_exp = (3068 - exp) / 2;
+    }
+
+    uint64_t estimate = DoubleToRawbits(recip_sqrt_estimate(scaled));
+
+    if (IsFloat16<T>()) {
+      uint16_t exp_bits = static_cast<uint16_t>(Bits(result_exp, 4, 0));
+      uint16_t est_bits = static_cast<uint16_t>(Bits(estimate, 51, 42));
+      return Float16Pack(0, exp_bits, est_bits);
+    } else if (IsFloat32<T>()) {
+      uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+      uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
+      return FloatPack(0, exp_bits, est_bits);
+    } else {
+      VIXL_ASSERT(IsFloat64<T>());
+      return DoublePack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+    }
+  }
+}
+
+
+LogicVRegister Simulator::frsqrte(VectorFormat vform,
+                                  LogicVRegister dst,
+                                  const LogicVRegister& src) {
+  dst.ClearForWrite(vform);
+  if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      SimFloat16 input = src.Float<SimFloat16>(i);
+      dst.SetFloat(i, FPRecipSqrtEstimate<SimFloat16>(input));
+    }
+  } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      float input = src.Float<float>(i);
+      dst.SetFloat(i, FPRecipSqrtEstimate<float>(input));
+    }
+  } else {
+    VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+    for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+      double input = src.Float<double>(i);
+      dst.SetFloat(i, FPRecipSqrtEstimate<double>(input));
+    }
+  }
+  return dst;
+}
+
+template <typename T>
+T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
+  uint32_t sign;
+
+  if (IsFloat16<T>()) {
+    sign = Float16Sign(op);
+  } else if (IsFloat32<T>()) {
+    sign = FloatSign(op);
+  } else {
+    VIXL_ASSERT(IsFloat64<T>());
+    sign = DoubleSign(op);
+  }
+
+  if (IsNaN(op)) {
+    return FPProcessNaN(op);
+  } else if (IsInf(op)) {
+    return (sign == 1) ? -0.0 : 0.0;
+  } else if (op == 0.0) {
+    FPProcessException();  // FPExc_DivideByZero exception.
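+    // The reciprocal of a zero is an infinity carrying the sign of the
+    // operand: 1/+0.0 -> +inf and 1/-0.0 -> -inf, with Divide-by-Zero raised
+    // in both cases.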
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else if ((IsFloat16() && (std::fabs(op) < std::pow(2.0, -16.0))) || + (IsFloat32() && (std::fabs(op) < std::pow(2.0, -128.0))) || + (IsFloat64() && (std::fabs(op) < std::pow(2.0, -1024.0)))) { + bool overflow_to_inf = false; + switch (rounding) { + case FPTieEven: + overflow_to_inf = true; + break; + case FPPositiveInfinity: + overflow_to_inf = (sign == 0); + break; + case FPNegativeInfinity: + overflow_to_inf = (sign == 1); + break; + case FPZero: + overflow_to_inf = false; + break; + default: + break; + } + FPProcessException(); // FPExc_Overflow and FPExc_Inexact. + if (overflow_to_inf) { + return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else { + // Return FPMaxNormal(sign). + if (IsFloat16()) { + return Float16Pack(sign, 0x1f, 0x3ff); + } else if (IsFloat32()) { + return FloatPack(sign, 0xfe, 0x07fffff); + } else { + VIXL_ASSERT(IsFloat64()); + return DoublePack(sign, 0x7fe, 0x0fffffffffffffl); + } + } + } else { + uint64_t fraction; + int exp, result_exp; + uint32_t sign; + + if (IsFloat16()) { + sign = Float16Sign(op); + exp = Float16Exp(op); + fraction = Float16Mantissa(op); + fraction <<= 42; + } else if (IsFloat32()) { + sign = FloatSign(op); + exp = FloatExp(op); + fraction = FloatMantissa(op); + fraction <<= 29; + } else { + VIXL_ASSERT(IsFloat64()); + sign = DoubleSign(op); + exp = DoubleExp(op); + fraction = DoubleMantissa(op); + } + + if (exp == 0) { + if (Bits(fraction, 51, 51) == 0) { + exp -= 1; + fraction = Bits(fraction, 49, 0) << 2; + } else { + fraction = Bits(fraction, 50, 0) << 1; + } + } + + double scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44); + + if (IsFloat16()) { + result_exp = (29 - exp); // In range 29-30 = -1 to 29+1 = 30. + } else if (IsFloat32()) { + result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254. + } else { + VIXL_ASSERT(IsFloat64()); + result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046. 
+ } + + double estimate = recip_estimate(scaled); + + fraction = DoubleMantissa(estimate); + if (result_exp == 0) { + fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1); + } else if (result_exp == -1) { + fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2); + result_exp = 0; + } + if (IsFloat16()) { + uint16_t exp_bits = static_cast(Bits(result_exp, 4, 0)); + uint16_t frac_bits = static_cast(Bits(fraction, 51, 42)); + return Float16Pack(sign, exp_bits, frac_bits); + } else if (IsFloat32()) { + uint32_t exp_bits = static_cast(Bits(result_exp, 7, 0)); + uint32_t frac_bits = static_cast(Bits(fraction, 51, 29)); + return FloatPack(sign, exp_bits, frac_bits); + } else { + VIXL_ASSERT(IsFloat64()); + return DoublePack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0)); + } + } +} + + +LogicVRegister Simulator::frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding round) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } + return dst; +} + + +LogicVRegister Simulator::ursqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x3FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + + +// Based on reference C function recip_estimate from ARM ARM. +double Simulator::recip_estimate(double a) { + int q, s; + double r; + q = static_cast(a * 512.0); + r = 1.0 / ((static_cast(q) + 0.5) / 512.0); + s = static_cast(256.0 * r + 0.5); + return static_cast(s) / 256.0; +} + + +LogicVRegister Simulator::urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x7FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + +template +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + T result; + if (IsNaN(op)) { + result = FPProcessNaN(op); + } else { + int exp; + uint32_t sign; + if (IsFloat16()) { + sign = Float16Sign(op); + exp = Float16Exp(op); + exp = (exp == 0) ? 
(0x1F - 1) : static_cast(Bits(~exp, 4, 0)); + result = Float16Pack(sign, exp, 0); + } else if (IsFloat32()) { + sign = FloatSign(op); + exp = FloatExp(op); + exp = (exp == 0) ? (0xFF - 1) : static_cast(Bits(~exp, 7, 0)); + result = FloatPack(sign, exp, 0); + } else { + VIXL_ASSERT(IsFloat64()); + sign = DoubleSign(op); + exp = DoubleExp(op); + exp = (exp == 0) ? (0x7FF - 1) : static_cast(Bits(~exp, 10, 0)); + result = DoublePack(sign, exp, 0); + } + } + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frecpx(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecpx(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecpx(vform, dst, src); + } + return dst; +} + +LogicVRegister Simulator::scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + SimFloat16 result = FixedToFloat16(src.Int(kFormatH, i), fbits, round); + dst.SetFloat(i, result); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = FixedToFloat(src.Int(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = FixedToDouble(src.Int(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + SimFloat16 result = UFixedToFloat16(src.Uint(kFormatH, i), fbits, round); + dst.SetFloat(i, result); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 diff --git a/dep/vixl/src/aarch64/macro-assembler-aarch64.cc b/dep/vixl/src/aarch64/macro-assembler-aarch64.cc new file mode 100644 index 000000000..e881a8164 --- /dev/null +++ b/dep/vixl/src/aarch64/macro-assembler-aarch64.cc @@ -0,0 +1,3024 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "macro-assembler-aarch64.h" + +namespace vixl { +namespace aarch64 { + + +void Pool::Release() { + if (--monitor_ == 0) { + // Ensure the pool has not been blocked for too long. + VIXL_ASSERT(masm_->GetCursorOffset() < checkpoint_); + } +} + + +void Pool::SetNextCheckpoint(ptrdiff_t checkpoint) { + masm_->checkpoint_ = std::min(masm_->checkpoint_, checkpoint); + checkpoint_ = checkpoint; +} + + +LiteralPool::LiteralPool(MacroAssembler* masm) + : Pool(masm), + size_(0), + first_use_(-1), + recommended_checkpoint_(kNoCheckpointRequired) {} + + +LiteralPool::~LiteralPool() { + VIXL_ASSERT(IsEmpty()); + VIXL_ASSERT(!IsBlocked()); + for (std::vector::iterator it = deleted_on_destruction_.begin(); + it != deleted_on_destruction_.end(); + it++) { + delete *it; + } +} + + +void LiteralPool::Reset() { + std::vector::iterator it, end; + for (it = entries_.begin(), end = entries_.end(); it != end; ++it) { + RawLiteral* literal = *it; + if (literal->deletion_policy_ == RawLiteral::kDeletedOnPlacementByPool) { + delete literal; + } + } + entries_.clear(); + size_ = 0; + first_use_ = -1; + Pool::Reset(); + recommended_checkpoint_ = kNoCheckpointRequired; +} + + +void LiteralPool::CheckEmitFor(size_t amount, EmitOption option) { + if (IsEmpty() || IsBlocked()) return; + + ptrdiff_t distance = masm_->GetCursorOffset() + amount - first_use_; + if (distance >= kRecommendedLiteralPoolRange) { + Emit(option); + } +} + + +void LiteralPool::CheckEmitForBranch(size_t range) { + if (IsEmpty() || IsBlocked()) return; + if (GetMaxSize() >= range) Emit(); +} + +// We use a subclass to access the protected `ExactAssemblyScope` constructor +// giving us control over the pools. This allows us to use this scope within +// code emitting pools without creating a circular dependency. +// We keep the constructor private to restrict usage of this helper class. +class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope { + private: + ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm, size_t size) + : ExactAssemblyScope(masm, + size, + ExactAssemblyScope::kExactSize, + ExactAssemblyScope::kIgnorePools) {} + + friend void LiteralPool::Emit(LiteralPool::EmitOption); + friend void VeneerPool::Emit(VeneerPool::EmitOption, size_t); +}; + + +void LiteralPool::Emit(EmitOption option) { + // There is an issue if we are asked to emit a blocked or empty pool. 
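+  // The pool is laid out as: an optional branch over the pool (when execution
+  // could otherwise fall into it), a marker word giving the pool size in
+  // 32-bit words (encoded as an ldr into xzr), then the entries themselves.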
+  VIXL_ASSERT(!IsBlocked());
+  VIXL_ASSERT(!IsEmpty());
+
+  size_t pool_size = GetSize();
+  size_t emit_size = pool_size;
+  if (option == kBranchRequired) emit_size += kInstructionSize;
+  Label end_of_pool;
+
+  VIXL_ASSERT(emit_size % kInstructionSize == 0);
+  {
+    CodeBufferCheckScope guard(masm_,
+                               emit_size,
+                               CodeBufferCheckScope::kCheck,
+                               CodeBufferCheckScope::kExactSize);
+#ifdef VIXL_DEBUG
+    // Also explicitly disallow usage of the `MacroAssembler` here.
+    masm_->SetAllowMacroInstructions(false);
+#endif
+    if (option == kBranchRequired) {
+      ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize);
+      masm_->b(&end_of_pool);
+    }
+
+    {
+      // Marker indicating the size of the literal pool in 32-bit words.
+      VIXL_ASSERT((pool_size % kWRegSizeInBytes) == 0);
+      ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize);
+      masm_->ldr(xzr, static_cast<int>(pool_size / kWRegSizeInBytes));
+    }
+
+    // Now populate the literal pool.
+    std::vector<RawLiteral*>::iterator it, end;
+    for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
+      VIXL_ASSERT((*it)->IsUsed());
+      masm_->place(*it);
+    }
+
+    if (option == kBranchRequired) masm_->bind(&end_of_pool);
+#ifdef VIXL_DEBUG
+    masm_->SetAllowMacroInstructions(true);
+#endif
+  }
+
+  Reset();
+}
+
+
+void LiteralPool::AddEntry(RawLiteral* literal) {
+  // A literal must be registered immediately before its first use. Here we
+  // cannot control that it is its first use, but we check no code has been
+  // emitted since its last use.
+  VIXL_ASSERT(masm_->GetCursorOffset() == literal->GetLastUse());
+
+  UpdateFirstUse(masm_->GetCursorOffset());
+  VIXL_ASSERT(masm_->GetCursorOffset() >= first_use_);
+  entries_.push_back(literal);
+  size_ += literal->GetSize();
+}
+
+
+void LiteralPool::UpdateFirstUse(ptrdiff_t use_position) {
+  first_use_ = std::min(first_use_, use_position);
+  if (first_use_ == -1) {
+    first_use_ = use_position;
+    SetNextRecommendedCheckpoint(GetNextRecommendedCheckpoint());
+    SetNextCheckpoint(first_use_ + Instruction::kLoadLiteralRange);
+  } else {
+    VIXL_ASSERT(use_position > first_use_);
+  }
+}
+
+
+void VeneerPool::Reset() {
+  Pool::Reset();
+  unresolved_branches_.Reset();
+}
+
+
+void VeneerPool::Release() {
+  if (--monitor_ == 0) {
+    VIXL_ASSERT(IsEmpty() ||
+                masm_->GetCursorOffset() <
+                    unresolved_branches_.GetFirstLimit());
+  }
+}
+
+
+void VeneerPool::RegisterUnresolvedBranch(ptrdiff_t branch_pos,
+                                          Label* label,
+                                          ImmBranchType branch_type) {
+  VIXL_ASSERT(!label->IsBound());
+  BranchInfo branch_info = BranchInfo(branch_pos, label, branch_type);
+  unresolved_branches_.insert(branch_info);
+  UpdateNextCheckPoint();
+  // TODO: In debug mode register the label with the assembler to make sure it
+  // is bound with masm Bind and not asm bind.
+}
+
+
+void VeneerPool::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+  if (IsEmpty()) {
+    VIXL_ASSERT(checkpoint_ == kNoCheckpointRequired);
+    return;
+  }
+
+  if (label->IsLinked()) {
+    Label::LabelLinksIterator links_it(label);
+    for (; !links_it.Done(); links_it.Advance()) {
+      ptrdiff_t link_offset = *links_it.Current();
+      Instruction* link = masm_->GetInstructionAt(link_offset);
+
+      // ADR instructions are not handled.
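+      // Only branch types that use veneers (conditional, compare-and-branch
+      // and test-and-branch) were registered, so links of any other type can
+      // simply be skipped here.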
+ if (BranchTypeUsesVeneers(link->GetBranchType())) { + BranchInfo branch_info(link_offset, label, link->GetBranchType()); + unresolved_branches_.erase(branch_info); + } + } + } + + UpdateNextCheckPoint(); +} + + +bool VeneerPool::ShouldEmitVeneer(int64_t first_unreacheable_pc, + size_t amount) { + ptrdiff_t offset = + kPoolNonVeneerCodeSize + amount + GetMaxSize() + GetOtherPoolsMaxSize(); + return (masm_->GetCursorOffset() + offset) > first_unreacheable_pc; +} + + +void VeneerPool::CheckEmitFor(size_t amount, EmitOption option) { + if (IsEmpty()) return; + + VIXL_ASSERT(masm_->GetCursorOffset() + kPoolNonVeneerCodeSize < + unresolved_branches_.GetFirstLimit()); + + if (IsBlocked()) return; + + if (ShouldEmitVeneers(amount)) { + Emit(option, amount); + } else { + UpdateNextCheckPoint(); + } +} + + +void VeneerPool::Emit(EmitOption option, size_t amount) { + // There is an issue if we are asked to emit a blocked or empty pool. + VIXL_ASSERT(!IsBlocked()); + VIXL_ASSERT(!IsEmpty()); + + Label end; + if (option == kBranchRequired) { + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->b(&end); + } + + // We want to avoid generating veneer pools too often, so generate veneers for + // branches that don't immediately require a veneer but will soon go out of + // range. + static const size_t kVeneerEmissionMargin = 1 * KBytes; + + for (BranchInfoSetIterator it(&unresolved_branches_); !it.Done();) { + BranchInfo* branch_info = it.Current(); + if (ShouldEmitVeneer(branch_info->first_unreacheable_pc_, + amount + kVeneerEmissionMargin)) { + CodeBufferCheckScope scope(masm_, + kVeneerCodeSize, + CodeBufferCheckScope::kCheck, + CodeBufferCheckScope::kExactSize); + ptrdiff_t branch_pos = branch_info->pc_offset_; + Instruction* branch = masm_->GetInstructionAt(branch_pos); + Label* label = branch_info->label_; + + // Patch the branch to point to the current position, and emit a branch + // to the label. + Instruction* veneer = masm_->GetCursorAddress(); + branch->SetImmPCOffsetTarget(veneer); + { + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->b(label); + } + + // Update the label. The branch patched does not point to it any longer. 
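+      // The original branch now targets the veneer, and the veneer itself is
+      // a single unconditional `b label`, whose +-128MB range replaces the
+      // much shorter range of the patched branch.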
+ label->DeleteLink(branch_pos); + + it.DeleteCurrentAndAdvance(); + } else { + it.AdvanceToNextType(); + } + } + + UpdateNextCheckPoint(); + + masm_->bind(&end); +} + + +MacroAssembler::MacroAssembler(PositionIndependentCodeOption pic) + : Assembler(pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +#ifndef VIXL_DEBUG + USE(allow_macro_instructions_); +#endif +} + + +MacroAssembler::MacroAssembler(size_t capacity, + PositionIndependentCodeOption pic) + : Assembler(capacity, pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +} + + +MacroAssembler::MacroAssembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic) + : Assembler(buffer, capacity, pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +} + + +MacroAssembler::~MacroAssembler() {} + + +void MacroAssembler::Reset() { + Assembler::Reset(); + + VIXL_ASSERT(!literal_pool_.IsBlocked()); + literal_pool_.Reset(); + veneer_pool_.Reset(); + + checkpoint_ = GetNextCheckPoint(); +} + + +void MacroAssembler::FinalizeCode(FinalizeOption option) { + if (!literal_pool_.IsEmpty()) { + // The user may decide to emit more code after Finalize, emit a branch if + // that's the case. + literal_pool_.Emit(option == kUnreachable ? Pool::kNoBranchRequired + : Pool::kBranchRequired); + } + VIXL_ASSERT(veneer_pool_.IsEmpty()); + + Assembler::FinalizeCode(); +} + + +void MacroAssembler::CheckEmitFor(size_t amount) { + CheckEmitPoolsFor(amount); + GetBuffer()->EnsureSpaceFor(amount); +} + + +void MacroAssembler::CheckEmitPoolsFor(size_t amount) { + literal_pool_.CheckEmitFor(amount); + veneer_pool_.CheckEmitFor(amount); + checkpoint_ = GetNextCheckPoint(); +} + + +int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm, + const Register& rd, + uint64_t imm) { + bool emit_code = (masm != NULL); + VIXL_ASSERT(IsUint32(imm) || IsInt32(imm) || rd.Is64Bits()); + // The worst case for size is mov 64-bit immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(masm); + + // Immediates on Aarch64 can be produced using an initial value, and zero to + // three move keep operations. + // + // Initial values can be generated with: + // 1. 64-bit move zero (movz). + // 2. 32-bit move inverted (movn). + // 3. 64-bit move inverted. + // 4. 32-bit orr immediate. + // 5. 64-bit orr immediate. + // Move-keep may then be used to modify each of the 16-bit half words. + // + // The code below supports all five initial value generators, and + // applying move-keep operations to move-zero and move-inverted initial + // values. 
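+  //
+  // For example (illustrative values):
+  //   0x0000000012340000 -> movz rd, #0x1234, lsl #16
+  //   0xffffffffffff5678 -> movn rd, #0xa987
+  //   0x0000123400005678 -> movz rd, #0x5678
+  //                         movk rd, #0x1234, lsl #32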
+ + // Try to move the immediate in one instruction, and if that fails, switch to + // using multiple instructions. + if (OneInstrMoveImmediateHelper(masm, rd, imm)) { + return 1; + } else { + int instruction_count = 0; + unsigned reg_size = rd.GetSizeInBits(); + + // Generic immediate case. Imm will be represented by + // [imm3, imm2, imm1, imm0], where each imm is 16 bits. + // A move-zero or move-inverted is generated for the first non-zero or + // non-0xffff immX, and a move-keep for subsequent non-zero immX. + + uint64_t ignored_halfword = 0; + bool invert_move = false; + // If the number of 0xffff halfwords is greater than the number of 0x0000 + // halfwords, it's more efficient to use move-inverted. + if (CountClearHalfWords(~imm, reg_size) > + CountClearHalfWords(imm, reg_size)) { + ignored_halfword = 0xffff; + invert_move = true; + } + + // Mov instructions can't move values into the stack pointer, so set up a + // temporary register, if needed. + UseScratchRegisterScope temps; + Register temp; + if (emit_code) { + temps.Open(masm); + temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; + } + + // Iterate through the halfwords. Use movn/movz for the first non-ignored + // halfword, and movk for subsequent halfwords. + VIXL_ASSERT((reg_size % 16) == 0); + bool first_mov_done = false; + for (unsigned i = 0; i < (reg_size / 16); i++) { + uint64_t imm16 = (imm >> (16 * i)) & 0xffff; + if (imm16 != ignored_halfword) { + if (!first_mov_done) { + if (invert_move) { + if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i); + instruction_count++; + } else { + if (emit_code) masm->movz(temp, imm16, 16 * i); + instruction_count++; + } + first_mov_done = true; + } else { + // Construct a wider constant. + if (emit_code) masm->movk(temp, imm16, 16 * i); + instruction_count++; + } + } + } + + VIXL_ASSERT(first_mov_done); + + // Move the temporary if the original destination register was the stack + // pointer. + if (rd.IsSP()) { + if (emit_code) masm->mov(rd, temp); + instruction_count++; + } + return instruction_count; + } +} + + +bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm) { + bool emit_code = masm != NULL; + unsigned n, imm_s, imm_r; + int reg_size = dst.GetSizeInBits(); + + if (IsImmMovz(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move zero instruction. Movz can't write + // to the stack pointer. + if (emit_code) { + masm->movz(dst, imm); + } + return true; + } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move negative instruction. Movn can't + // write to the stack pointer. + if (emit_code) { + masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask)); + } + return true; + } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be represented in a logical orr instruction. 
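+    // Logical instructions accept "bitmask immediates": a pattern of 2, 4, 8,
+    // 16, 32 or 64 bits containing a single (rotated) run of set bits,
+    // replicated across the register, e.g. 0x00ff00ff00ff00ff. Such values
+    // can be materialised with a single orr from the zero register.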
+ VIXL_ASSERT(!dst.IsZero()); + if (emit_code) { + masm->LogicalImmediate(dst, + AppropriateZeroRegFor(dst), + n, + imm_s, + imm_r, + ORR); + } + return true; + } + return false; +} + + +void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { + VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) && + ((bit == -1) || (type >= kBranchTypeFirstUsingBit))); + if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { + B(static_cast(type), label); + } else { + switch (type) { + case always: + B(label); + break; + case never: + break; + case reg_zero: + Cbz(reg, label); + break; + case reg_not_zero: + Cbnz(reg, label); + break; + case reg_bit_clear: + Tbz(reg, bit, label); + break; + case reg_bit_set: + Tbnz(reg, bit, label); + break; + default: + VIXL_UNREACHABLE(); + } + } +} + + +void MacroAssembler::B(Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(UncondBranchType) > + Instruction::kLoadLiteralRange); + SingleEmissionCheckScope guard(this); + b(label); +} + + +void MacroAssembler::B(Label* label, Condition cond) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CondBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + b(&done, InvertCondition(cond)); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CondBranchType); + } + b(label, cond); + } +} + + +void MacroAssembler::Cbnz(const Register& rt, Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CompareBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + cbz(rt, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CompareBranchType); + } + cbnz(rt, label); + } +} + + +void MacroAssembler::Cbz(const Register& rt, Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. 
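+  // cbz and cbnz encode a 19-bit offset, giving a +-1MB range. For a bound
+  // label that is out of range, the macro inverts the test and jumps over an
+  // unconditional branch, whose 26-bit offset gives a +-128MB range.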
+ VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CompareBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + cbnz(rt, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CompareBranchType); + } + cbz(rt, label); + } +} + + +void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { + // This is to avoid a situation where emitting a veneer for a TBZ/TBNZ branch + // can become impossible because we emit the literal pool first. + literal_pool_.CheckEmitForBranch( + Instruction::GetImmBranchForwardRange(TestBranchType)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, TestBranchType)) { + Label done; + tbz(rt, bit_pos, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + TestBranchType); + } + tbnz(rt, bit_pos, label); + } +} + + +void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { + // This is to avoid a situation where emitting a veneer for a TBZ/TBNZ branch + // can become impossible because we emit the literal pool first. + literal_pool_.CheckEmitForBranch( + Instruction::GetImmBranchForwardRange(TestBranchType)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, TestBranchType)) { + Label done; + tbnz(rt, bit_pos, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + TestBranchType); + } + tbz(rt, bit_pos, label); + } +} + + +void MacroAssembler::Bind(Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + veneer_pool_.DeleteUnresolvedBranchInfoForLabel(label); + bind(label); +} + + +// Bind a label to a specified offset from the start of the buffer. 
+void MacroAssembler::BindToOffset(Label* label, ptrdiff_t offset) { + VIXL_ASSERT(allow_macro_instructions_); + veneer_pool_.DeleteUnresolvedBranchInfoForLabel(label); + Assembler::BindToOffset(label, offset); +} + + +void MacroAssembler::And(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, AND); +} + + +void MacroAssembler::Ands(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ANDS); +} + + +void MacroAssembler::Tst(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Bic(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, BIC); +} + + +void MacroAssembler::Bics(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, BICS); +} + + +void MacroAssembler::Orr(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ORR); +} + + +void MacroAssembler::Orn(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ORN); +} + + +void MacroAssembler::Eor(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, EOR); +} + + +void MacroAssembler::Eon(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, EON); +} + + +void MacroAssembler::LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op) { + // The worst case for size is logical immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to do the operation + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate()) { + uint64_t immediate = operand.GetImmediate(); + unsigned reg_size = rd.GetSizeInBits(); + + // If the operation is NOT, invert the operation and immediate. + if ((op & NOT) == NOT) { + op = static_cast(op & ~NOT); + immediate = ~immediate; + } + + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are consistent. + VIXL_ASSERT(((immediate >> kWRegSize) == 0) || + ((immediate >> kWRegSize) == 0xffffffff)); + immediate &= kWRegMask; + } + + VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate)); + + // Special cases for all set or all clear immediates. 
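+    // With an all-clear immediate, AND gives 0 and ORR/EOR give rn, so a
+    // plain move suffices; with an all-set immediate, AND gives rn, ORR gives
+    // all ones and EOR gives ~rn. ANDS and BICS must still be emitted so that
+    // the flags are set.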
+ if (immediate == 0) { + switch (op) { + case AND: + Mov(rd, 0); + return; + case ORR: + VIXL_FALLTHROUGH(); + case EOR: + Mov(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } else if ((rd.Is64Bits() && (immediate == UINT64_C(0xffffffffffffffff))) || + (rd.Is32Bits() && (immediate == UINT64_C(0x00000000ffffffff)))) { + switch (op) { + case AND: + Mov(rd, rn); + return; + case ORR: + Mov(rd, immediate); + return; + case EOR: + Mvn(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } + + unsigned n, imm_s, imm_r; + if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be encoded in the instruction. + LogicalImmediate(rd, rn, n, imm_s, imm_r, op); + } else { + // Immediate can't be encoded: synthesize using move immediate. + Register temp = temps.AcquireSameSizeAs(rn); + + // If the left-hand input is the stack pointer, we can't pre-shift the + // immediate, as the encoding won't allow the subsequent post shift. + PreShiftImmMode mode = rn.IsSP() ? kNoShift : kAnyShift; + Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode); + + if (rd.Is(sp)) { + // If rd is the stack pointer we cannot use it as the destination + // register so we use the temp register as an intermediate again. + Logical(temp, rn, imm_operand, op); + Mov(sp, temp); + } else { + Logical(rd, rn, imm_operand, op); + } + } + } else if (operand.IsExtendedRegister()) { + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() <= rd.GetSizeInBits()); + // Add/sub extended supports shift <= 4. We want to support exactly the + // same modes here. + VIXL_ASSERT(operand.GetShiftAmount() <= 4); + VIXL_ASSERT( + operand.GetRegister().Is64Bits() || + ((operand.GetExtend() != UXTX) && (operand.GetExtend() != SXTX))); + + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + Logical(rd, rn, Operand(temp), op); + } else { + // The operand can be encoded in the instruction. + VIXL_ASSERT(operand.IsShiftedRegister()); + Logical(rd, rn, operand, op); + } +} + + +void MacroAssembler::Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is mov immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mov(rd, operand.GetImmediate()); + } else if (operand.IsShiftedRegister() && (operand.GetShiftAmount() != 0)) { + // Emit a shift instruction if moving a shifted register. This operation + // could also be achieved using an orr instruction (like orn used by Mvn), + // but using a shift instruction makes the disassembly clearer. + EmitShift(rd, + operand.GetRegister(), + operand.GetShift(), + operand.GetShiftAmount()); + } else if (operand.IsExtendedRegister()) { + // Emit an extend instruction if moving an extended register. This handles + // extend with post-shift operations, too. 
+ EmitExtendShift(rd, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + } else { + Mov(rd, operand.GetRegister(), discard_mode); + } +} + + +void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(IsUint16(imm)); + int byte1 = (imm & 0xff); + int byte2 = ((imm >> 8) & 0xff); + if (byte1 == byte2) { + movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1); + } else if (byte1 == 0) { + movi(vd, byte2, LSL, 8); + } else if (byte2 == 0) { + movi(vd, byte1); + } else if (byte1 == 0xff) { + mvni(vd, ~byte2 & 0xff, LSL, 8); + } else if (byte2 == 0xff) { + mvni(vd, ~byte1 & 0xff); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + movz(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(IsUint32(imm)); + + uint8_t bytes[sizeof(imm)]; + memcpy(bytes, &imm, sizeof(imm)); + + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 4; ++i) { + if ((bytes[i] != 0) && (bytes[i] != 0xff)) { + all0orff = false; + break; + } + } + + if (all0orff == true) { + movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm)); + return; + } + } + + // Of the 4 bytes, only one byte is non-zero. + for (int i = 0; i < 4; i++) { + if ((imm & (0xff << (i * 8))) == imm) { + movi(vd, bytes[i], LSL, i * 8); + return; + } + } + + // Of the 4 bytes, only one byte is not 0xff. + for (int i = 0; i < 4; i++) { + uint32_t mask = ~(0xff << (i * 8)); + if ((imm & mask) == mask) { + mvni(vd, ~bytes[i] & 0xff, LSL, i * 8); + return; + } + } + + // Immediate is of the form 0x00MMFFFF. + if ((imm & 0xff00ffff) == 0x0000ffff) { + movi(vd, bytes[2], MSL, 16); + return; + } + + // Immediate is of the form 0x0000MMFF. + if ((imm & 0xffff00ff) == 0x000000ff) { + movi(vd, bytes[1], MSL, 8); + return; + } + + // Immediate is of the form 0xFFMM0000. + if ((imm & 0xff00ffff) == 0xff000000) { + mvni(vd, ~bytes[2] & 0xff, MSL, 16); + return; + } + // Immediate is of the form 0xFFFFMM00. + if ((imm & 0xffff00ff) == 0xffff0000) { + mvni(vd, ~bytes[1] & 0xff, MSL, 8); + return; + } + + // Top and bottom 16-bits are equal. + if (((imm >> 16) & 0xffff) == (imm & 0xffff)) { + Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + Mov(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 8; ++i) { + int byteval = (imm >> (i * 8)) & 0xff; + if (byteval != 0 && byteval != 0xff) { + all0orff = false; + break; + } + } + if (all0orff == true) { + movi(vd, imm); + return; + } + } + + // Top and bottom 32-bits are equal. + if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) { + Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff); + return; + } + + // Default case. 
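+  // No vector-immediate encoding matched, so materialise the value in a
+  // general-purpose register and transfer it with mov (1D) or dup (2D).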
+ { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Mov(temp, imm); + if (vd.Is1D()) { + mov(vd.D(), 0, temp); + } else { + dup(vd.V2D(), temp); + } + } +} + + +void MacroAssembler::Movi(const VRegister& vd, + uint64_t imm, + Shift shift, + int shift_amount) { + VIXL_ASSERT(allow_macro_instructions_); + MacroEmissionCheckScope guard(this); + if (shift_amount != 0 || shift != LSL) { + movi(vd, imm, shift, shift_amount); + } else if (vd.Is8B() || vd.Is16B()) { + // 8-bit immediate. + VIXL_ASSERT(IsUint8(imm)); + movi(vd, imm); + } else if (vd.Is4H() || vd.Is8H()) { + // 16-bit immediate. + Movi16bitHelper(vd, imm); + } else if (vd.Is2S() || vd.Is4S()) { + // 32-bit immediate. + Movi32bitHelper(vd, imm); + } else { + // 64-bit immediate. + Movi64bitHelper(vd, imm); + } +} + + +void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { + // TODO: Move 128-bit values in a more efficient way. + VIXL_ASSERT(vd.Is128Bits()); + UseScratchRegisterScope temps(this); + Movi(vd.V2D(), lo); + Register temp = temps.AcquireX(); + Mov(temp, hi); + Ins(vd.V2D(), 1, temp); +} + + +void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is mvn immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mvn(rd, operand.GetImmediate()); + } else if (operand.IsExtendedRegister()) { + UseScratchRegisterScope temps(this); + temps.Exclude(operand.GetRegister()); + + // Emit two instructions for the extend case. This differs from Mov, as + // the extend and invert can't be achieved in one instruction. + Register temp = temps.AcquireSameSizeAs(rd); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + mvn(rd, Operand(temp)); + } else { + // Otherwise, register and shifted register cases can be handled by the + // assembler directly, using orn. 
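+    // (mvn is itself an alias of orn with the zero register as the first
+    // source operand.)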
+ mvn(rd, operand); + } +} + + +void MacroAssembler::Mov(const Register& rd, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + MoveImmediateHelper(this, rd, imm); +} + + +void MacroAssembler::Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0)) { + ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMN); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP); + } +} + + +void MacroAssembler::Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0)) { + ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMP); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN); + } +} + + +void MacroAssembler::ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op) { + VIXL_ASSERT((cond != al) && (cond != nv)); + // The worst case for size is ccmp immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for ccmp + MacroEmissionCheckScope guard(this); + + if ((operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0)) || + (operand.IsImmediate() && + IsImmConditionalCompare(operand.GetImmediate()))) { + // The immediate can be encoded in the instruction, or the operand is an + // unshifted register: call the assembler. + ConditionalCompare(rn, operand, nzcv, cond, op); + } else { + UseScratchRegisterScope temps(this); + // The operand isn't directly supported by the instruction: perform the + // operation on a temporary register. + Register temp = temps.AcquireSameSizeAs(rn); + Mov(temp, operand); + ConditionalCompare(rn, temp, nzcv, cond, op); + } +} + + +void MacroAssembler::CselHelper(MacroAssembler* masm, + const Register& rd, + Operand left, + Operand right, + Condition cond, + bool* should_synthesise_left, + bool* should_synthesise_right) { + bool emit_code = (masm != NULL); + + VIXL_ASSERT(!emit_code || masm->allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + VIXL_ASSERT(!rd.IsZero() && !rd.IsSP()); + VIXL_ASSERT(left.IsImmediate() || !left.GetRegister().IsSP()); + VIXL_ASSERT(right.IsImmediate() || !right.GetRegister().IsSP()); + + if (should_synthesise_left != NULL) *should_synthesise_left = false; + if (should_synthesise_right != NULL) *should_synthesise_right = false; + + // The worst case for size occurs when the inputs are two non encodable + // constants: + // * up to 4 instructions to materialise the left constant + // * up to 4 instructions to materialise the right constant + // * 1 instruction for csel + EmissionCheckScope guard(masm, 9 * kInstructionSize); + UseScratchRegisterScope temps; + if (masm != NULL) { + temps.Open(masm); + } + + // Try to handle cases where both inputs are immediates. + bool left_is_immediate = left.IsImmediate() || left.IsZero(); + bool right_is_immediate = right.IsImmediate() || right.IsZero(); + if (left_is_immediate && right_is_immediate && + CselSubHelperTwoImmediates(masm, + rd, + left.GetEquivalentImmediate(), + right.GetEquivalentImmediate(), + cond, + should_synthesise_left, + should_synthesise_right)) { + return; + } + + // Handle cases where one of the two inputs is -1, 0, or 1. 
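+  // These cases map directly onto csel, csinc and csinv against the zero
+  // register, so the small immediate never needs to be materialised.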
+ bool left_is_small_immediate = + left_is_immediate && ((-1 <= left.GetEquivalentImmediate()) && + (left.GetEquivalentImmediate() <= 1)); + bool right_is_small_immediate = + right_is_immediate && ((-1 <= right.GetEquivalentImmediate()) && + (right.GetEquivalentImmediate() <= 1)); + if (right_is_small_immediate || left_is_small_immediate) { + bool swapped_inputs = false; + if (!right_is_small_immediate) { + std::swap(left, right); + cond = InvertCondition(cond); + swapped_inputs = true; + } + CselSubHelperRightSmallImmediate(masm, + &temps, + rd, + left, + right, + cond, + swapped_inputs ? should_synthesise_right + : should_synthesise_left); + return; + } + + // Otherwise both inputs need to be available in registers. Synthesise them + // if necessary and emit the `csel`. + if (!left.IsPlainRegister()) { + if (emit_code) { + Register temp = temps.AcquireSameSizeAs(rd); + masm->Mov(temp, left); + left = temp; + } + if (should_synthesise_left != NULL) *should_synthesise_left = true; + } + if (!right.IsPlainRegister()) { + if (emit_code) { + Register temp = temps.AcquireSameSizeAs(rd); + masm->Mov(temp, right); + right = temp; + } + if (should_synthesise_right != NULL) *should_synthesise_right = true; + } + if (emit_code) { + VIXL_ASSERT(left.IsPlainRegister() && right.IsPlainRegister()); + if (left.GetRegister().Is(right.GetRegister())) { + masm->Mov(rd, left.GetRegister()); + } else { + masm->csel(rd, left.GetRegister(), right.GetRegister(), cond); + } + } +} + + +bool MacroAssembler::CselSubHelperTwoImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond, + bool* should_synthesise_left, + bool* should_synthesise_right) { + bool emit_code = (masm != NULL); + if (should_synthesise_left != NULL) *should_synthesise_left = false; + if (should_synthesise_right != NULL) *should_synthesise_right = false; + + if (left == right) { + if (emit_code) masm->Mov(rd, left); + return true; + } else if (left == -right) { + if (should_synthesise_right != NULL) *should_synthesise_right = true; + if (emit_code) { + masm->Mov(rd, right); + masm->Cneg(rd, rd, cond); + } + return true; + } + + if (CselSubHelperTwoOrderedImmediates(masm, rd, left, right, cond)) { + return true; + } else { + std::swap(left, right); + if (CselSubHelperTwoOrderedImmediates(masm, + rd, + left, + right, + InvertCondition(cond))) { + return true; + } + } + + // TODO: Handle more situations. For example handle `csel rd, #5, #6, cond` + // with `cinc`. 
+ return false; +} + + +bool MacroAssembler::CselSubHelperTwoOrderedImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond) { + bool emit_code = (masm != NULL); + + if ((left == 1) && (right == 0)) { + if (emit_code) masm->cset(rd, cond); + return true; + } else if ((left == -1) && (right == 0)) { + if (emit_code) masm->csetm(rd, cond); + return true; + } + return false; +} + + +void MacroAssembler::CselSubHelperRightSmallImmediate( + MacroAssembler* masm, + UseScratchRegisterScope* temps, + const Register& rd, + const Operand& left, + const Operand& right, + Condition cond, + bool* should_synthesise_left) { + bool emit_code = (masm != NULL); + VIXL_ASSERT((right.IsImmediate() || right.IsZero()) && + (-1 <= right.GetEquivalentImmediate()) && + (right.GetEquivalentImmediate() <= 1)); + Register left_register; + + if (left.IsPlainRegister()) { + left_register = left.GetRegister(); + } else { + if (emit_code) { + left_register = temps->AcquireSameSizeAs(rd); + masm->Mov(left_register, left); + } + if (should_synthesise_left != NULL) *should_synthesise_left = true; + } + if (emit_code) { + int64_t imm = right.GetEquivalentImmediate(); + Register zr = AppropriateZeroRegFor(rd); + if (imm == 0) { + masm->csel(rd, left_register, zr, cond); + } else if (imm == 1) { + masm->csinc(rd, left_register, zr, cond); + } else { + VIXL_ASSERT(imm == -1); + masm->csinv(rd, left_register, zr, cond); + } + } +} + + +void MacroAssembler::Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0) && + IsImmAddSub(-operand.GetImmediate())) { + AddSubMacro(rd, rn, -operand.GetImmediate(), S, SUB); + } else { + AddSubMacro(rd, rn, operand, S, ADD); + } +} + + +void MacroAssembler::Adds(const Register& rd, + const Register& rn, + const Operand& operand) { + Add(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0) && + IsImmAddSub(-operand.GetImmediate())) { + AddSubMacro(rd, rn, -operand.GetImmediate(), S, ADD); + } else { + AddSubMacro(rd, rn, operand, S, SUB); + } +} + + +void MacroAssembler::Subs(const Register& rd, + const Register& rn, + const Operand& operand) { + Sub(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Adds(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Subs(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Fcmp(const FPRegister& fn, + double value, + FPTrapFlags trap) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is: + // * 1 to materialise the constant, using literal pool if necessary + // * 1 instruction for fcmp{e} + MacroEmissionCheckScope guard(this); + if (value != 0.0) { + UseScratchRegisterScope temps(this); + FPRegister tmp = temps.AcquireSameSizeAs(fn); + Fmov(tmp, value); + FPCompareMacro(fn, tmp, trap); + } else { + FPCompareMacro(fn, value, trap); + } +} + + +void MacroAssembler::Fcmpe(const FPRegister& fn, double value) { + Fcmp(fn, value, EnableTrap); +} + + +void MacroAssembler::Fmov(VRegister vd, double imm) { + 
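+  // Scalar (1D) and vector (2D) destinations are both accepted here. Values
+  // with no fmov immediate encoding fall back to a literal-pool load for
+  // scalar destinations (or fmov from xzr when the raw bits are zero), and
+  // to Movi on the raw bits for vector destinations.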
+  VIXL_ASSERT(allow_macro_instructions_);
+  // Floating point immediates are loaded through the literal pool.
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
+    Fmov(vd, Float16(imm));
+    return;
+  }
+
+  if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+    Fmov(vd, static_cast<float>(imm));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1D() || vd.Is2D());
+  if (IsImmFP64(imm)) {
+    fmov(vd, imm);
+  } else {
+    uint64_t rawbits = DoubleToRawbits(imm);
+    if (vd.IsScalar()) {
+      if (rawbits == 0) {
+        fmov(vd, xzr);
+      } else {
+        ldr(vd,
+            new Literal<double>(imm,
+                                &literal_pool_,
+                                RawLiteral::kDeletedOnPlacementByPool));
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, rawbits);
+    }
+  }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, float imm) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  // Floating point immediates are loaded through the literal pool.
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
+    Fmov(vd, Float16(imm));
+    return;
+  }
+
+  if (vd.Is1D() || vd.Is2D()) {
+    Fmov(vd, static_cast<double>(imm));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
+  if (IsImmFP32(imm)) {
+    fmov(vd, imm);
+  } else {
+    uint32_t rawbits = FloatToRawbits(imm);
+    if (vd.IsScalar()) {
+      if (rawbits == 0) {
+        fmov(vd, wzr);
+      } else {
+        ldr(vd,
+            new Literal<float>(imm,
+                               &literal_pool_,
+                               RawLiteral::kDeletedOnPlacementByPool));
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, rawbits);
+    }
+  }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, Float16 imm) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+    Fmov(vd, FPToFloat(imm, kIgnoreDefaultNaN));
+    return;
+  }
+
+  if (vd.Is1D() || vd.Is2D()) {
+    Fmov(vd, FPToDouble(imm, kIgnoreDefaultNaN));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1H() || vd.Is4H() || vd.Is8H());
+  uint16_t rawbits = Float16ToRawbits(imm);
+  if (IsImmFP16(imm)) {
+    fmov(vd, imm);
+  } else {
+    if (vd.IsScalar()) {
+      if (rawbits == 0x0) {
+        fmov(vd, wzr);
+      } else {
+        // We can use movz instead of the literal pool.
+        UseScratchRegisterScope temps(this);
+        Register temp = temps.AcquireW();
+        Mov(temp, rawbits);
+        Fmov(vd, temp);
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, static_cast<uint64_t>(rawbits));
+    }
+  }
+}
+
+
+void MacroAssembler::Neg(const Register& rd, const Operand& operand) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  if (operand.IsImmediate()) {
+    Mov(rd, -operand.GetImmediate());
+  } else {
+    Sub(rd, AppropriateZeroRegFor(rd), operand);
+  }
+}
+
+
+void MacroAssembler::Negs(const Register& rd, const Operand& operand) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+                                              int64_t imm) {
+  return OneInstrMoveImmediateHelper(this, dst, imm);
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+                                                  int64_t imm,
+                                                  PreShiftImmMode mode) {
+  int reg_size = dst.GetSizeInBits();
+
+  // Encode the immediate in a single move instruction, if possible.
+  if (TryOneInstrMoveImmediate(dst, imm)) {
+    // The move was successful; nothing to do here.
+  } else {
+    // Pre-shift the immediate to the least-significant bits of the register.
+    int shift_low = CountTrailingZeros(imm, reg_size);
+    if (mode == kLimitShiftForSP) {
+      // When applied to the stack pointer, the subsequent arithmetic operation
+      // can use the extend form to shift left by a maximum of four bits. Right
+      // shifts are not allowed, so we filter them out later before the new
+      // immediate is tested.
+      shift_low = std::min(shift_low, 4);
+    }
+    int64_t imm_low = imm >> shift_low;
+
+    // Pre-shift the immediate to the most-significant bits of the register,
+    // inserting set bits in the least-significant bits.
+    int shift_high = CountLeadingZeros(imm, reg_size);
+    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+    if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
+      // The new immediate has been moved into the destination's low bits:
+      // return a new leftward-shifting operand.
+      return Operand(dst, LSL, shift_low);
+    } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
+      // The new immediate has been moved into the destination's high bits:
+      // return a new rightward-shifting operand.
+      return Operand(dst, LSR, shift_high);
+    } else {
+      Mov(dst, imm);
+    }
+  }
+  return Operand(dst);
+}
+
+
+void MacroAssembler::Move(const GenericOperand& dst,
+                          const GenericOperand& src) {
+  if (dst.Equals(src)) {
+    return;
+  }
+
+  VIXL_ASSERT(dst.IsValid() && src.IsValid());
+
+  // The sizes of the operands must match exactly.
+  VIXL_ASSERT(dst.GetSizeInBits() == src.GetSizeInBits());
+  VIXL_ASSERT(dst.GetSizeInBits() <= kXRegSize);
+  int operand_size = static_cast<int>(dst.GetSizeInBits());
+
+  if (dst.IsCPURegister() && src.IsCPURegister()) {
+    CPURegister dst_reg = dst.GetCPURegister();
+    CPURegister src_reg = src.GetCPURegister();
+    if (dst_reg.IsRegister() && src_reg.IsRegister()) {
+      Mov(Register(dst_reg), Register(src_reg));
+    } else if (dst_reg.IsVRegister() && src_reg.IsVRegister()) {
+      Fmov(VRegister(dst_reg), VRegister(src_reg));
+    } else {
+      if (dst_reg.IsRegister()) {
+        Fmov(Register(dst_reg), VRegister(src_reg));
+      } else {
+        Fmov(VRegister(dst_reg), Register(src_reg));
+      }
+    }
+    return;
+  }
+
+  if (dst.IsMemOperand() && src.IsMemOperand()) {
+    UseScratchRegisterScope temps(this);
+    CPURegister temp = temps.AcquireCPURegisterOfSize(operand_size);
+    Ldr(temp, src.GetMemOperand());
+    Str(temp, dst.GetMemOperand());
+    return;
+  }
+
+  if (dst.IsCPURegister()) {
+    Ldr(dst.GetCPURegister(), src.GetMemOperand());
+  } else {
+    Str(src.GetCPURegister(), dst.GetMemOperand());
+  }
+}
+
+
+void MacroAssembler::ComputeAddress(const Register& dst,
+                                    const MemOperand& mem_op) {
+  // We cannot handle pre-indexing or post-indexing.
+  VIXL_ASSERT(mem_op.GetAddrMode() == Offset);
+  Register base = mem_op.GetBaseRegister();
+  if (mem_op.IsImmediateOffset()) {
+    Add(dst, base, mem_op.GetOffset());
+  } else {
+    VIXL_ASSERT(mem_op.IsRegisterOffset());
+    Register reg_offset = mem_op.GetRegisterOffset();
+    Shift shift = mem_op.GetShift();
+    Extend extend = mem_op.GetExtend();
+    if (shift == NO_SHIFT) {
+      VIXL_ASSERT(extend != NO_EXTEND);
+      Add(dst, base, Operand(reg_offset, extend, mem_op.GetShiftAmount()));
+    } else {
+      VIXL_ASSERT(extend == NO_EXTEND);
+      Add(dst, base, Operand(reg_offset, shift, mem_op.GetShiftAmount()));
+    }
+  }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+                                 const Register& rn,
+                                 const Operand& operand,
+                                 FlagsUpdate S,
+                                 AddSubOp op) {
+  // Worst case is add/sub immediate:
+  //  * up to 4 instructions to materialise the constant
+  //  * 1 instruction for add/sub
+  MacroEmissionCheckScope guard(this);
+
+  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+      (S == LeaveFlags)) {
+    // The instruction would be a nop. Avoid generating useless code.
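+    // (The 64-bit check matters: a 32-bit `add w0, w0, #0` would still zero
+    // the upper half of x0, so only the 64-bit form is a true nop.)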
+ return; + } + + if ((operand.IsImmediate() && !IsImmAddSub(operand.GetImmediate())) || + (rn.IsZero() && !operand.IsShiftedRegister()) || + (operand.IsShiftedRegister() && (operand.GetShift() == ROR))) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(rn); + if (operand.IsImmediate()) { + PreShiftImmMode mode = kAnyShift; + + // If the destination or source register is the stack pointer, we can + // only pre-shift the immediate right by values supported in the add/sub + // extend encoding. + if (rd.IsSP()) { + // If the destination is SP and flags will be set, we can't pre-shift + // the immediate at all. + mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP; + } else if (rn.IsSP()) { + mode = kLimitShiftForSP; + } + + Operand imm_operand = + MoveImmediateForShiftedOp(temp, operand.GetImmediate(), mode); + AddSub(rd, rn, imm_operand, S, op); + } else { + Mov(temp, operand); + AddSub(rd, rn, temp, S, op); + } + } else { + AddSub(rd, rn, operand, S, op); + } +} + + +void MacroAssembler::Adc(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC); +} + + +void MacroAssembler::Adcs(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC); +} + + +void MacroAssembler::Sbc(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC); +} + + +void MacroAssembler::Sbcs(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC); +} + + +void MacroAssembler::Ngc(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Register zr = AppropriateZeroRegFor(rd); + Sbc(rd, zr, operand); +} + + +void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Register zr = AppropriateZeroRegFor(rd); + Sbcs(rd, zr, operand); +} + + +void MacroAssembler::AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + // Worst case is addc/subc immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for add/sub + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate() || + (operand.IsShiftedRegister() && (operand.GetShift() == ROR))) { + // Add/sub with carry (immediate or ROR shifted register.) + Register temp = temps.AcquireSameSizeAs(rn); + Mov(temp, operand); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsShiftedRegister() && (operand.GetShiftAmount() != 0)) { + // Add/sub with carry (shifted register). + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits()); + VIXL_ASSERT(operand.GetShift() != ROR); + VIXL_ASSERT( + IsUintN(rd.GetSizeInBits() == kXRegSize ? 
kXRegSizeLog2 : kWRegSizeLog2, + operand.GetShiftAmount())); + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitShift(temp, + operand.GetRegister(), + operand.GetShift(), + operand.GetShiftAmount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsExtendedRegister()) { + // Add/sub with carry (extended register). + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() <= rd.GetSizeInBits()); + // Add/sub extended supports a shift <= 4. We want to support exactly the + // same modes. + VIXL_ASSERT(operand.GetShiftAmount() <= 4); + VIXL_ASSERT( + operand.GetRegister().Is64Bits() || + ((operand.GetExtend() != UXTX) && (operand.GetExtend() != SXTX))); + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else { + // The addressing mode is directly supported by the instruction. + AddSubWithCarry(rd, rn, operand, S, op); + } +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ + void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + LoadStoreMacro(REG, addr, OP); \ + } +LS_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + + +void MacroAssembler::LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op) { + // Worst case is ldr/str pre/post index: + // * 1 instruction for ldr/str + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.GetOffset(); + unsigned access_size = CalcLSDataSize(op); + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) && + !IsImmLSUnscaled(offset)) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.GetBaseRegister()); + Mov(temp, addr.GetOffset()); + LoadStore(rt, MemOperand(addr.GetBaseRegister(), temp), op); + } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { + // Post-index beyond unscaled addressing range. + LoadStore(rt, MemOperand(addr.GetBaseRegister()), op); + Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset)); + } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { + // Pre-index beyond unscaled addressing range. + Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset)); + LoadStore(rt, MemOperand(addr.GetBaseRegister()), op); + } else { + // Encodable in one load/store instruction. + LoadStore(rt, addr, op); + } +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void MacroAssembler::FN(const REGTYPE REG, \ + const REGTYPE REG2, \ + const MemOperand& addr) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + LoadStorePairMacro(REG, REG2, addr, OP); \ + } +LSPAIR_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + // TODO(all): Should we support register offset for load-store-pair? 
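+  // (ldp/stp can only encode an immediate offset, hence the assertion that
+  // follows.)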
+ VIXL_ASSERT(!addr.IsRegisterOffset()); + // Worst case is ldp/stp immediate: + // * 1 instruction for ldp/stp + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.GetOffset(); + unsigned access_size = CalcLSPairDataSize(op); + + // Check if the offset fits in the immediate field of the appropriate + // instruction. If not, emit two instructions to perform the operation. + if (IsImmLSPair(offset, access_size)) { + // Encodable in one load/store pair instruction. + LoadStorePair(rt, rt2, addr, op); + } else { + Register base = addr.GetBaseRegister(); + if (addr.IsImmediateOffset()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(base); + Add(temp, base, offset); + LoadStorePair(rt, rt2, MemOperand(temp), op); + } else if (addr.IsPostIndex()) { + LoadStorePair(rt, rt2, MemOperand(base), op); + Add(base, base, offset); + } else { + VIXL_ASSERT(addr.IsPreIndex()); + Add(base, base, offset); + LoadStorePair(rt, rt2, MemOperand(base), op); + } + } +} + + +void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) { + MacroEmissionCheckScope guard(this); + + // There are no pre- or post-index modes for prfm. + VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset()); + + // The access size is implicitly 8 bytes for all prefetch operations. + unsigned size = kXRegSizeInBytesLog2; + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.GetOffset(), size) && + !IsImmLSUnscaled(addr.GetOffset())) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.GetBaseRegister()); + Mov(temp, addr.GetOffset()); + Prefetch(op, MemOperand(addr.GetBaseRegister(), temp)); + } else { + // Simple register-offsets are encodable in one instruction. + Prefetch(op, addr); + } +} + + +void MacroAssembler::Push(const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(src0.IsValid()); + + int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); + int size = src0.GetSizeInBytes(); + + PrepareForPush(count, size); + PushHelper(count, size, src0, src1, src2, src3); +} + + +void MacroAssembler::Pop(const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3) { + // It is not valid to pop into the same register more than once in one + // instruction, not even into the zero register. 
+ VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(dst0.IsValid()); + + int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); + int size = dst0.GetSizeInBytes(); + + PrepareForPop(count, size); + PopHelper(count, size, dst0, dst1, dst2, dst3); +} + + +void MacroAssembler::PushCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*GetScratchRegisterList())); + VIXL_ASSERT(!registers.Overlaps(*GetScratchFPRegisterList())); + VIXL_ASSERT(allow_macro_instructions_); + + int reg_size = registers.GetRegisterSizeInBytes(); + PrepareForPush(registers.GetCount(), reg_size); + + // Bump the stack pointer and store two registers at the bottom. + int size = registers.GetTotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + if (bottom_0.IsValid() && bottom_1.IsValid()) { + Stp(bottom_0, bottom_1, MemOperand(StackPointer(), -size, PreIndex)); + } else if (bottom_0.IsValid()) { + Str(bottom_0, MemOperand(StackPointer(), -size, PreIndex)); + } + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& src0 = registers.PopLowestIndex(); + const CPURegister& src1 = registers.PopLowestIndex(); + if (src1.IsValid()) { + Stp(src0, src1, MemOperand(StackPointer(), offset)); + } else { + Str(src0, MemOperand(StackPointer(), offset)); + } + offset += 2 * reg_size; + } +} + + +void MacroAssembler::PopCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*GetScratchRegisterList())); + VIXL_ASSERT(!registers.Overlaps(*GetScratchFPRegisterList())); + VIXL_ASSERT(allow_macro_instructions_); + + int reg_size = registers.GetRegisterSizeInBytes(); + PrepareForPop(registers.GetCount(), reg_size); + + + int size = registers.GetTotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (dst1.IsValid()) { + Ldp(dst0, dst1, MemOperand(StackPointer(), offset)); + } else { + Ldr(dst0, MemOperand(StackPointer(), offset)); + } + offset += 2 * reg_size; + } + + // Load the two registers at the bottom and drop the stack pointer. + if (bottom_0.IsValid() && bottom_1.IsValid()) { + Ldp(bottom_0, bottom_1, MemOperand(StackPointer(), size, PostIndex)); + } else if (bottom_0.IsValid()) { + Ldr(bottom_0, MemOperand(StackPointer(), size, PostIndex)); + } +} + + +void MacroAssembler::PushMultipleTimes(int count, Register src) { + VIXL_ASSERT(allow_macro_instructions_); + int size = src.GetSizeInBytes(); + + PrepareForPush(count, size); + // Push up to four registers at a time if possible because if the current + // stack pointer is sp and the register size is 32, registers must be pushed + // in blocks of four in order to maintain the 16-byte alignment for sp. 
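+  // For example, PushMultipleTimes(7, w0) emits one block of four pushes,
+  // then a pair, then a single push.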
+ while (count >= 4) { + PushHelper(4, size, src, src, src, src); + count -= 4; + } + if (count >= 2) { + PushHelper(2, size, src, src, NoReg, NoReg); + count -= 2; + } + if (count == 1) { + PushHelper(1, size, src, NoReg, NoReg, NoReg); + count -= 1; + } + VIXL_ASSERT(count == 0); +} + + +void MacroAssembler::PushHelper(int count, + int size, + const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 stp. + ExactAssemblyScope scope(this, + 2 * kInstructionSize, + ExactAssemblyScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(size == src0.GetSizeInBytes()); + + // When pushing multiple registers, the store order is chosen such that + // Push(a, b) is equivalent to Push(a) followed by Push(b). + switch (count) { + case 1: + VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); + str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); + break; + case 2: + VIXL_ASSERT(src2.IsNone() && src3.IsNone()); + stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); + break; + case 3: + VIXL_ASSERT(src3.IsNone()); + stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); + str(src0, MemOperand(StackPointer(), 2 * size)); + break; + case 4: + // Skip over 4 * size, then fill in the gap. This allows four W registers + // to be pushed using sp, whilst maintaining 16-byte alignment for sp at + // all times. + stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex)); + stp(src1, src0, MemOperand(StackPointer(), 2 * size)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PopHelper(int count, + int size, + const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 ldp. + ExactAssemblyScope scope(this, + 2 * kInstructionSize, + ExactAssemblyScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(size == dst0.GetSizeInBytes()); + + // When popping multiple registers, the load order is chosen such that + // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). + switch (count) { + case 1: + VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); + ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); + break; + case 2: + VIXL_ASSERT(dst2.IsNone() && dst3.IsNone()); + ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); + break; + case 3: + VIXL_ASSERT(dst3.IsNone()); + ldr(dst2, MemOperand(StackPointer(), 2 * size)); + ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); + break; + case 4: + // Load the higher addresses first, then load the lower addresses and skip + // the whole block in the second instruction. This allows four W registers + // to be popped using sp, whilst maintaining 16-byte alignment for sp at + // all times. + ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); + ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PrepareForPush(int count, int size) { + if (sp.Is(StackPointer())) { + // If the current stack pointer is sp, then it must be aligned to 16 bytes + // on entry and the total size of the specified registers must also be a + // multiple of 16 bytes. 
+    VIXL_ASSERT((count * size) % 16 == 0);
+  } else {
+    // Even if the current stack pointer is not the system stack pointer (sp),
+    // the system stack pointer will still be modified in order to comply with
+    // ABI rules about accessing memory below the system stack pointer.
+    BumpSystemStackPointer(count * size);
+  }
+}
+
+
+void MacroAssembler::PrepareForPop(int count, int size) {
+  USE(count, size);
+  if (sp.Is(StackPointer())) {
+    // If the current stack pointer is sp, then it must be aligned to 16 bytes
+    // on entry and the total size of the specified registers must also be a
+    // multiple of 16 bytes.
+    VIXL_ASSERT((count * size) % 16 == 0);
+  }
+}
+
+void MacroAssembler::Poke(const Register& src, const Operand& offset) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  if (offset.IsImmediate()) {
+    VIXL_ASSERT(offset.GetImmediate() >= 0);
+  }
+
+  Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  if (offset.IsImmediate()) {
+    VIXL_ASSERT(offset.GetImmediate() >= 0);
+  }
+
+  Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Claim(const Operand& size) {
+  VIXL_ASSERT(allow_macro_instructions_);
+
+  if (size.IsZero()) {
+    return;
+  }
+
+  if (size.IsImmediate()) {
+    VIXL_ASSERT(size.GetImmediate() > 0);
+    if (sp.Is(StackPointer())) {
+      VIXL_ASSERT((size.GetImmediate() % 16) == 0);
+    }
+  }
+
+  if (!sp.Is(StackPointer())) {
+    BumpSystemStackPointer(size);
+  }
+
+  Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(const Operand& size) {
+  VIXL_ASSERT(allow_macro_instructions_);
+
+  if (size.IsZero()) {
+    return;
+  }
+
+  if (size.IsImmediate()) {
+    VIXL_ASSERT(size.GetImmediate() > 0);
+    if (sp.Is(StackPointer())) {
+      VIXL_ASSERT((size.GetImmediate() % 16) == 0);
+    }
+  }
+
+  Add(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+  // Ensure that the macro-assembler doesn't use any scratch registers.
+  // 10 stp will be emitted.
+  // TODO(all): Should we use GetCalleeSaved and SavedFP.
+  ExactAssemblyScope scope(this, 10 * kInstructionSize);
+
+  // This method must not be called unless the current stack pointer is sp.
+  VIXL_ASSERT(sp.Is(StackPointer()));
+
+  MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex);
+
+  stp(x29, x30, tos);
+  stp(x27, x28, tos);
+  stp(x25, x26, tos);
+  stp(x23, x24, tos);
+  stp(x21, x22, tos);
+  stp(x19, x20, tos);
+
+  stp(d14, d15, tos);
+  stp(d12, d13, tos);
+  stp(d10, d11, tos);
+  stp(d8, d9, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+  // Ensure that the macro-assembler doesn't use any scratch registers.
+  // 10 ldp will be emitted.
+  // TODO(all): Should we use GetCalleeSaved and SavedFP.
+  ExactAssemblyScope scope(this, 10 * kInstructionSize);
+
+  // This method must not be called unless the current stack pointer is sp.
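+  // The registers are reloaded in the reverse of the order used by
+  // PushCalleeSavedRegisters, so d8-d15 come off the stack first.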
+ VIXL_ASSERT(sp.Is(StackPointer())); + + MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex); + + ldp(d8, d9, tos); + ldp(d10, d11, tos); + ldp(d12, d13, tos); + ldp(d14, d15, tos); + + ldp(x19, x20, tos); + ldp(x21, x22, tos); + ldp(x23, x24, tos); + ldp(x25, x26, tos); + ldp(x27, x28, tos); + ldp(x29, x30, tos); +} + +void MacroAssembler::LoadCPURegList(CPURegList registers, + const MemOperand& src) { + LoadStoreCPURegListHelper(kLoad, registers, src); +} + +void MacroAssembler::StoreCPURegList(CPURegList registers, + const MemOperand& dst) { + LoadStoreCPURegListHelper(kStore, registers, dst); +} + + +void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op, + CPURegList registers, + const MemOperand& mem) { + // We do not handle pre-indexing or post-indexing. + VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex())); + VIXL_ASSERT(!registers.Overlaps(tmp_list_)); + VIXL_ASSERT(!registers.Overlaps(fptmp_list_)); + VIXL_ASSERT(!registers.IncludesAliasOf(sp)); + + UseScratchRegisterScope temps(this); + + MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers, mem, &temps); + const int reg_size = registers.GetRegisterSizeInBytes(); + + VIXL_ASSERT(IsPowerOf2(reg_size)); + + // Since we are operating on register pairs, we would like to align on double + // the standard size; on the other hand, we don't want to insert an extra + // operation, which will happen if the number of registers is even. Note that + // the alignment of the base pointer is unknown here, but we assume that it + // is more likely to be aligned. + if (((loc.GetOffset() & (2 * reg_size - 1)) != 0) && + ((registers.GetCount() % 2) != 0)) { + if (op == kStore) { + Str(registers.PopLowestIndex(), loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldr(registers.PopLowestIndex(), loc); + } + loc.AddOffset(reg_size); + } + while (registers.GetCount() >= 2) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (op == kStore) { + Stp(dst0, dst1, loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldp(dst0, dst1, loc); + } + loc.AddOffset(2 * reg_size); + } + if (!registers.IsEmpty()) { + if (op == kStore) { + Str(registers.PopLowestIndex(), loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldr(registers.PopLowestIndex(), loc); + } + } +} + +MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList( + const CPURegList& registers, + const MemOperand& mem, + UseScratchRegisterScope* scratch_scope) { + // If necessary, pre-compute the base address for the accesses. + if (mem.IsRegisterOffset()) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + + } else if (mem.IsImmediateOffset()) { + int reg_size = registers.GetRegisterSizeInBytes(); + int total_size = registers.GetTotalSizeInBytes(); + int64_t min_offset = mem.GetOffset(); + int64_t max_offset = + mem.GetOffset() + std::max(0, total_size - 2 * reg_size); + if ((registers.GetCount() >= 2) && + (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) || + !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + } + } + + return mem; +} + +void MacroAssembler::BumpSystemStackPointer(const Operand& space) { + VIXL_ASSERT(!sp.Is(StackPointer())); + // TODO: Several callers rely on this not using scratch registers, so we use + // the assembler directly here. 
However, this means that large immediate + // values of 'space' cannot be handled. + ExactAssemblyScope scope(this, kInstructionSize); + sub(sp, StackPointer(), space); +} + + +// TODO(all): Fix printf for NEON registers, and resolve whether we should be +// using FPRegister or VRegister here. + +// This is the main Printf implementation. All callee-saved registers are +// preserved, but NZCV and the caller-saved registers may be clobbered. +void MacroAssembler::PrintfNoPreserve(const char* format, + const CPURegister& arg0, + const CPURegister& arg1, + const CPURegister& arg2, + const CPURegister& arg3) { + // We cannot handle a caller-saved stack pointer. It doesn't make much sense + // in most cases anyway, so this restriction shouldn't be too serious. + VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer())); + + // The provided arguments, and their proper PCS registers. + CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; + CPURegister pcs[kPrintfMaxArgCount]; + + int arg_count = kPrintfMaxArgCount; + + // The PCS varargs registers for printf. Note that x0 is used for the printf + // format string. + static const CPURegList kPCSVarargs = + CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count); + static const CPURegList kPCSVarargsFP = + CPURegList(CPURegister::kVRegister, kDRegSize, 0, arg_count - 1); + + // We can use caller-saved registers as scratch values, except for the + // arguments and the PCS registers where they might need to go. + UseScratchRegisterScope temps(this); + temps.Include(kCallerSaved); + temps.Include(kCallerSavedV); + temps.Exclude(kPCSVarargs); + temps.Exclude(kPCSVarargsFP); + temps.Exclude(arg0, arg1, arg2, arg3); + + // Copies of the arg lists that we can iterate through. + CPURegList pcs_varargs = kPCSVarargs; + CPURegList pcs_varargs_fp = kPCSVarargsFP; + + // Place the arguments. There are lots of clever tricks and optimizations we + // could use here, but Printf is a debug tool so instead we just try to keep + // it simple: Move each input that isn't already in the right place to a + // scratch register, then move everything back. + for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { + // Work out the proper PCS register for this argument. + if (args[i].IsRegister()) { + pcs[i] = pcs_varargs.PopLowestIndex().X(); + // We might only need a W register here. We need to know the size of the + // argument so we can properly encode it for the simulator call. + if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); + } else if (args[i].IsVRegister()) { + // In C, floats are always cast to doubles for varargs calls. + pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); + } else { + VIXL_ASSERT(args[i].IsNone()); + arg_count = i; + break; + } + + // If the argument is already in the right place, leave it where it is. + if (args[i].Aliases(pcs[i])) continue; + + // Otherwise, if the argument is in a PCS argument register, allocate an + // appropriate scratch register and then move it out of the way. + if (kPCSVarargs.IncludesAliasOf(args[i]) || + kPCSVarargsFP.IncludesAliasOf(args[i])) { + if (args[i].IsRegister()) { + Register old_arg = Register(args[i]); + Register new_arg = temps.AcquireSameSizeAs(old_arg); + Mov(new_arg, old_arg); + args[i] = new_arg; + } else { + FPRegister old_arg = FPRegister(args[i]); + FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); + Fmov(new_arg, old_arg); + args[i] = new_arg; + } + } + } + + // Do a second pass to move values into their final positions and perform any + // conversions that may be required. 
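+  // For example, a float argument was assigned a D register in the first
+  // pass, so it is converted with Fcvt here, matching the C varargs
+  // promotion applied by printf.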
+  for (int i = 0; i < arg_count; i++) {
+    VIXL_ASSERT(pcs[i].GetType() == args[i].GetType());
+    if (pcs[i].IsRegister()) {
+      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
+    } else {
+      VIXL_ASSERT(pcs[i].IsVRegister());
+      if (pcs[i].GetSizeInBits() == args[i].GetSizeInBits()) {
+        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
+      } else {
+        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
+      }
+    }
+  }
+
+  // Load the format string into x0, as per the procedure-call standard.
+  //
+  // To make the code as portable as possible, the format string is encoded
+  // directly in the instruction stream. It might be cleaner to encode it in a
+  // literal pool, but since Printf is usually used for debugging, it is
+  // beneficial for it to be minimally dependent on other features.
+  temps.Exclude(x0);
+  Label format_address;
+  Adr(x0, &format_address);
+
+  // Emit the format string directly in the instruction stream.
+  {
+    BlockPoolsScope scope(this);
+    // Data emitted:
+    //   branch
+    //   strlen(format) + 1 (includes null termination)
+    //   padding to next instruction
+    //   unreachable
+    EmissionCheckScope guard(this,
+                             AlignUp(strlen(format) + 1, kInstructionSize) +
+                                 2 * kInstructionSize);
+    Label after_data;
+    B(&after_data);
+    Bind(&format_address);
+    EmitString(format);
+    Unreachable();
+    Bind(&after_data);
+  }
+
+  // We don't pass any arguments on the stack, but we still need to align the C
+  // stack pointer to a 16-byte boundary for PCS compliance.
+  if (!sp.Is(StackPointer())) {
+    Bic(sp, StackPointer(), 0xf);
+  }
+
+  // Actually call printf. This part needs special handling for the simulator,
+  // since the system printf function will use a different instruction set and
+  // the procedure-call standard will not be compatible.
+  if (generate_simulator_code_) {
+    ExactAssemblyScope scope(this, kPrintfLength);
+    hlt(kPrintfOpcode);
+    dc32(arg_count);  // kPrintfArgCountOffset
+
+    // Determine the argument pattern.
+    uint32_t arg_pattern_list = 0;
+    for (int i = 0; i < arg_count; i++) {
+      uint32_t arg_pattern;
+      if (pcs[i].IsRegister()) {
+        arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
+      } else {
+        VIXL_ASSERT(pcs[i].Is64Bits());
+        arg_pattern = kPrintfArgD;
+      }
+      VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
+      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
+    }
+    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
+  } else {
+    Register tmp = temps.AcquireX();
+    Mov(tmp, reinterpret_cast<uintptr_t>(printf));
+    Blr(tmp);
+  }
+}
+
+
+void MacroAssembler::Printf(const char* format,
+                            CPURegister arg0,
+                            CPURegister arg1,
+                            CPURegister arg2,
+                            CPURegister arg3) {
+  // We can only print sp if it is the current stack pointer.
+  if (!sp.Is(StackPointer())) {
+    VIXL_ASSERT(!sp.Aliases(arg0));
+    VIXL_ASSERT(!sp.Aliases(arg1));
+    VIXL_ASSERT(!sp.Aliases(arg2));
+    VIXL_ASSERT(!sp.Aliases(arg3));
+  }
+
+  // Make sure that the macro assembler doesn't try to use any of our arguments
+  // as scratch registers.
+  UseScratchRegisterScope exclude_all(this);
+  exclude_all.ExcludeAll();
+
+  // Preserve all caller-saved registers as well as NZCV.
+  // If sp is the stack pointer, PushCPURegList asserts that the size of each
+  // list is a multiple of 16 bytes.
+  PushCPURegList(kCallerSaved);
+  PushCPURegList(kCallerSavedV);
+
+  {
+    UseScratchRegisterScope temps(this);
+    // We can use caller-saved registers as scratch values (except for argN).
+ temps.Include(kCallerSaved); + temps.Include(kCallerSavedV); + temps.Exclude(arg0, arg1, arg2, arg3); + + // If any of the arguments are the current stack pointer, allocate a new + // register for them, and adjust the value to compensate for pushing the + // caller-saved registers. + bool arg0_sp = StackPointer().Aliases(arg0); + bool arg1_sp = StackPointer().Aliases(arg1); + bool arg2_sp = StackPointer().Aliases(arg2); + bool arg3_sp = StackPointer().Aliases(arg3); + if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { + // Allocate a register to hold the original stack pointer value, to pass + // to PrintfNoPreserve as an argument. + Register arg_sp = temps.AcquireX(); + Add(arg_sp, + StackPointer(), + kCallerSaved.GetTotalSizeInBytes() + + kCallerSavedV.GetTotalSizeInBytes()); + if (arg0_sp) arg0 = Register(arg_sp.GetCode(), arg0.GetSizeInBits()); + if (arg1_sp) arg1 = Register(arg_sp.GetCode(), arg1.GetSizeInBits()); + if (arg2_sp) arg2 = Register(arg_sp.GetCode(), arg2.GetSizeInBits()); + if (arg3_sp) arg3 = Register(arg_sp.GetCode(), arg3.GetSizeInBits()); + } + + // Preserve NZCV. + Register tmp = temps.AcquireX(); + Mrs(tmp, NZCV); + Push(tmp, xzr); + temps.Release(tmp); + + PrintfNoPreserve(format, arg0, arg1, arg2, arg3); + + // Restore NZCV. + tmp = temps.AcquireX(); + Pop(xzr, tmp); + Msr(NZCV, tmp); + temps.Release(tmp); + } + + PopCPURegList(kCallerSavedV); + PopCPURegList(kCallerSaved); +} + +void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) { + VIXL_ASSERT(allow_macro_instructions_); + + if (generate_simulator_code_) { + // The arguments to the trace pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + ExactAssemblyScope scope(this, kTraceLength); + + Label start; + bind(&start); + + // Refer to simulator-aarch64.h for a description of the marker and its + // arguments. + hlt(kTraceOpcode); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kTraceParamsOffset); + dc32(parameters); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kTraceCommandOffset); + dc32(command); + } else { + // Emit nothing on real hardware. + USE(parameters, command); + } +} + + +void MacroAssembler::Log(TraceParameters parameters) { + VIXL_ASSERT(allow_macro_instructions_); + + if (generate_simulator_code_) { + // The arguments to the log pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + ExactAssemblyScope scope(this, kLogLength); + + Label start; + bind(&start); + + // Refer to simulator-aarch64.h for a description of the marker and its + // arguments. + hlt(kLogOpcode); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kLogParamsOffset); + dc32(parameters); + } else { + // Emit nothing on real hardware. + USE(parameters); + } +} + + +void MacroAssembler::EnableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateEnable)); + ExactAssemblyScope scope(this, kInstructionSize); + movn(xzr, InstrumentStateEnable); +} + + +void MacroAssembler::DisableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateDisable)); + ExactAssemblyScope scope(this, kInstructionSize); + movn(xzr, InstrumentStateDisable); +} + + +void MacroAssembler::AnnotateInstrumentation(const char* marker_name) { + VIXL_ASSERT(strlen(marker_name) == 2); + + // We allow only printable characters in the marker names. Unprintable + // characters are reserved for controlling features of the instrumentation. 
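+  // For example, a marker of "AB" is encoded as ('B' << 8) | 'A' in the movn
+  // immediate below.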
+  VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+  ExactAssemblyScope scope(this, kInstructionSize);
+  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+
+void MacroAssembler::SetSimulatorCPUFeatures(const CPUFeatures& features) {
+  ConfigureSimulatorCPUFeaturesHelper(features, kSetCPUFeaturesOpcode);
+}
+
+
+void MacroAssembler::EnableSimulatorCPUFeatures(const CPUFeatures& features) {
+  ConfigureSimulatorCPUFeaturesHelper(features, kEnableCPUFeaturesOpcode);
+}
+
+
+void MacroAssembler::DisableSimulatorCPUFeatures(const CPUFeatures& features) {
+  ConfigureSimulatorCPUFeaturesHelper(features, kDisableCPUFeaturesOpcode);
+}
+
+
+void MacroAssembler::ConfigureSimulatorCPUFeaturesHelper(
+    const CPUFeatures& features, DebugHltOpcode action) {
+  VIXL_ASSERT(allow_macro_instructions_);
+  VIXL_ASSERT(generate_simulator_code_);
+
+  typedef ConfigureCPUFeaturesElementType ElementType;
+  VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <=
+              std::numeric_limits<ElementType>::max());
+
+  size_t count = features.Count();
+
+  size_t preamble_length = kConfigureCPUFeaturesListOffset;
+  size_t list_length = (count + 1) * sizeof(ElementType);
+  size_t padding_length = AlignUp(list_length, kInstructionSize) - list_length;
+
+  size_t total_length = preamble_length + list_length + padding_length;
+
+  // Check the overall code size as well as the size of each component.
+  ExactAssemblyScope guard_total(this, total_length);
+
+  {  // Preamble: the opcode itself.
+    ExactAssemblyScope guard_preamble(this, preamble_length);
+    hlt(action);
+  }
+  {  // A kNone-terminated list of features.
+    ExactAssemblyScope guard_list(this, list_length);
+    for (CPUFeatures::const_iterator it = features.begin();
+         it != features.end();
+         ++it) {
+      dc(static_cast<ElementType>(*it));
+    }
+    dc(static_cast<ElementType>(CPUFeatures::kNone));
+  }
+  {  // Padding for instruction alignment.
+    ExactAssemblyScope guard_padding(this, padding_length);
+    for (size_t size = 0; size < padding_length; size += sizeof(ElementType)) {
+      // The exact value is arbitrary.
+      dc(static_cast<ElementType>(CPUFeatures::kNone));
+    }
+  }
+}
+
+void MacroAssembler::SaveSimulatorCPUFeatures() {
+  VIXL_ASSERT(allow_macro_instructions_);
+  VIXL_ASSERT(generate_simulator_code_);
+  SingleEmissionCheckScope guard(this);
+  hlt(kSaveCPUFeaturesOpcode);
+}
+
+
+void MacroAssembler::RestoreSimulatorCPUFeatures() {
+  VIXL_ASSERT(allow_macro_instructions_);
+  VIXL_ASSERT(generate_simulator_code_);
+  SingleEmissionCheckScope guard(this);
+  hlt(kRestoreCPUFeaturesOpcode);
+}
+
+
+void UseScratchRegisterScope::Open(MacroAssembler* masm) {
+  VIXL_ASSERT(masm_ == NULL);
+  VIXL_ASSERT(masm != NULL);
+  masm_ = masm;
+
+  CPURegList* available = masm->GetScratchRegisterList();
+  CPURegList* available_fp = masm->GetScratchFPRegisterList();
+  old_available_ = available->GetList();
+  old_availablefp_ = available_fp->GetList();
+  VIXL_ASSERT(available->GetType() == CPURegister::kRegister);
+  VIXL_ASSERT(available_fp->GetType() == CPURegister::kVRegister);
+
+  parent_ = masm->GetCurrentScratchRegisterScope();
+  masm->SetCurrentScratchRegisterScope(this);
+}
+
+
+void UseScratchRegisterScope::Close() {
+  if (masm_ != NULL) {
+    // Ensure that scopes nest perfectly, and do not outlive their parents.
+    // This is a run-time check because the order of destruction of objects in
+    // the _same_ scope is implementation-defined, and is likely to change in
+    // optimised builds.
+    VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this);
+    masm_->SetCurrentScratchRegisterScope(parent_);
+
+    masm_->GetScratchRegisterList()->SetList(old_available_);
+    masm_->GetScratchFPRegisterList()->SetList(old_availablefp_);
+
+    masm_ = NULL;
+  }
+}
+
+
+bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
+  return masm_->GetScratchRegisterList()->IncludesAliasOf(reg) ||
+         masm_->GetScratchFPRegisterList()->IncludesAliasOf(reg);
+}
+
+
+Register UseScratchRegisterScope::AcquireRegisterOfSize(int size_in_bits) {
+  int code = AcquireNextAvailable(masm_->GetScratchRegisterList()).GetCode();
+  return Register(code, size_in_bits);
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireVRegisterOfSize(int size_in_bits) {
+  int code = AcquireNextAvailable(masm_->GetScratchFPRegisterList()).GetCode();
+  return FPRegister(code, size_in_bits);
+}
+
+
+void UseScratchRegisterScope::Release(const CPURegister& reg) {
+  VIXL_ASSERT(masm_ != NULL);
+  if (reg.IsRegister()) {
+    ReleaseByCode(masm_->GetScratchRegisterList(), reg.GetCode());
+  } else if (reg.IsVRegister()) {
+    ReleaseByCode(masm_->GetScratchFPRegisterList(), reg.GetCode());
+  } else {
+    VIXL_ASSERT(reg.IsNone());
+  }
+}
+
+
+void UseScratchRegisterScope::Include(const CPURegList& list) {
+  VIXL_ASSERT(masm_ != NULL);
+  if (list.GetType() == CPURegister::kRegister) {
+    // Make sure that neither sp nor xzr are included in the list.
+    IncludeByRegList(masm_->GetScratchRegisterList(),
+                     list.GetList() & ~(xzr.GetBit() | sp.GetBit()));
+  } else {
+    VIXL_ASSERT(list.GetType() == CPURegister::kVRegister);
+    IncludeByRegList(masm_->GetScratchFPRegisterList(), list.GetList());
+  }
+}
+
+
+void UseScratchRegisterScope::Include(const Register& reg1,
+                                      const Register& reg2,
+                                      const Register& reg3,
+                                      const Register& reg4) {
+  VIXL_ASSERT(masm_ != NULL);
+  RegList include =
+      reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit();
+  // Make sure that neither sp nor xzr are included in the list.
+  include &= ~(xzr.GetBit() | sp.GetBit());
+
+  IncludeByRegList(masm_->GetScratchRegisterList(), include);
+}
+
+
+void UseScratchRegisterScope::Include(const FPRegister& reg1,
+                                      const FPRegister& reg2,
+                                      const FPRegister& reg3,
+                                      const FPRegister& reg4) {
+  RegList include =
+      reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit();
+  IncludeByRegList(masm_->GetScratchFPRegisterList(), include);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegList& list) {
+  if (list.GetType() == CPURegister::kRegister) {
+    ExcludeByRegList(masm_->GetScratchRegisterList(), list.GetList());
+  } else {
+    VIXL_ASSERT(list.GetType() == CPURegister::kVRegister);
+    ExcludeByRegList(masm_->GetScratchFPRegisterList(), list.GetList());
+  }
+}
+
+
+void UseScratchRegisterScope::Exclude(const Register& reg1,
+                                      const Register& reg2,
+                                      const Register& reg3,
+                                      const Register& reg4) {
+  RegList exclude =
+      reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit();
+  ExcludeByRegList(masm_->GetScratchRegisterList(), exclude);
+}
+
+
+void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
+                                      const FPRegister& reg2,
+                                      const FPRegister& reg3,
+                                      const FPRegister& reg4) {
+  RegList excludefp =
+      reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit();
+  ExcludeByRegList(masm_->GetScratchFPRegisterList(), excludefp);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
+                                      const CPURegister& reg2,
+                                      const CPURegister& reg3,
+                                      const CPURegister& reg4) {
+  RegList exclude = 0;
+  RegList excludefp = 0;
+
+  const CPURegister regs[] = {reg1, reg2, reg3, reg4};
+
+  for (size_t i = 0; i < ArrayLength(regs); i++) {
+    if (regs[i].IsRegister()) {
+      exclude |= regs[i].GetBit();
+    } else if (regs[i].IsFPRegister()) {
+      excludefp |= regs[i].GetBit();
+    } else {
+      VIXL_ASSERT(regs[i].IsNone());
+    }
+  }
+
+  ExcludeByRegList(masm_->GetScratchRegisterList(), exclude);
+  ExcludeByRegList(masm_->GetScratchFPRegisterList(), excludefp);
+}
+
+
+void UseScratchRegisterScope::ExcludeAll() {
+  ExcludeByRegList(masm_->GetScratchRegisterList(),
+                   masm_->GetScratchRegisterList()->GetList());
+  ExcludeByRegList(masm_->GetScratchFPRegisterList(),
+                   masm_->GetScratchFPRegisterList()->GetList());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+    CPURegList* available) {
+  VIXL_CHECK(!available->IsEmpty());
+  CPURegister result = available->PopLowestIndex();
+  VIXL_ASSERT(!AreAliased(result, xzr, sp));
+  return result;
+}
+
+
+void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
+  ReleaseByRegList(available, static_cast<RegList>(1) << code);
+}
+
+
+void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
+                                               RegList regs) {
+  available->SetList(available->GetList() | regs);
+}
+
+
+void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
+                                               RegList regs) {
+  available->SetList(available->GetList() | regs);
+}
+
+
+void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
+                                               RegList exclude) {
+  available->SetList(available->GetList() & ~exclude);
+}
+
+}  // namespace aarch64
+}  // namespace vixl
diff --git a/dep/vixl/src/aarch64/operands-aarch64.cc b/dep/vixl/src/aarch64/operands-aarch64.cc
new file mode 100644
index 000000000..20364616a
--- /dev/null
+++ b/dep/vixl/src/aarch64/operands-aarch64.cc
@@ -0,0 +1,528 @@
+// Copyright 2016, VIXL authors
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// CPURegList utilities. +CPURegister CPURegList::PopLowestIndex() { + if (IsEmpty()) { + return NoCPUReg; + } + int index = CountTrailingZeros(list_); + VIXL_ASSERT((1 << index) & list_); + Remove(index); + return CPURegister(index, size_, type_); +} + + +CPURegister CPURegList::PopHighestIndex() { + VIXL_ASSERT(IsValid()); + if (IsEmpty()) { + return NoCPUReg; + } + int index = CountLeadingZeros(list_); + index = kRegListSizeInBits - 1 - index; + VIXL_ASSERT((1 << index) & list_); + Remove(index); + return CPURegister(index, size_, type_); +} + + +bool CPURegList::IsValid() const { + if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) { + bool is_valid = true; + // Try to create a CPURegister for each element in the list. + for (int i = 0; i < kRegListSizeInBits; i++) { + if (((list_ >> i) & 1) != 0) { + is_valid &= CPURegister(i, size_, type_).IsValid(); + } + } + return is_valid; + } else if (type_ == CPURegister::kNoRegister) { + // We can't use IsEmpty here because that asserts IsValid(). + return list_ == 0; + } else { + return false; + } +} + + +void CPURegList::RemoveCalleeSaved() { + if (GetType() == CPURegister::kRegister) { + Remove(GetCalleeSaved(GetRegisterSizeInBits())); + } else if (GetType() == CPURegister::kVRegister) { + Remove(GetCalleeSavedV(GetRegisterSizeInBits())); + } else { + VIXL_ASSERT(GetType() == CPURegister::kNoRegister); + VIXL_ASSERT(IsEmpty()); + // The list must already be empty, so do nothing. 
+ } +} + + +CPURegList CPURegList::Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3) { + return Union(list_1, Union(list_2, list_3)); +} + + +CPURegList CPURegList::Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); +} + + +CPURegList CPURegList::Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); +} + + +CPURegList CPURegList::Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); +} + + +CPURegList CPURegList::GetCalleeSaved(unsigned size) { + return CPURegList(CPURegister::kRegister, size, 19, 29); +} + + +CPURegList CPURegList::GetCalleeSavedV(unsigned size) { + return CPURegList(CPURegister::kVRegister, size, 8, 15); +} + + +CPURegList CPURegList::GetCallerSaved(unsigned size) { + // Registers x0-x18 and lr (x30) are caller-saved. + CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18); + // Do not use lr directly to avoid initialisation order fiasco bugs for users. + list.Combine(Register(30, kXRegSize)); + return list; +} + + +CPURegList CPURegList::GetCallerSavedV(unsigned size) { + // Registers d0-d7 and d16-d31 are caller-saved. + CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7); + list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31)); + return list; +} + + +const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved(); +const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV(); +const CPURegList kCallerSaved = CPURegList::GetCallerSaved(); +const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV(); + + +// Registers. 
+#define WREG(n) w##n, +const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)}; +#undef WREG + +#define XREG(n) x##n, +const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)}; +#undef XREG + +#define BREG(n) b##n, +const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)}; +#undef BREG + +#define HREG(n) h##n, +const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)}; +#undef HREG + +#define SREG(n) s##n, +const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)}; +#undef SREG + +#define DREG(n) d##n, +const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)}; +#undef DREG + +#define QREG(n) q##n, +const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)}; +#undef QREG + +#define VREG(n) v##n, +const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)}; +#undef VREG + + +const Register& Register::GetWRegFromCode(unsigned code) { + if (code == kSPRegInternalCode) { + return wsp; + } else { + VIXL_ASSERT(code < kNumberOfRegisters); + return wregisters[code]; + } +} + + +const Register& Register::GetXRegFromCode(unsigned code) { + if (code == kSPRegInternalCode) { + return sp; + } else { + VIXL_ASSERT(code < kNumberOfRegisters); + return xregisters[code]; + } +} + + +const VRegister& VRegister::GetBRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return bregisters[code]; +} + + +const VRegister& VRegister::GetHRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return hregisters[code]; +} + + +const VRegister& VRegister::GetSRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return sregisters[code]; +} + + +const VRegister& VRegister::GetDRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return dregisters[code]; +} + + +const VRegister& VRegister::GetQRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return qregisters[code]; +} + + +const VRegister& VRegister::GetVRegFromCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return vregisters[code]; +} + + +const Register& CPURegister::W() const { + VIXL_ASSERT(IsValidRegister()); + return Register::GetWRegFromCode(code_); +} + + +const Register& CPURegister::X() const { + VIXL_ASSERT(IsValidRegister()); + return Register::GetXRegFromCode(code_); +} + + +const VRegister& CPURegister::B() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetBRegFromCode(code_); +} + + +const VRegister& CPURegister::H() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetHRegFromCode(code_); +} + + +const VRegister& CPURegister::S() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetSRegFromCode(code_); +} + + +const VRegister& CPURegister::D() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetDRegFromCode(code_); +} + + +const VRegister& CPURegister::Q() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetQRegFromCode(code_); +} + + +const VRegister& CPURegister::V() const { + VIXL_ASSERT(IsValidVRegister()); + return VRegister::GetVRegFromCode(code_); +} + + +// Operand. 
+Operand::Operand(int64_t immediate) + : immediate_(immediate), + reg_(NoReg), + shift_(NO_SHIFT), + extend_(NO_EXTEND), + shift_amount_(0) {} + + +Operand::Operand(Register reg, Shift shift, unsigned shift_amount) + : reg_(reg), + shift_(shift), + extend_(NO_EXTEND), + shift_amount_(shift_amount) { + VIXL_ASSERT(shift != MSL); + VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize)); + VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize)); + VIXL_ASSERT(!reg.IsSP()); +} + + +Operand::Operand(Register reg, Extend extend, unsigned shift_amount) + : reg_(reg), + shift_(NO_SHIFT), + extend_(extend), + shift_amount_(shift_amount) { + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(shift_amount <= 4); + VIXL_ASSERT(!reg.IsSP()); + + // Extend modes SXTX and UXTX require a 64-bit register. + VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); +} + + +bool Operand::IsImmediate() const { return reg_.Is(NoReg); } + + +bool Operand::IsPlainRegister() const { + return reg_.IsValid() && + (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) || + // No-op shifts. + ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) || + // No-op extend operations. + // We can't include [US]XTW here without knowing more about the + // context; they are only no-ops for 32-bit operations. + // + // For example, this operand could be replaced with w1: + // __ Add(w0, w0, Operand(w1, UXTW)); + // However, no plain register can replace it in this context: + // __ Add(x0, x0, Operand(w1, UXTW)); + (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0))); +} + + +bool Operand::IsShiftedRegister() const { + return reg_.IsValid() && (shift_ != NO_SHIFT); +} + + +bool Operand::IsExtendedRegister() const { + return reg_.IsValid() && (extend_ != NO_EXTEND); +} + + +bool Operand::IsZero() const { + if (IsImmediate()) { + return GetImmediate() == 0; + } else { + return GetRegister().IsZero(); + } +} + + +Operand Operand::ToExtendedRegister() const { + VIXL_ASSERT(IsShiftedRegister()); + VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4)); + return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); +} + + +// MemOperand +MemOperand::MemOperand() + : base_(NoReg), + regoffset_(NoReg), + offset_(0), + addrmode_(Offset), + shift_(NO_SHIFT), + extend_(NO_EXTEND) {} + + +MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode) + : base_(base), + regoffset_(NoReg), + offset_(offset), + addrmode_(addrmode), + shift_(NO_SHIFT), + extend_(NO_EXTEND), + shift_amount_(0) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); +} + + +MemOperand::MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount) + : base_(base), + regoffset_(regoffset), + offset_(0), + addrmode_(Offset), + shift_(NO_SHIFT), + extend_(extend), + shift_amount_(shift_amount) { + VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); + VIXL_ASSERT(!regoffset.IsSP()); + VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); + + // SXTX extend mode requires a 64-bit offset register. 
+  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+                       Register regoffset,
+                       Shift shift,
+                       unsigned shift_amount)
+    : base_(base),
+      regoffset_(regoffset),
+      offset_(0),
+      addrmode_(Offset),
+      shift_(shift),
+      extend_(NO_EXTEND),
+      shift_amount_(shift_amount) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+  VIXL_ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+    : base_(base),
+      regoffset_(NoReg),
+      addrmode_(addrmode),
+      shift_(NO_SHIFT),
+      extend_(NO_EXTEND),
+      shift_amount_(0) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+
+  if (offset.IsImmediate()) {
+    offset_ = offset.GetImmediate();
+  } else if (offset.IsShiftedRegister()) {
+    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
+
+    regoffset_ = offset.GetRegister();
+    shift_ = offset.GetShift();
+    shift_amount_ = offset.GetShiftAmount();
+
+    extend_ = NO_EXTEND;
+    offset_ = 0;
+
+    // These assertions match those in the shifted-register constructor.
+    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+    VIXL_ASSERT(shift_ == LSL);
+  } else {
+    VIXL_ASSERT(offset.IsExtendedRegister());
+    VIXL_ASSERT(addrmode == Offset);
+
+    regoffset_ = offset.GetRegister();
+    extend_ = offset.GetExtend();
+    shift_amount_ = offset.GetShiftAmount();
+
+    shift_ = NO_SHIFT;
+    offset_ = 0;
+
+    // These assertions match those in the extended-register constructor.
+    VIXL_ASSERT(!regoffset_.IsSP());
+    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+  }
+}
+
+
+bool MemOperand::IsImmediateOffset() const {
+  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
+
+
+bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
+
+
+void MemOperand::AddOffset(int64_t offset) {
+  VIXL_ASSERT(IsImmediateOffset());
+  offset_ += offset;
+}
+
+
+GenericOperand::GenericOperand(const CPURegister& reg)
+    : cpu_register_(reg), mem_op_size_(0) {
+  if (reg.IsQ()) {
+    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
+    // Support for Q registers is not implemented yet.
+    VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
+    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
+  if (mem_op_size_ > kXRegSizeInBytes) {
+    // We only support generic operands up to the size of X registers.
+    VIXL_UNIMPLEMENTED();
+  }
+}
+
+bool GenericOperand::Equals(const GenericOperand& other) const {
+  if (!IsValid() || !other.IsValid()) {
+    // Two invalid generic operands are considered equal.
+ return !IsValid() && !other.IsValid(); + } + if (IsCPURegister() && other.IsCPURegister()) { + return GetCPURegister().Is(other.GetCPURegister()); + } else if (IsMemOperand() && other.IsMemOperand()) { + return GetMemOperand().Equals(other.GetMemOperand()) && + (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes()); + } + return false; +} +} +} // namespace vixl::aarch64 diff --git a/dep/vixl/src/aarch64/pointer-auth-aarch64.cc b/dep/vixl/src/aarch64/pointer-auth-aarch64.cc new file mode 100644 index 000000000..55cf4ca59 --- /dev/null +++ b/dep/vixl/src/aarch64/pointer-auth-aarch64.cc @@ -0,0 +1,197 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +#include "simulator-aarch64.h" + +#include "utils-vixl.h" + +namespace vixl { +namespace aarch64 { + +// Randomly generated example keys for simulating only. +const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71, + 0xab9fd4e14b2fec51, + 0}; +const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8, + 0x5267ac6fc280fb7c, + 1}; +const Simulator::PACKey Simulator::kPACKeyDA = {0x5caef808deb8b1e2, + 0xd347cbc06b7b0f77, + 0}; +const Simulator::PACKey Simulator::kPACKeyDB = {0xe06aa1a949ba8cc7, + 0xcfde69e3db6d0432, + 1}; + +// The general PAC key isn't intended to be used with AuthPAC so we ensure the +// key number is invalid and asserts if used incorrectly. +const Simulator::PACKey Simulator::kPACKeyGA = {0xfcd98a44d564b3d5, + 0x6c56df1904bf0ddc, + -1}; + +static uint64_t GetNibble(uint64_t in_data, int position) { + return (in_data >> position) & 0xf; +} + +static uint64_t ShuffleNibbles(uint64_t in_data) { + static int in_positions[16] = + {4, 36, 52, 40, 44, 0, 24, 12, 56, 60, 8, 32, 16, 28, 20, 48}; + uint64_t out_data = 0; + for (int i = 0; i < 16; i++) { + out_data |= GetNibble(in_data, in_positions[i]) << (4 * i); + } + return out_data; +} + +static uint64_t SubstituteNibbles(uint64_t in_data) { + // Randomly chosen substitutes. 
+ static uint64_t subs[16] = + {4, 7, 3, 9, 10, 14, 0, 1, 15, 2, 8, 6, 12, 5, 11, 13}; + uint64_t out_data = 0; + for (int i = 0; i < 16; i++) { + int index = (in_data >> (4 * i)) & 0xf; + out_data |= subs[index] << (4 * i); + } + return out_data; +} + +// Rotate nibble to the left by the amount specified. +static uint64_t RotNibble(uint64_t in_cell, int amount) { + VIXL_ASSERT((amount >= 0) && (amount <= 3)); + + in_cell &= 0xf; + uint64_t temp = (in_cell << 4) | in_cell; + return (temp >> (4 - amount)) & 0xf; +} + +static uint64_t BigShuffle(uint64_t in_data) { + uint64_t out_data = 0; + for (int i = 0; i < 4; i++) { + uint64_t n12 = GetNibble(in_data, 4 * (i + 12)); + uint64_t n8 = GetNibble(in_data, 4 * (i + 8)); + uint64_t n4 = GetNibble(in_data, 4 * (i + 4)); + uint64_t n0 = GetNibble(in_data, 4 * (i + 0)); + + uint64_t t0 = RotNibble(n8, 2) ^ RotNibble(n4, 1) ^ RotNibble(n0, 1); + uint64_t t1 = RotNibble(n12, 1) ^ RotNibble(n4, 2) ^ RotNibble(n0, 1); + uint64_t t2 = RotNibble(n12, 2) ^ RotNibble(n8, 1) ^ RotNibble(n0, 1); + uint64_t t3 = RotNibble(n12, 1) ^ RotNibble(n8, 1) ^ RotNibble(n4, 2); + + out_data |= t3 << (4 * (i + 0)); + out_data |= t2 << (4 * (i + 4)); + out_data |= t1 << (4 * (i + 8)); + out_data |= t0 << (4 * (i + 12)); + } + return out_data; +} + +// A simple, non-standard hash function invented for simulating. It mixes +// reasonably well, however it is unlikely to be cryptographically secure and +// may have a higher collision chance than other hashing algorithms. +uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) { + uint64_t working_value = data ^ key.high; + working_value = BigShuffle(working_value); + working_value = ShuffleNibbles(working_value); + working_value ^= key.low; + working_value = ShuffleNibbles(working_value); + working_value = BigShuffle(working_value); + working_value ^= context; + working_value = SubstituteNibbles(working_value); + working_value = BigShuffle(working_value); + working_value = SubstituteNibbles(working_value); + + return working_value; +} + +// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without +// codes, but is always 55 once a PAC code is added to a pointer. For this +// reason, it must be calculated at the call site. +uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) { + int bottom_pac_bit = GetBottomPACBit(ptr, ttbr); + int top_pac_bit = GetTopPACBit(ptr, type); + return ExtractUnsignedBitfield64(top_pac_bit, + bottom_pac_bit, + 0xffffffffffffffff & ~kTTBRMask) + << bottom_pac_bit; +} + +uint64_t Simulator::AuthPAC(uint64_t ptr, + uint64_t context, + PACKey key, + PointerType type) { + VIXL_ASSERT((key.number == 0) || (key.number == 1)); + + uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1); + uint64_t original_ptr = + ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask); + + uint64_t pac = ComputePAC(original_ptr, context, key); + + uint64_t error_code = 1 << key.number; + if ((pac & pac_mask) == (ptr & pac_mask)) { + return original_ptr; + } else { + int error_lsb = GetTopPACBit(ptr, type) - 2; + uint64_t error_mask = UINT64_C(0x3) << error_lsb; + return (original_ptr & ~error_mask) | (error_code << error_lsb); + } +} + +uint64_t Simulator::AddPAC(uint64_t ptr, + uint64_t context, + PACKey key, + PointerType type) { + int top_pac_bit = GetTopPACBit(ptr, type); + + // TODO: Properly handle the case where extension bits are bad and TBI is + // turned off, and also test me. 
+  VIXL_ASSERT(HasTBI(ptr, type));
+  int ttbr = (ptr >> 55) & 1;
+  uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
+  uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+
+  uint64_t pac = ComputePAC(ext_ptr, context, key);
+
+  // If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
+  // the resulting code.
+  if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
+      ((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
+    pac ^= UINT64_C(1) << (top_pac_bit - 1);
+  }
+
+  uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
+  return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
+}
+
+uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
+  uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
+  return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+}
+}  // namespace aarch64
+}  // namespace vixl
+
+#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
diff --git a/dep/vixl/src/aarch64/simulator-aarch64.cc b/dep/vixl/src/aarch64/simulator-aarch64.cc
new file mode 100644
index 000000000..4763a54aa
--- /dev/null
+++ b/dep/vixl/src/aarch64/simulator-aarch64.cc
@@ -0,0 +1,6658 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+
+#include <cmath>
+#include <cstring>
+#include <limits>
+
+#include "simulator-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+using vixl::internal::SimFloat16;
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+  int width = msb - lsb + 1;
+  VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
+
+  bits <<= lsb;
+  uint32_t mask = ((1 << width) - 1) << lsb;
+  VIXL_ASSERT((mask & write_ignore_mask_) == 0);
+
+  value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+  switch (id) {
+    case NZCV:
+      return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+    case FPCR:
+      return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+    default:
+      VIXL_UNREACHABLE();
+      return SimSystemRegister();
+  }
+}
+
+
+Simulator::Simulator(Decoder* decoder, FILE* stream)
+    : cpu_features_auditor_(decoder, CPUFeatures::All()) {
+  // Ensure that shift operations act as the simulator expects.
+  VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+  VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7fffffff);
+
+  instruction_stats_ = false;
+
+  // Set up the decoder.
+  decoder_ = decoder;
+  decoder_->AppendVisitor(this);
+
+  stream_ = stream;
+
+  print_disasm_ = new PrintDisassembler(stream_);
+  // The Simulator and Disassembler share the same available list, held by the
+  // auditor. The Disassembler only annotates instructions with features that
+  // are _not_ available, so registering the auditor should have no effect
+  // unless the simulator is about to abort (due to missing features). In
+  // practice, this means that with trace enabled, the simulator will crash just
+  // after the disassembler prints the instruction, with the missing features
+  // enumerated.
+  print_disasm_->RegisterCPUFeaturesAuditor(&cpu_features_auditor_);
+
+  SetColouredTrace(false);
+  trace_parameters_ = LOG_NONE;
+
+  ResetState();
+
+  // Allocate and set up the simulator stack.
+  stack_ = new byte[stack_size_];
+  stack_limit_ = stack_ + stack_protection_size_;
+  // Configure the starting stack pointer.
+  //  - Find the top of the stack.
+  byte* tos = stack_ + stack_size_;
+  //  - There's a protection region at both ends of the stack.
+  tos -= stack_protection_size_;
+  //  - The stack pointer must be 16-byte aligned.
+  tos = AlignDown(tos, 16);
+  WriteSp(tos);
+
+  instrumentation_ = NULL;
+
+  // Print a warning about exclusive-access instructions, but only the first
+  // time they are encountered. This warning can be silenced using
+  // SilenceExclusiveAccessWarning().
+  print_exclusive_access_warning_ = true;
+}
+
+
+void Simulator::ResetState() {
+  // Reset the system registers.
+  nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+  fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+  // Reset registers to 0.
+  pc_ = NULL;
+  pc_modified_ = false;
+  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+    WriteXRegister(i, 0xbadbeef);
+  }
+  // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
+ uint64_t nan_bits[] = { + UINT64_C(0x7ff00cab7f8ba9e1), UINT64_C(0x7ff0dead7f8beef1), + }; + VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits[0] & kDRegMask))); + VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits[0] & kSRegMask))); + + qreg_t q_bits; + VIXL_ASSERT(sizeof(q_bits) == sizeof(nan_bits)); + memcpy(&q_bits, nan_bits, sizeof(nan_bits)); + + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + WriteQRegister(i, q_bits); + } + // Returning to address 0 exits the Simulator. + WriteLr(kEndOfSimAddress); +} + + +Simulator::~Simulator() { + delete[] stack_; + // The decoder may outlive the simulator. + decoder_->RemoveVisitor(print_disasm_); + delete print_disasm_; + + decoder_->RemoveVisitor(instrumentation_); + delete instrumentation_; +} + + +void Simulator::Run() { + // Flush any written registers before executing anything, so that + // manually-set registers are logged _before_ the first instruction. + LogAllWrittenRegisters(); + + while (pc_ != kEndOfSimAddress) { + ExecuteInstruction(); + } +} + + +void Simulator::RunFrom(const Instruction* first) { + WritePc(first, NoBranchLog); + Run(); +} + + +const char* Simulator::xreg_names[] = {"x0", "x1", "x2", "x3", "x4", "x5", + "x6", "x7", "x8", "x9", "x10", "x11", + "x12", "x13", "x14", "x15", "x16", "x17", + "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "x29", + "lr", "xzr", "sp"}; + +const char* Simulator::wreg_names[] = {"w0", "w1", "w2", "w3", "w4", "w5", + "w6", "w7", "w8", "w9", "w10", "w11", + "w12", "w13", "w14", "w15", "w16", "w17", + "w18", "w19", "w20", "w21", "w22", "w23", + "w24", "w25", "w26", "w27", "w28", "w29", + "w30", "wzr", "wsp"}; + +const char* Simulator::hreg_names[] = {"h0", "h1", "h2", "h3", "h4", "h5", + "h6", "h7", "h8", "h9", "h10", "h11", + "h12", "h13", "h14", "h15", "h16", "h17", + "h18", "h19", "h20", "h21", "h22", "h23", + "h24", "h25", "h26", "h27", "h28", "h29", + "h30", "h31"}; + +const char* Simulator::sreg_names[] = {"s0", "s1", "s2", "s3", "s4", "s5", + "s6", "s7", "s8", "s9", "s10", "s11", + "s12", "s13", "s14", "s15", "s16", "s17", + "s18", "s19", "s20", "s21", "s22", "s23", + "s24", "s25", "s26", "s27", "s28", "s29", + "s30", "s31"}; + +const char* Simulator::dreg_names[] = {"d0", "d1", "d2", "d3", "d4", "d5", + "d6", "d7", "d8", "d9", "d10", "d11", + "d12", "d13", "d14", "d15", "d16", "d17", + "d18", "d19", "d20", "d21", "d22", "d23", + "d24", "d25", "d26", "d27", "d28", "d29", + "d30", "d31"}; + +const char* Simulator::vreg_names[] = {"v0", "v1", "v2", "v3", "v4", "v5", + "v6", "v7", "v8", "v9", "v10", "v11", + "v12", "v13", "v14", "v15", "v16", "v17", + "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", + "v30", "v31"}; + + +const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after zr. + if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return wreg_names[code]; +} + + +const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after zr. 
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return xreg_names[code]; +} + + +const char* Simulator::HRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return hreg_names[code]; +} + + +const char* Simulator::SRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return sreg_names[code]; +} + + +const char* Simulator::DRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return dreg_names[code]; +} + + +const char* Simulator::VRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return vreg_names[code]; +} + + +#define COLOUR(colour_code) "\033[0;" colour_code "m" +#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m" +#define COLOUR_HIGHLIGHT "\033[43m" +#define NORMAL "" +#define GREY "30" +#define RED "31" +#define GREEN "32" +#define YELLOW "33" +#define BLUE "34" +#define MAGENTA "35" +#define CYAN "36" +#define WHITE "37" +void Simulator::SetColouredTrace(bool value) { + coloured_trace_ = value; + + clr_normal = value ? COLOUR(NORMAL) : ""; + clr_flag_name = value ? COLOUR_BOLD(WHITE) : ""; + clr_flag_value = value ? COLOUR(NORMAL) : ""; + clr_reg_name = value ? COLOUR_BOLD(CYAN) : ""; + clr_reg_value = value ? COLOUR(CYAN) : ""; + clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : ""; + clr_vreg_value = value ? COLOUR(MAGENTA) : ""; + clr_memory_address = value ? COLOUR_BOLD(BLUE) : ""; + clr_warning = value ? COLOUR_BOLD(YELLOW) : ""; + clr_warning_message = value ? COLOUR(YELLOW) : ""; + clr_printf = value ? COLOUR(GREEN) : ""; + clr_branch_marker = value ? COLOUR(GREY) COLOUR_HIGHLIGHT : ""; + + if (value) { + print_disasm_->SetCPUFeaturesPrefix("// Needs: " COLOUR_BOLD(RED)); + print_disasm_->SetCPUFeaturesSuffix(COLOUR(NORMAL)); + } else { + print_disasm_->SetCPUFeaturesPrefix("// Needs: "); + print_disasm_->SetCPUFeaturesSuffix(""); + } +} + + +void Simulator::SetTraceParameters(int parameters) { + bool disasm_before = trace_parameters_ & LOG_DISASM; + trace_parameters_ = parameters; + bool disasm_after = trace_parameters_ & LOG_DISASM; + + if (disasm_before != disasm_after) { + if (disasm_after) { + decoder_->InsertVisitorBefore(print_disasm_, this); + } else { + decoder_->RemoveVisitor(print_disasm_); + } + } +} + + +void Simulator::SetInstructionStats(bool value) { + if (value != instruction_stats_) { + if (value) { + if (instrumentation_ == NULL) { + // Set the sample period to 10, as the VIXL examples and tests are + // short. + instrumentation_ = new Instrument("vixl_stats.csv", 10); + } + decoder_->AppendVisitor(instrumentation_); + } else if (instrumentation_ != NULL) { + decoder_->RemoveVisitor(instrumentation_); + } + instruction_stats_ = value; + } +} + +// Helpers --------------------------------------------------------------------- +uint64_t Simulator::AddWithCarry(unsigned reg_size, + bool set_flags, + uint64_t left, + uint64_t right, + int carry_in) { + VIXL_ASSERT((carry_in == 0) || (carry_in == 1)); + VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); + + uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt; + uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask; + uint64_t sign_mask = (reg_size == kWRegSize) ? 
kWSignMask : kXSignMask;
+
+  left &= reg_mask;
+  right &= reg_mask;
+  uint64_t result = (left + right + carry_in) & reg_mask;
+
+  if (set_flags) {
+    ReadNzcv().SetN(CalcNFlag(result, reg_size));
+    ReadNzcv().SetZ(CalcZFlag(result));
+
+    // Compute the C flag by comparing the result to the max unsigned integer.
+    uint64_t max_uint_2op = max_uint - carry_in;
+    bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
+    ReadNzcv().SetC(C ? 1 : 0);
+
+    // Overflow iff the sign bit is the same for the two inputs and different
+    // for the result.
+    uint64_t left_sign = left & sign_mask;
+    uint64_t right_sign = right & sign_mask;
+    uint64_t result_sign = result & sign_mask;
+    bool V = (left_sign == right_sign) && (left_sign != result_sign);
+    ReadNzcv().SetV(V ? 1 : 0);
+
+    LogSystemRegister(NZCV);
+  }
+  return result;
+}
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+                                int64_t value,
+                                Shift shift_type,
+                                unsigned amount) const {
+  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+  if (amount == 0) {
+    return value;
+  }
+  uint64_t uvalue = static_cast<uint64_t>(value);
+  uint64_t mask = kWRegMask;
+  bool is_negative = (uvalue & kWSignMask) != 0;
+  if (reg_size == kXRegSize) {
+    mask = kXRegMask;
+    is_negative = (uvalue & kXSignMask) != 0;
+  }
+
+  switch (shift_type) {
+    case LSL:
+      uvalue <<= amount;
+      break;
+    case LSR:
+      uvalue >>= amount;
+      break;
+    case ASR:
+      uvalue >>= amount;
+      if (is_negative) {
+        // Simulate sign-extension to 64 bits.
+        uvalue |= ~UINT64_C(0) << (reg_size - amount);
+      }
+      break;
+    case ROR: {
+      uvalue = RotateRight(uvalue, amount, reg_size);
+      break;
+    }
+    default:
+      VIXL_UNIMPLEMENTED();
+      return 0;
+  }
+  uvalue &= mask;
+
+  int64_t result;
+  memcpy(&result, &uvalue, sizeof(result));
+  return result;
+}
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+                               int64_t value,
+                               Extend extend_type,
+                               unsigned left_shift) const {
+  switch (extend_type) {
+    case UXTB:
+      value &= kByteMask;
+      break;
+    case UXTH:
+      value &= kHalfWordMask;
+      break;
+    case UXTW:
+      value &= kWordMask;
+      break;
+    case SXTB:
+      value &= kByteMask;
+      if ((value & 0x80) != 0) {
+        value |= ~UINT64_C(0) << 8;
+      }
+      break;
+    case SXTH:
+      value &= kHalfWordMask;
+      if ((value & 0x8000) != 0) {
+        value |= ~UINT64_C(0) << 16;
+      }
+      break;
+    case SXTW:
+      value &= kWordMask;
+      if ((value & 0x80000000) != 0) {
+        value |= ~UINT64_C(0) << 32;
+      }
+      break;
+    case UXTX:
+    case SXTX:
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+  return ShiftOperand(reg_size, value, LSL, left_shift);
+}
+
+
+void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
+  AssertSupportedFPCR();
+
+  // TODO: This assumes that the C++ implementation handles comparisons in the
+  // way that we expect (as per AssertSupportedFPCR()).
+  bool process_exception = false;
+  if ((IsNaN(val0) != 0) || (IsNaN(val1) != 0)) {
+    ReadNzcv().SetRawValue(FPUnorderedFlag);
+    if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
+        (trap == EnableTrap)) {
+      process_exception = true;
+    }
+  } else if (val0 < val1) {
+    ReadNzcv().SetRawValue(FPLessThanFlag);
+  } else if (val0 > val1) {
+    ReadNzcv().SetRawValue(FPGreaterThanFlag);
+  } else if (val0 == val1) {
+    ReadNzcv().SetRawValue(FPEqualFlag);
+  } else {
+    VIXL_UNREACHABLE();
+  }
+  LogSystemRegister(NZCV);
+  if (process_exception) FPProcessException();
+}
+
+
+uint64_t Simulator::ComputeMemOperandAddress(const MemOperand& mem_op) const {
+  VIXL_ASSERT(mem_op.IsValid());
+  int64_t base = ReadRegister<int64_t>(mem_op.GetBaseRegister());
+  if (mem_op.IsImmediateOffset()) {
+    return base + mem_op.GetOffset();
+  } else {
+    VIXL_ASSERT(mem_op.GetRegisterOffset().IsValid());
+    int64_t offset = ReadRegister<int64_t>(mem_op.GetRegisterOffset());
+    unsigned shift_amount = mem_op.GetShiftAmount();
+    if (mem_op.GetShift() != NO_SHIFT) {
+      offset = ShiftOperand(kXRegSize, offset, mem_op.GetShift(), shift_amount);
+    }
+    if (mem_op.GetExtend() != NO_EXTEND) {
+      offset = ExtendValue(kXRegSize, offset, mem_op.GetExtend(), shift_amount);
+    }
+    return static_cast<uint64_t>(base + offset);
+  }
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
+    unsigned reg_size, unsigned lane_size) {
+  VIXL_ASSERT(reg_size >= lane_size);
+
+  uint32_t format = 0;
+  if (reg_size != lane_size) {
+    switch (reg_size) {
+      default:
+        VIXL_UNREACHABLE();
+        break;
+      case kQRegSizeInBytes:
+        format = kPrintRegAsQVector;
+        break;
+      case kDRegSizeInBytes:
+        format = kPrintRegAsDVector;
+        break;
+    }
+  }
+
+  switch (lane_size) {
+    default:
+      VIXL_UNREACHABLE();
+      break;
+    case kQRegSizeInBytes:
+      format |= kPrintReg1Q;
+      break;
+    case kDRegSizeInBytes:
+      format |= kPrintReg1D;
+      break;
+    case kSRegSizeInBytes:
+      format |= kPrintReg1S;
+      break;
+    case kHRegSizeInBytes:
+      format |= kPrintReg1H;
+      break;
+    case kBRegSizeInBytes:
+      format |= kPrintReg1B;
+      break;
+  }
+  // These sizes would be duplicate case labels.
+  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+  VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
+  VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
+
+  return static_cast<PrintRegisterFormat>(format);
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
+    VectorFormat vform) {
+  switch (vform) {
+    default:
+      VIXL_UNREACHABLE();
+      return kPrintReg16B;
+    case kFormat16B:
+      return kPrintReg16B;
+    case kFormat8B:
+      return kPrintReg8B;
+    case kFormat8H:
+      return kPrintReg8H;
+    case kFormat4H:
+      return kPrintReg4H;
+    case kFormat4S:
+      return kPrintReg4S;
+    case kFormat2S:
+      return kPrintReg2S;
+    case kFormat2D:
+      return kPrintReg2D;
+    case kFormat1D:
+      return kPrintReg1D;
+
+    case kFormatB:
+      return kPrintReg1B;
+    case kFormatH:
+      return kPrintReg1H;
+    case kFormatS:
+      return kPrintReg1S;
+    case kFormatD:
+      return kPrintReg1D;
+  }
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
+    VectorFormat vform) {
+  switch (vform) {
+    default:
+      VIXL_UNREACHABLE();
+      return kPrintReg16B;
+    case kFormat8H:
+      return kPrintReg8HFP;
+    case kFormat4H:
+      return kPrintReg4HFP;
+    case kFormat4S:
+      return kPrintReg4SFP;
+    case kFormat2S:
+      return kPrintReg2SFP;
+    case kFormat2D:
+      return kPrintReg2DFP;
+    case kFormat1D:
+      return kPrintReg1DFP;
+    case kFormatH:
+      return kPrintReg1HFP;
+    case kFormatS:
+      return kPrintReg1SFP;
+    case kFormatD:
+      return kPrintReg1DFP;
+  }
+}
+
+
+void Simulator::PrintWrittenRegisters() {
+  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+    if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
+  }
+}
+
+
+void Simulator::PrintWrittenVRegisters() {
+  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+    // At this point there is no type information, so print as a raw 1Q.
+    if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
+  }
+}
+
+
+void Simulator::PrintSystemRegisters() {
+  PrintSystemRegister(NZCV);
+  PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+    PrintRegister(i);
+  }
+}
+
+
+void Simulator::PrintVRegisters() {
+  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+    // At this point there is no type information, so print as a raw 1Q.
+    PrintVRegister(i, kPrintReg1Q);
+  }
+}
+
+
+// Print a register's name and raw value.
+//
+// Only the least-significant `size_in_bytes` bytes of the register are printed,
+// but the value is aligned as if the whole register had been printed.
+//
+// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes
+// -- the default -- so that the whole register is printed. Other values of
+// size_in_bytes are intended for use when the register hasn't actually been
+// updated (such as in PrintWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintRegisterRawHelper(unsigned code,
+                                       Reg31Mode r31mode,
+                                       int size_in_bytes) {
+  // The template for all supported sizes.
+ // "# x{code}: 0xffeeddccbbaa9988" + // "# w{code}: 0xbbaa9988" + // "# w{code}<15:0>: 0x9988" + // "# w{code}<7:0>: 0x88" + unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2; + + const char* name = ""; + const char* suffix = ""; + switch (size_in_bytes) { + case kXRegSizeInBytes: + name = XRegNameForCode(code, r31mode); + break; + case kWRegSizeInBytes: + name = WRegNameForCode(code, r31mode); + break; + case 2: + name = WRegNameForCode(code, r31mode); + suffix = "<15:0>"; + padding_chars -= strlen(suffix); + break; + case 1: + name = WRegNameForCode(code, r31mode); + suffix = "<7:0>"; + padding_chars -= strlen(suffix); + break; + default: + VIXL_UNREACHABLE(); + } + fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix); + + // Print leading padding spaces. + VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2)); + for (unsigned i = 0; i < padding_chars; i++) { + putc(' ', stream_); + } + + // Print the specified bits in hexadecimal format. + uint64_t bits = ReadRegister(code, r31mode); + bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8); + VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes); + + int chars = size_in_bytes * 2; + fprintf(stream_, + "%s0x%0*" PRIx64 "%s", + clr_reg_value, + chars, + bits, + clr_normal); +} + + +void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { + registers_[code].NotifyRegisterLogged(); + + // Don't print writes into xzr. + if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { + return; + } + + // The template for all x and w registers: + // "# x{code}: 0x{value}" + // "# w{code}: 0x{value}" + + PrintRegisterRawHelper(code, r31mode); + fprintf(stream_, "\n"); +} + + +// Print a register's name and raw value. +// +// The `bytes` and `lsb` arguments can be used to limit the bytes that are +// printed. These arguments are intended for use in cases where register hasn't +// actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a floating-point interpretation or a memory access annotation). +void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) { + // The template for vector types: + // "# v{code}: 0xffeeddccbbaa99887766554433221100". + // An example with bytes=4 and lsb=8: + // "# v{code}: 0xbbaa9988 ". + fprintf(stream_, + "# %s%5s: %s", + clr_vreg_name, + VRegNameForCode(code), + clr_vreg_value); + + int msb = lsb + bytes - 1; + int byte = kQRegSizeInBytes - 1; + + // Print leading padding spaces. (Two spaces per byte.) + while (byte > msb) { + fprintf(stream_, " "); + byte--; + } + + // Print the specified part of the value, byte by byte. + qreg_t rawbits = ReadQRegister(code); + fprintf(stream_, "0x"); + while (byte >= lsb) { + fprintf(stream_, "%02x", rawbits.val[byte]); + byte--; + } + + // Print trailing padding spaces. + while (byte >= 0) { + fprintf(stream_, " "); + byte--; + } + fprintf(stream_, "%s", clr_normal); +} + + +// Print each of the specified lanes of a register as a float or double value. +// +// The `lane_count` and `lslane` arguments can be used to limit the lanes that +// are printed. These arguments are intended for use in cases where register +// hasn't actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). 
+void Simulator::PrintVRegisterFPHelper(unsigned code,
+                                       unsigned lane_size_in_bytes,
+                                       int lane_count,
+                                       int rightmost_lane) {
+  VIXL_ASSERT((lane_size_in_bytes == kHRegSizeInBytes) ||
+              (lane_size_in_bytes == kSRegSizeInBytes) ||
+              (lane_size_in_bytes == kDRegSizeInBytes));
+
+  unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes);
+  VIXL_ASSERT(msb <= kQRegSizeInBytes);
+
+  // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
+  // name is used:
+  //   " (h{code}: {value})"
+  //   " (s{code}: {value})"
+  //   " (d{code}: {value})"
+  // For vector types, "..." is used to represent one or more omitted lanes.
+  //   " (..., {value}, {value}, ...)"
+  if (lane_size_in_bytes == kHRegSizeInBytes) {
+    // TODO: Trace tests will fail until we regenerate them.
+    return;
+  }
+  if ((lane_count == 1) && (rightmost_lane == 0)) {
+    const char* name;
+    switch (lane_size_in_bytes) {
+      case kHRegSizeInBytes:
+        name = HRegNameForCode(code);
+        break;
+      case kSRegSizeInBytes:
+        name = SRegNameForCode(code);
+        break;
+      case kDRegSizeInBytes:
+        name = DRegNameForCode(code);
+        break;
+      default:
+        name = NULL;
+        VIXL_UNREACHABLE();
+    }
+    fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
+  } else {
+    if (msb < (kQRegSizeInBytes - 1)) {
+      fprintf(stream_, " (..., ");
+    } else {
+      fprintf(stream_, " (");
+    }
+  }
+
+  // Print the list of values.
+  const char* separator = "";
+  int leftmost_lane = rightmost_lane + lane_count - 1;
+  for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
+    double value;
+    switch (lane_size_in_bytes) {
+      case kHRegSizeInBytes:
+        value = ReadVRegister(code).GetLane<SimFloat16>(lane);
+        break;
+      case kSRegSizeInBytes:
+        value = ReadVRegister(code).GetLane<float>(lane);
+        break;
+      case kDRegSizeInBytes:
+        value = ReadVRegister(code).GetLane<double>(lane);
+        break;
+      default:
+        value = 0.0;
+        VIXL_UNREACHABLE();
+    }
+    if (IsNaN(value)) {
+      // The output for NaNs is implementation defined. Always print `nan`, so
+      // that traces are coherent across different implementations.
+      fprintf(stream_, "%s%snan%s", separator, clr_vreg_value, clr_normal);
+    } else {
+      fprintf(stream_,
+              "%s%s%#g%s",
+              separator,
+              clr_vreg_value,
+              value,
+              clr_normal);
+    }
+    separator = ", ";
+  }
+
+  if (rightmost_lane > 0) {
+    fprintf(stream_, ", ...");
+  }
+  fprintf(stream_, ")");
+}
+
+
+void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
+  vregisters_[code].NotifyRegisterLogged();
+
+  int lane_size_log2 = format & kPrintRegLaneSizeMask;
+
+  int reg_size_log2;
+  if (format & kPrintRegAsQVector) {
+    reg_size_log2 = kQRegSizeInBytesLog2;
+  } else if (format & kPrintRegAsDVector) {
+    reg_size_log2 = kDRegSizeInBytesLog2;
+  } else {
+    // Scalar types.
+    reg_size_log2 = lane_size_log2;
+  }
+
+  int lane_count = 1 << (reg_size_log2 - lane_size_log2);
+  int lane_size = 1 << lane_size_log2;
+
+  // The template for vector types:
+  //   "# v{code}: 0x{rawbits} (..., {value}, ...)".
+  // The template for scalar types:
+  //   "# v{code}: 0x{rawbits} ({reg}:{value})".
+  // The values in parentheses after the bit representations are floating-point
+  // interpretations. They are displayed only if the kPrintRegAsFP bit is set.
+ + PrintVRegisterRawHelper(code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(code, lane_size, lane_count); + } + + fprintf(stream_, "\n"); +} + + +void Simulator::PrintSystemRegister(SystemRegister id) { + switch (id) { + case NZCV: + fprintf(stream_, + "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", + clr_flag_name, + clr_flag_value, + ReadNzcv().GetN(), + ReadNzcv().GetZ(), + ReadNzcv().GetC(), + ReadNzcv().GetV(), + clr_normal); + break; + case FPCR: { + static const char* rmode[] = {"0b00 (Round to Nearest)", + "0b01 (Round towards Plus Infinity)", + "0b10 (Round towards Minus Infinity)", + "0b11 (Round towards Zero)"}; + VIXL_ASSERT(ReadFpcr().GetRMode() < ArrayLength(rmode)); + fprintf(stream_, + "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", + clr_flag_name, + clr_flag_value, + ReadFpcr().GetAHP(), + ReadFpcr().GetDN(), + ReadFpcr().GetFZ(), + rmode[ReadFpcr().GetRMode()], + clr_normal); + break; + } + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::PrintRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + registers_[reg_code].NotifyRegisterLogged(); + + USE(format); + + // The template is "# {reg}: 0x{value} <- {address}". + PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); + fprintf(stream_, + " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintVRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + vregisters_[reg_code].NotifyRegisterLogged(); + + // The template is "# v{code}: 0x{rawbits} <- address". + PrintVRegisterRawHelper(reg_code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, + GetPrintRegLaneSizeInBytes(format), + GetPrintRegLaneCount(format), + lane); + } + fprintf(stream_, + " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + VIXL_ASSERT(GetPrintRegLaneCount(format) == 1); + + // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy + // and readable, the value is aligned with the values in the register trace. + PrintRegisterRawHelper(reg_code, + Reg31IsZeroRegister, + GetPrintRegSizeInBytes(format)); + fprintf(stream_, + " -> %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintVWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + // The templates: + // "# v{code}: 0x{rawbits} -> {address}" + // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". + // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" + // Because this trace doesn't represent a change to the source register's + // value, only the relevant part of the value is printed. To keep the trace + // tidy and readable, the raw value is aligned with the other values in the + // register trace. 
+  int lane_count = GetPrintRegLaneCount(format);
+  int lane_size = GetPrintRegLaneSizeInBytes(format);
+  int reg_size = GetPrintRegSizeInBytes(format);
+  PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
+  if (format & kPrintRegAsFP) {
+    PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
+  }
+  fprintf(stream_,
+          " -> %s0x%016" PRIxPTR "%s\n",
+          clr_memory_address,
+          address,
+          clr_normal);
+}
+
+
+void Simulator::PrintTakenBranch(const Instruction* target) {
+  fprintf(stream_,
+          "# %sBranch%s to 0x%016" PRIx64 ".\n",
+          clr_branch_marker,
+          clr_normal,
+          reinterpret_cast<uint64_t>(target));
+}
+
+
+// Visitors---------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(const Instruction* instr) {
+  printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+         reinterpret_cast<const void*>(instr),
+         instr->GetInstructionBits());
+  VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(const Instruction* instr) {
+  printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+         reinterpret_cast<const void*>(instr),
+         instr->GetInstructionBits());
+  VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
+              (instr->Mask(PCRelAddressingMask) == ADRP));
+
+  WriteRegister(instr->GetRd(), instr->GetImmPCOffsetTarget());
+}
+
+
+void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
+  switch (instr->Mask(UnconditionalBranchMask)) {
+    case BL:
+      WriteLr(instr->GetNextInstruction());
+      VIXL_FALLTHROUGH();
+    case B:
+      WritePc(instr->GetImmPCOffsetTarget());
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+}
+
+
+void Simulator::VisitConditionalBranch(const Instruction* instr) {
+  VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+  if (ConditionPassed(instr->GetConditionBranch())) {
+    WritePc(instr->GetImmPCOffsetTarget());
+  }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+  bool authenticate = false;
+  bool link = false;
+  uint64_t addr = 0;
+  uint64_t context = 0;
+  Instruction* target;
+
+  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+    case BLR:
+      link = true;
+      VIXL_FALLTHROUGH();
+    case BR:
+    case RET:
+      addr = ReadXRegister(instr->GetRn());
+      break;
+
+    case BLRAAZ:
+    case BLRABZ:
+      link = true;
+      VIXL_FALLTHROUGH();
+    case BRAAZ:
+    case BRABZ:
+      authenticate = true;
+      addr = ReadXRegister(instr->GetRn());
+      break;
+
+    case BLRAA:
+    case BLRAB:
+      link = true;
+      VIXL_FALLTHROUGH();
+    case BRAA:
+    case BRAB:
+      authenticate = true;
+      addr = ReadXRegister(instr->GetRn());
+      context = ReadXRegister(instr->GetRd());
+      break;
+
+    case RETAA:
+    case RETAB:
+      authenticate = true;
+      addr = ReadXRegister(kLinkRegCode);
+      context = ReadXRegister(31, Reg31IsStackPointer);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  if (link) {
+    WriteLr(instr->GetNextInstruction());
+  }
+
+  if (authenticate) {
kPACKeyIA : kPACKeyIB; + addr = AuthPAC(addr, context, key, kInstructionPointer); + + int error_lsb = GetTopPACBit(addr, kInstructionPointer) - 2; + if (((addr >> error_lsb) & 0x3) != 0x0) { + VIXL_ABORT_WITH_MSG("Failed to authenticate pointer."); + } + } + + target = Instruction::Cast(addr); + WritePc(target); +} + + +void Simulator::VisitTestBranch(const Instruction* instr) { + unsigned bit_pos = + (instr->GetImmTestBranchBit5() << 5) | instr->GetImmTestBranchBit40(); + bool bit_zero = ((ReadXRegister(instr->GetRt()) >> bit_pos) & 1) == 0; + bool take_branch = false; + switch (instr->Mask(TestBranchMask)) { + case TBZ: + take_branch = bit_zero; + break; + case TBNZ: + take_branch = !bit_zero; + break; + default: + VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + WritePc(instr->GetImmPCOffsetTarget()); + } +} + + +void Simulator::VisitCompareBranch(const Instruction* instr) { + unsigned rt = instr->GetRt(); + bool take_branch = false; + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + take_branch = (ReadWRegister(rt) == 0); + break; + case CBZ_x: + take_branch = (ReadXRegister(rt) == 0); + break; + case CBNZ_w: + take_branch = (ReadWRegister(rt) != 0); + break; + case CBNZ_x: + take_branch = (ReadXRegister(rt) != 0); + break; + default: + VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + WritePc(instr->GetImmPCOffsetTarget()); + } +} + + +void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + bool set_flags = instr->GetFlagsUpdate(); + int64_t new_val = 0; + Instr operation = instr->Mask(AddSubOpMask); + + switch (operation) { + case ADD: + case ADDS: { + new_val = AddWithCarry(reg_size, + set_flags, + ReadRegister(reg_size, + instr->GetRn(), + instr->GetRnMode()), + op2); + break; + } + case SUB: + case SUBS: { + new_val = AddWithCarry(reg_size, + set_flags, + ReadRegister(reg_size, + instr->GetRn(), + instr->GetRnMode()), + ~op2, + 1); + break; + } + default: + VIXL_UNREACHABLE(); + } + + WriteRegister(reg_size, + instr->GetRd(), + new_val, + LogRegWrites, + instr->GetRdMode()); +} + + +void Simulator::VisitAddSubShifted(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRm()), + static_cast(instr->GetShiftDP()), + instr->GetImmDPShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubImmediate(const Instruction* instr) { + int64_t op2 = instr->GetImmAddSub() + << ((instr->GetShiftAddSub() == 1) ? 12 : 0); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubExtended(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ExtendValue(reg_size, + ReadRegister(reg_size, instr->GetRm()), + static_cast(instr->GetExtendMode()), + instr->GetImmExtendShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubWithCarry(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? 
kXRegSize : kWRegSize; + int64_t op2 = ReadRegister(reg_size, instr->GetRm()); + int64_t new_val; + + if ((instr->Mask(AddSubOpMask) == SUB) || + (instr->Mask(AddSubOpMask) == SUBS)) { + op2 = ~op2; + } + + new_val = AddWithCarry(reg_size, + instr->GetFlagsUpdate(), + ReadRegister(reg_size, instr->GetRn()), + op2, + ReadC()); + + WriteRegister(reg_size, instr->GetRd(), new_val); +} + + +void Simulator::VisitLogicalShifted(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + Shift shift_type = static_cast(instr->GetShiftDP()); + unsigned shift_amount = instr->GetImmDPShift(); + int64_t op2 = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRm()), + shift_type, + shift_amount); + if (instr->Mask(NOT) == NOT) { + op2 = ~op2; + } + LogicalHelper(instr, op2); +} + + +void Simulator::VisitLogicalImmediate(const Instruction* instr) { + LogicalHelper(instr, instr->GetImmLogical()); +} + + +void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op1 = ReadRegister(reg_size, instr->GetRn()); + int64_t result = 0; + bool update_flags = false; + + // Switch on the logical operation, stripping out the NOT bit, as it has a + // different meaning for logical immediate instructions. + switch (instr->Mask(LogicalOpMask & ~NOT)) { + case ANDS: + update_flags = true; + VIXL_FALLTHROUGH(); + case AND: + result = op1 & op2; + break; + case ORR: + result = op1 | op2; + break; + case EOR: + result = op1 ^ op2; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + if (update_flags) { + ReadNzcv().SetN(CalcNFlag(result, reg_size)); + ReadNzcv().SetZ(CalcZFlag(result)); + ReadNzcv().SetC(0); + ReadNzcv().SetV(0); + LogSystemRegister(NZCV); + } + + WriteRegister(reg_size, + instr->GetRd(), + result, + LogRegWrites, + instr->GetRdMode()); +} + + +void Simulator::VisitConditionalCompareRegister(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + ConditionalCompareHelper(instr, ReadRegister(reg_size, instr->GetRm())); +} + + +void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) { + ConditionalCompareHelper(instr, instr->GetImmCondCmp()); +} + + +void Simulator::ConditionalCompareHelper(const Instruction* instr, + int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op1 = ReadRegister(reg_size, instr->GetRn()); + + if (ConditionPassed(instr->GetCondition())) { + // If the condition passes, set the status flags to the result of comparing + // the operands. + if (instr->Mask(ConditionalCompareMask) == CCMP) { + AddWithCarry(reg_size, true, op1, ~op2, 1); + } else { + VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); + AddWithCarry(reg_size, true, op1, op2, 0); + } + } else { + // If the condition fails, set the status flags to the nzcv immediate. 
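+    // For example, `ccmp x0, x1, #0b0100, eq` behaves like `cmp x0, x1` when
+    // the EQ condition holds, and otherwise just sets NZCV to 0b0100 (Z set),
+    // so a following conditional instruction sees an "equal" result.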
+    ReadNzcv().SetFlags(instr->GetNzcv());
+    LogSystemRegister(NZCV);
+  }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+  int offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
+  LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+  LoadStoreHelper(instr, instr->GetImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
+  LoadStoreHelper(instr, instr->GetImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
+  LoadStoreHelper(instr, instr->GetImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+  Extend ext = static_cast<Extend>(instr->GetExtendMode());
+  VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+  unsigned shift_amount = instr->GetImmShiftLS() * instr->GetSizeLS();
+
+  int64_t offset =
+      ExtendValue(kXRegSize, ReadXRegister(instr->GetRm()), ext, shift_amount);
+  LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(const Instruction* instr,
+                                int64_t offset,
+                                AddrMode addrmode) {
+  unsigned srcdst = instr->GetRt();
+  uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
+
+  LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
+  switch (op) {
+    case LDRB_w:
+      WriteWRegister(srcdst, Memory::Read<uint8_t>(address), NoRegLog);
+      break;
+    case LDRH_w:
+      WriteWRegister(srcdst, Memory::Read<uint16_t>(address), NoRegLog);
+      break;
+    case LDR_w:
+      WriteWRegister(srcdst, Memory::Read<uint32_t>(address), NoRegLog);
+      break;
+    case LDR_x:
+      WriteXRegister(srcdst, Memory::Read<uint64_t>(address), NoRegLog);
+      break;
+    case LDRSB_w:
+      WriteWRegister(srcdst, Memory::Read<int8_t>(address), NoRegLog);
+      break;
+    case LDRSH_w:
+      WriteWRegister(srcdst, Memory::Read<int16_t>(address), NoRegLog);
+      break;
+    case LDRSB_x:
+      WriteXRegister(srcdst, Memory::Read<int8_t>(address), NoRegLog);
+      break;
+    case LDRSH_x:
+      WriteXRegister(srcdst, Memory::Read<int16_t>(address), NoRegLog);
+      break;
+    case LDRSW_x:
+      WriteXRegister(srcdst, Memory::Read<int32_t>(address), NoRegLog);
+      break;
+    case LDR_b:
+      WriteBRegister(srcdst, Memory::Read<uint8_t>(address), NoRegLog);
+      break;
+    case LDR_h:
+      WriteHRegister(srcdst, Memory::Read<uint16_t>(address), NoRegLog);
+      break;
+    case LDR_s:
+      WriteSRegister(srcdst, Memory::Read<float>(address), NoRegLog);
+      break;
+    case LDR_d:
+      WriteDRegister(srcdst, Memory::Read<double>(address), NoRegLog);
+      break;
+    case LDR_q:
+      WriteQRegister(srcdst, Memory::Read<qreg_t>(address), NoRegLog);
+      break;
+
+    case STRB_w:
+      Memory::Write<uint8_t>(address, ReadWRegister(srcdst));
+      break;
+    case STRH_w:
+      Memory::Write<uint16_t>(address, ReadWRegister(srcdst));
+      break;
+    case STR_w:
+      Memory::Write<uint32_t>(address, ReadWRegister(srcdst));
+      break;
+    case STR_x:
+      Memory::Write<uint64_t>(address, ReadXRegister(srcdst));
+      break;
+    case STR_b:
+      Memory::Write<uint8_t>(address, ReadBRegister(srcdst));
+      break;
+    case STR_h:
+      Memory::Write<uint16_t>(address, ReadHRegisterBits(srcdst));
+      break;
+    case STR_s:
+      Memory::Write<float>(address, ReadSRegister(srcdst));
+      break;
+    case STR_d:
+      Memory::Write<double>(address, ReadDRegister(srcdst));
+      break;
+    case STR_q:
+      Memory::Write<qreg_t>(address, ReadQRegister(srcdst));
+      break;
+
+    // Ignore prfm hint instructions.
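+    // PRFM is only a hint: the simulator has no cache model, so treating a
+    // prefetch as a no-op is architecturally valid behaviour.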
+    case PRFM:
+      break;
+
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+
+  unsigned access_size = 1 << instr->GetSizeLS();
+  if (instr->IsLoad()) {
+    if ((op == LDR_s) || (op == LDR_d)) {
+      LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+    } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
+      LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+    } else {
+      LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+    }
+  } else if (instr->IsStore()) {
+    if ((op == STR_s) || (op == STR_d)) {
+      LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+    } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
+      LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+    } else {
+      LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+    }
+  } else {
+    VIXL_ASSERT(op == PRFM);
+  }
+
+  local_monitor_.MaybeClear();
+}
+
+
+void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
+  LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
+  LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
+  LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+  LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(const Instruction* instr,
+                                    AddrMode addrmode) {
+  unsigned rt = instr->GetRt();
+  unsigned rt2 = instr->GetRt2();
+  int element_size = 1 << instr->GetSizeLSPair();
+  int64_t offset = instr->GetImmLSPair() * element_size;
+  uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
+  uintptr_t address2 = address + element_size;
+
+  LoadStorePairOp op =
+      static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+  // 'rt' and 'rt2' can only be aliased for stores.
+  VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+  switch (op) {
+    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
+    // will print a more detailed log.
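+    // Each pair access is decomposed into two element-sized accesses at
+    // `address` and `address2`; e.g. `ldp x0, x1, [sp]` reads two consecutive
+    // 64-bit values starting at the stack pointer.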
+    case LDP_w: {
+      WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
+      WriteWRegister(rt2, Memory::Read<uint32_t>(address2), NoRegLog);
+      break;
+    }
+    case LDP_s: {
+      WriteSRegister(rt, Memory::Read<float>(address), NoRegLog);
+      WriteSRegister(rt2, Memory::Read<float>(address2), NoRegLog);
+      break;
+    }
+    case LDP_x: {
+      WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
+      WriteXRegister(rt2, Memory::Read<uint64_t>(address2), NoRegLog);
+      break;
+    }
+    case LDP_d: {
+      WriteDRegister(rt, Memory::Read<double>(address), NoRegLog);
+      WriteDRegister(rt2, Memory::Read<double>(address2), NoRegLog);
+      break;
+    }
+    case LDP_q: {
+      WriteQRegister(rt, Memory::Read<qreg_t>(address), NoRegLog);
+      WriteQRegister(rt2, Memory::Read<qreg_t>(address2), NoRegLog);
+      break;
+    }
+    case LDPSW_x: {
+      WriteXRegister(rt, Memory::Read<int32_t>(address), NoRegLog);
+      WriteXRegister(rt2, Memory::Read<int32_t>(address2), NoRegLog);
+      break;
+    }
+    case STP_w: {
+      Memory::Write<uint32_t>(address, ReadWRegister(rt));
+      Memory::Write<uint32_t>(address2, ReadWRegister(rt2));
+      break;
+    }
+    case STP_s: {
+      Memory::Write<float>(address, ReadSRegister(rt));
+      Memory::Write<float>(address2, ReadSRegister(rt2));
+      break;
+    }
+    case STP_x: {
+      Memory::Write<uint64_t>(address, ReadXRegister(rt));
+      Memory::Write<uint64_t>(address2, ReadXRegister(rt2));
+      break;
+    }
+    case STP_d: {
+      Memory::Write<double>(address, ReadDRegister(rt));
+      Memory::Write<double>(address2, ReadDRegister(rt2));
+      break;
+    }
+    case STP_q: {
+      Memory::Write<qreg_t>(address, ReadQRegister(rt));
+      Memory::Write<qreg_t>(address2, ReadQRegister(rt2));
+      break;
+    }
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  // Print a detailed trace (including the memory address) instead of the basic
+  // register:value trace generated by set_*reg().
+  if (instr->IsLoad()) {
+    if ((op == LDP_s) || (op == LDP_d)) {
+      LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+      LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+    } else if (op == LDP_q) {
+      LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+      LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+    } else {
+      LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+      LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+    }
+  } else {
+    if ((op == STP_s) || (op == STP_d)) {
+      LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+      LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+    } else if (op == STP_q) {
+      LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+      LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+    } else {
+      LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+      LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+    }
+  }
+
+  local_monitor_.MaybeClear();
+}
+
+
+void Simulator::PrintExclusiveAccessWarning() {
+  if (print_exclusive_access_warning_) {
+    fprintf(stderr,
+            "%sWARNING:%s VIXL simulator support for "
+            "load-/store-/clear-exclusive "
+            "instructions is limited. "
+            "Refer to the README for details.%s\n",
+            clr_warning,
+            clr_warning_message,
+            clr_normal);
+    print_exclusive_access_warning_ = false;
+  }
+}
+
+template <typename T>
+void Simulator::CompareAndSwapHelper(const Instruction* instr) {
+  unsigned rs = instr->GetRs();
+  unsigned rt = instr->GetRt();
+  unsigned rn = instr->GetRn();
+
+  unsigned element_size = sizeof(T);
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+
+  bool is_acquire = instr->ExtractBit(22) == 1;
+  bool is_release = instr->ExtractBit(15) == 1;
+
+  T comparevalue = ReadRegister<T>(rs);
+  T newvalue = ReadRegister<T>(rt);
+
+  // The architecture permits that the data read clears any exclusive monitors
+  // associated with that location, even if the compare subsequently fails.
+  local_monitor_.Clear();
+
+  T data = Memory::Read<T>(address);
+  if (is_acquire) {
+    // Approximate load-acquire by issuing a full barrier after the load.
+    __sync_synchronize();
+  }
+
+  if (data == comparevalue) {
+    if (is_release) {
+      // Approximate store-release by issuing a full barrier before the store.
+      __sync_synchronize();
+    }
+    Memory::Write<T>(address, newvalue);
+    LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+  }
+  WriteRegister<T>(rs, data);
+  LogRead(address, rs, GetPrintRegisterFormatForSize(element_size));
+}
+
+template <typename T>
+void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
+  VIXL_ASSERT((sizeof(T) == 4) || (sizeof(T) == 8));
+  unsigned rs = instr->GetRs();
+  unsigned rt = instr->GetRt();
+  unsigned rn = instr->GetRn();
+
+  VIXL_ASSERT((rs % 2 == 0) && (rt % 2 == 0));
+
+  unsigned element_size = sizeof(T);
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+  uint64_t address2 = address + element_size;
+
+  bool is_acquire = instr->ExtractBit(22) == 1;
+  bool is_release = instr->ExtractBit(15) == 1;
+
+  T comparevalue_high = ReadRegister<T>(rs + 1);
+  T comparevalue_low = ReadRegister<T>(rs);
+  T newvalue_high = ReadRegister<T>(rt + 1);
+  T newvalue_low = ReadRegister<T>(rt);
+
+  // The architecture permits that the data read clears any exclusive monitors
+  // associated with that location, even if the compare subsequently fails.
+  local_monitor_.Clear();
+
+  T data_high = Memory::Read<T>(address);
+  T data_low = Memory::Read<T>(address2);
+
+  if (is_acquire) {
+    // Approximate load-acquire by issuing a full barrier after the load.
+    __sync_synchronize();
+  }
+
+  bool same =
+      (data_high == comparevalue_high) && (data_low == comparevalue_low);
+  if (same) {
+    if (is_release) {
+      // Approximate store-release by issuing a full barrier before the store.
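+      // __sync_synchronize() emits a full fence on the host (roughly
+      // std::atomic_thread_fence(std::memory_order_seq_cst)), which is
+      // stronger than the one-way release ordering the ISA requires.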
+      __sync_synchronize();
+    }
+
+    Memory::Write<T>(address, newvalue_high);
+    Memory::Write<T>(address2, newvalue_low);
+  }
+
+  WriteRegister<T>(rs + 1, data_high);
+  WriteRegister<T>(rs, data_low);
+
+  LogRead(address, rs + 1, GetPrintRegisterFormatForSize(element_size));
+  LogRead(address2, rs, GetPrintRegisterFormatForSize(element_size));
+
+  if (same) {
+    LogWrite(address, rt + 1, GetPrintRegisterFormatForSize(element_size));
+    LogWrite(address2, rt, GetPrintRegisterFormatForSize(element_size));
+  }
+}
+
+
+void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
+  PrintExclusiveAccessWarning();
+
+  unsigned rs = instr->GetRs();
+  unsigned rt = instr->GetRt();
+  unsigned rt2 = instr->GetRt2();
+  unsigned rn = instr->GetRn();
+
+  LoadStoreExclusive op =
+      static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
+
+  bool is_exclusive = !instr->GetLdStXNotExclusive();
+  bool is_acquire_release = !is_exclusive || instr->GetLdStXAcquireRelease();
+  bool is_load = instr->GetLdStXLoad();
+  bool is_pair = instr->GetLdStXPair();
+
+  unsigned element_size = 1 << instr->GetLdStXSizeLog2();
+  unsigned access_size = is_pair ? element_size * 2 : element_size;
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+
+  // Verify that the address is available to the host.
+  VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+  // Check the alignment of `address`.
+  if (AlignDown(address, access_size) != address) {
+    VIXL_ALIGNMENT_EXCEPTION();
+  }
+
+  // The sp must be aligned to 16 bytes when it is accessed.
+  if ((rn == 31) && (AlignDown(address, 16) != address)) {
+    VIXL_ALIGNMENT_EXCEPTION();
+  }
+
+
+  switch (op) {
+    case CAS_w:
+    case CASA_w:
+    case CASL_w:
+    case CASAL_w:
+      CompareAndSwapHelper<uint32_t>(instr);
+      break;
+    case CAS_x:
+    case CASA_x:
+    case CASL_x:
+    case CASAL_x:
+      CompareAndSwapHelper<uint64_t>(instr);
+      break;
+    case CASB:
+    case CASAB:
+    case CASLB:
+    case CASALB:
+      CompareAndSwapHelper<uint8_t>(instr);
+      break;
+    case CASH:
+    case CASAH:
+    case CASLH:
+    case CASALH:
+      CompareAndSwapHelper<uint16_t>(instr);
+      break;
+    case CASP_w:
+    case CASPA_w:
+    case CASPL_w:
+    case CASPAL_w:
+      CompareAndSwapPairHelper<uint32_t>(instr);
+      break;
+    case CASP_x:
+    case CASPA_x:
+    case CASPL_x:
+    case CASPAL_x:
+      CompareAndSwapPairHelper<uint64_t>(instr);
+      break;
+    default:
+      if (is_load) {
+        if (is_exclusive) {
+          local_monitor_.MarkExclusive(address, access_size);
+        } else {
+          // Any non-exclusive load can clear the local monitor as a side
+          // effect. We don't need to do this, but it is useful to stress the
+          // simulated code.
+          local_monitor_.Clear();
+        }
+
+        // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS).
+        // We will print a more detailed log.
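+        // An exclusive load marks [address, address + access_size) in the
+        // local monitor; a later store-exclusive to that range succeeds
+        // (writing 0 to rs) only if the mark is still present when the store
+        // executes.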
+        switch (op) {
+          case LDXRB_w:
+          case LDAXRB_w:
+          case LDARB_w:
+          case LDLARB:
+            WriteWRegister(rt, Memory::Read<uint8_t>(address), NoRegLog);
+            break;
+          case LDXRH_w:
+          case LDAXRH_w:
+          case LDARH_w:
+          case LDLARH:
+            WriteWRegister(rt, Memory::Read<uint16_t>(address), NoRegLog);
+            break;
+          case LDXR_w:
+          case LDAXR_w:
+          case LDAR_w:
+          case LDLAR_w:
+            WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
+            break;
+          case LDXR_x:
+          case LDAXR_x:
+          case LDAR_x:
+          case LDLAR_x:
+            WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
+            break;
+          case LDXP_w:
+          case LDAXP_w:
+            WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
+            WriteWRegister(rt2,
+                           Memory::Read<uint32_t>(address + element_size),
+                           NoRegLog);
+            break;
+          case LDXP_x:
+          case LDAXP_x:
+            WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
+            WriteXRegister(rt2,
+                           Memory::Read<uint64_t>(address + element_size),
+                           NoRegLog);
+            break;
+          default:
+            VIXL_UNREACHABLE();
+        }
+
+        if (is_acquire_release) {
+          // Approximate load-acquire by issuing a full barrier after the load.
+          __sync_synchronize();
+        }
+
+        LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+        if (is_pair) {
+          LogRead(address + element_size,
+                  rt2,
+                  GetPrintRegisterFormatForSize(element_size));
+        }
+      } else {
+        if (is_acquire_release) {
+          // Approximate store-release by issuing a full barrier before the
+          // store.
+          __sync_synchronize();
+        }
+
+        bool do_store = true;
+        if (is_exclusive) {
+          do_store = local_monitor_.IsExclusive(address, access_size) &&
+                     global_monitor_.IsExclusive(address, access_size);
+          WriteWRegister(rs, do_store ? 0 : 1);
+
+          // - All exclusive stores explicitly clear the local monitor.
+          local_monitor_.Clear();
+        } else {
+          // - Any other store can clear the local monitor as a side effect.
+          local_monitor_.MaybeClear();
+        }
+
+        if (do_store) {
+          switch (op) {
+            case STXRB_w:
+            case STLXRB_w:
+            case STLRB_w:
+            case STLLRB:
+              Memory::Write<uint8_t>(address, ReadWRegister(rt));
+              break;
+            case STXRH_w:
+            case STLXRH_w:
+            case STLRH_w:
+            case STLLRH:
+              Memory::Write<uint16_t>(address, ReadWRegister(rt));
+              break;
+            case STXR_w:
+            case STLXR_w:
+            case STLR_w:
+            case STLLR_w:
+              Memory::Write<uint32_t>(address, ReadWRegister(rt));
+              break;
+            case STXR_x:
+            case STLXR_x:
+            case STLR_x:
+            case STLLR_x:
+              Memory::Write<uint64_t>(address, ReadXRegister(rt));
+              break;
+            case STXP_w:
+            case STLXP_w:
+              Memory::Write<uint32_t>(address, ReadWRegister(rt));
+              Memory::Write<uint32_t>(address + element_size,
+                                      ReadWRegister(rt2));
+              break;
+            case STXP_x:
+            case STLXP_x:
+              Memory::Write<uint64_t>(address, ReadXRegister(rt));
+              Memory::Write<uint64_t>(address + element_size,
+                                      ReadXRegister(rt2));
+              break;
+            default:
+              VIXL_UNREACHABLE();
+          }
+
+          LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+          if (is_pair) {
+            LogWrite(address + element_size,
+                     rt2,
+                     GetPrintRegisterFormatForSize(element_size));
+          }
+        }
+      }
+  }
+}
+
+template <typename T>
+void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
+  unsigned rs = instr->GetRs();
+  unsigned rt = instr->GetRt();
+  unsigned rn = instr->GetRn();
+
+  bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
+  bool is_release = instr->ExtractBit(22) == 1;
+
+  unsigned element_size = sizeof(T);
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+
+  // Verify that the address is available to the host.
+  VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+  T value = ReadRegister<T>(rs);
+
+  T data = Memory::Read<T>(address);
+
+  if (is_acquire) {
+    // Approximate load-acquire by issuing a full barrier after the load.
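+    // Note that is_acquire was suppressed above when rt is xzr: the ST<op>
+    // aliases discard the loaded value and carry no acquire semantics, so no
+    // barrier is required for them.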
+    __sync_synchronize();
+  }
+
+  T result = 0;
+  switch (instr->Mask(AtomicMemorySimpleOpMask)) {
+    case LDADDOp:
+      result = data + value;
+      break;
+    case LDCLROp:
+      VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+      result = data & ~value;
+      break;
+    case LDEOROp:
+      VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+      result = data ^ value;
+      break;
+    case LDSETOp:
+      VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
+      result = data | value;
+      break;
+
+    // Signed/Unsigned difference is done via the templated type T.
+    case LDSMAXOp:
+    case LDUMAXOp:
+      result = (data > value) ? data : value;
+      break;
+    case LDSMINOp:
+    case LDUMINOp:
+      result = (data > value) ? value : data;
+      break;
+  }
+
+  if (is_release) {
+    // Approximate store-release by issuing a full barrier before the store.
+    __sync_synchronize();
+  }
+
+  Memory::Write<T>(address, result);
+  WriteRegister<T>(rt, data, NoRegLog);
+
+  LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+  LogWrite(address, rs, GetPrintRegisterFormatForSize(element_size));
+}
+
+template <typename T>
+void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
+  unsigned rs = instr->GetRs();
+  unsigned rt = instr->GetRt();
+  unsigned rn = instr->GetRn();
+
+  bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
+  bool is_release = instr->ExtractBit(22) == 1;
+
+  unsigned element_size = sizeof(T);
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+
+  // Verify that the address is available to the host.
+  VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+  T data = Memory::Read<T>(address);
+  if (is_acquire) {
+    // Approximate load-acquire by issuing a full barrier after the load.
+    __sync_synchronize();
+  }
+
+  if (is_release) {
+    // Approximate store-release by issuing a full barrier before the store.
+    __sync_synchronize();
+  }
+  Memory::Write<T>(address, ReadRegister<T>(rs));
+
+  WriteRegister<T>(rt, data);
+
+  LogRead(address, rt, GetPrintRegisterFormat(element_size));
+  LogWrite(address, rs, GetPrintRegisterFormat(element_size));
+}
+
+template <typename T>
+void Simulator::LoadAcquireRCpcHelper(const Instruction* instr) {
+  unsigned rt = instr->GetRt();
+  unsigned rn = instr->GetRn();
+
+  unsigned element_size = sizeof(T);
+  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
+
+  // Verify that the address is available to the host.
+  VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+  WriteRegister<T>(rt, Memory::Read<T>(address));
+
+  // Approximate load-acquire by issuing a full barrier after the load.
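+  // LDAPR only requires RCpc (release-consistent, processor-consistent)
+  // ordering, which is weaker than LDAR's RCsc ordering, so a full barrier is
+  // again a conservative over-approximation.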
+  __sync_synchronize();
+
+  LogRead(address, rt, GetPrintRegisterFormat(element_size));
+}
+
+#define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
+  V(LDADD)                                \
+  V(LDCLR)                                \
+  V(LDEOR)                                \
+  V(LDSET)                                \
+  V(LDUMAX)                               \
+  V(LDUMIN)
+
+#define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
+  V(LDSMAX)                              \
+  V(LDSMIN)
+
+void Simulator::VisitAtomicMemory(const Instruction* instr) {
+  switch (instr->Mask(AtomicMemoryMask)) {
+// clang-format off
+#define SIM_FUNC_B(A) \
+    case A##B:        \
+    case A##AB:       \
+    case A##LB:       \
+    case A##ALB:
+#define SIM_FUNC_H(A) \
+    case A##H:        \
+    case A##AH:       \
+    case A##LH:       \
+    case A##ALH:
+#define SIM_FUNC_w(A) \
+    case A##_w:       \
+    case A##A_w:      \
+    case A##L_w:      \
+    case A##AL_w:
+#define SIM_FUNC_x(A) \
+    case A##_x:       \
+    case A##A_x:      \
+    case A##L_x:      \
+    case A##AL_x:
+
+    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
+      AtomicMemorySimpleHelper<uint8_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
+      AtomicMemorySimpleHelper<int8_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
+      AtomicMemorySimpleHelper<uint16_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
+      AtomicMemorySimpleHelper<int16_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
+      AtomicMemorySimpleHelper<uint32_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
+      AtomicMemorySimpleHelper<int32_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
+      AtomicMemorySimpleHelper<uint64_t>(instr);
+      break;
+    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
+      AtomicMemorySimpleHelper<int64_t>(instr);
+      break;
+    // clang-format on
+
+    case SWPB:
+    case SWPAB:
+    case SWPLB:
+    case SWPALB:
+      AtomicMemorySwapHelper<uint8_t>(instr);
+      break;
+    case SWPH:
+    case SWPAH:
+    case SWPLH:
+    case SWPALH:
+      AtomicMemorySwapHelper<uint16_t>(instr);
+      break;
+    case SWP_w:
+    case SWPA_w:
+    case SWPL_w:
+    case SWPAL_w:
+      AtomicMemorySwapHelper<uint32_t>(instr);
+      break;
+    case SWP_x:
+    case SWPA_x:
+    case SWPL_x:
+    case SWPAL_x:
+      AtomicMemorySwapHelper<uint64_t>(instr);
+      break;
+    case LDAPRB:
+      LoadAcquireRCpcHelper<uint8_t>(instr);
+      break;
+    case LDAPRH:
+      LoadAcquireRCpcHelper<uint16_t>(instr);
+      break;
+    case LDAPR_w:
+      LoadAcquireRCpcHelper<uint32_t>(instr);
+      break;
+    case LDAPR_x:
+      LoadAcquireRCpcHelper<uint64_t>(instr);
+      break;
+  }
+}
+
+
+void Simulator::VisitLoadLiteral(const Instruction* instr) {
+  unsigned rt = instr->GetRt();
+  uint64_t address = instr->GetLiteralAddress<uint64_t>();
+
+  // Verify that the calculated address is available to the host.
+  VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+  switch (instr->Mask(LoadLiteralMask)) {
+    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
+    // print a more detailed log.
+    case LDR_w_lit:
+      WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
+      LogRead(address, rt, kPrintWReg);
+      break;
+    case LDR_x_lit:
+      WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
+      LogRead(address, rt, kPrintXReg);
+      break;
+    case LDR_s_lit:
+      WriteSRegister(rt, Memory::Read<float>(address), NoRegLog);
+      LogVRead(address, rt, kPrintSReg);
+      break;
+    case LDR_d_lit:
+      WriteDRegister(rt, Memory::Read<double>(address), NoRegLog);
+      LogVRead(address, rt, kPrintDReg);
+      break;
+    case LDR_q_lit:
+      WriteQRegister(rt, Memory::Read<qreg_t>(address), NoRegLog);
+      LogVRead(address, rt, kPrintReg1Q);
+      break;
+    case LDRSW_x_lit:
+      WriteXRegister(rt, Memory::Read<int32_t>(address), NoRegLog);
+      LogRead(address, rt, kPrintWReg);
+      break;
+
+    // Ignore prfm hint instructions.
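+    // A prefetch-from-literal computes its PC-relative address exactly like
+    // LDR (literal), but has no architectural effect, so it is dropped here.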
+ case PRFM_lit: + break; + + default: + VIXL_UNREACHABLE(); + } + + local_monitor_.MaybeClear(); +} + + +uintptr_t Simulator::AddressModeHelper(unsigned addr_reg, + int64_t offset, + AddrMode addrmode) { + uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer); + + if ((addr_reg == 31) && ((address % 16) != 0)) { + // When the base register is SP the stack pointer is required to be + // quadword aligned prior to the address calculation and write-backs. + // Misalignment will cause a stack alignment fault. + VIXL_ALIGNMENT_EXCEPTION(); + } + + if ((addrmode == PreIndex) || (addrmode == PostIndex)) { + VIXL_ASSERT(offset != 0); + // Only preindex should log the register update here. For Postindex, the + // update will be printed automatically by LogWrittenRegisters _after_ the + // memory access itself is logged. + RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog; + WriteXRegister(addr_reg, address + offset, log_mode, Reg31IsStackPointer); + } + + if ((addrmode == Offset) || (addrmode == PreIndex)) { + address += offset; + } + + // Verify that the calculated address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + return static_cast(address); +} + + +void Simulator::VisitMoveWideImmediate(const Instruction* instr) { + MoveWideImmediateOp mov_op = + static_cast(instr->Mask(MoveWideImmediateMask)); + int64_t new_xn_val = 0; + + bool is_64_bits = instr->GetSixtyFourBits() == 1; + // Shift is limited for W operations. + VIXL_ASSERT(is_64_bits || (instr->GetShiftMoveWide() < 2)); + + // Get the shifted immediate. + int64_t shift = instr->GetShiftMoveWide() * 16; + int64_t shifted_imm16 = static_cast(instr->GetImmMoveWide()) + << shift; + + // Compute the new value. + switch (mov_op) { + case MOVN_w: + case MOVN_x: { + new_xn_val = ~shifted_imm16; + if (!is_64_bits) new_xn_val &= kWRegMask; + break; + } + case MOVK_w: + case MOVK_x: { + unsigned reg_code = instr->GetRd(); + int64_t prev_xn_val = + is_64_bits ? ReadXRegister(reg_code) : ReadWRegister(reg_code); + new_xn_val = (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16; + break; + } + case MOVZ_w: + case MOVZ_x: { + new_xn_val = shifted_imm16; + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Update the destination register. + WriteXRegister(instr->GetRd(), new_xn_val); +} + + +void Simulator::VisitConditionalSelect(const Instruction* instr) { + uint64_t new_val = ReadXRegister(instr->GetRn()); + + if (ConditionFailed(static_cast(instr->GetCondition()))) { + new_val = ReadXRegister(instr->GetRm()); + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: + break; + case CSINC_w: + case CSINC_x: + new_val++; + break; + case CSINV_w: + case CSINV_x: + new_val = ~new_val; + break; + case CSNEG_w: + case CSNEG_x: + new_val = -new_val; + break; + default: + VIXL_UNIMPLEMENTED(); + } + } + unsigned reg_size = instr->GetSixtyFourBits() ? 
kXRegSize : kWRegSize; + WriteRegister(reg_size, instr->GetRd(), new_val); +} + + +// clang-format off +#define PAUTH_MODES(V) \ + V(IA, ReadXRegister(src), kPACKeyIA, kInstructionPointer) \ + V(IB, ReadXRegister(src), kPACKeyIB, kInstructionPointer) \ + V(IZA, 0x00000000, kPACKeyIA, kInstructionPointer) \ + V(IZB, 0x00000000, kPACKeyIB, kInstructionPointer) \ + V(DA, ReadXRegister(src), kPACKeyDA, kDataPointer) \ + V(DB, ReadXRegister(src), kPACKeyDB, kDataPointer) \ + V(DZA, 0x00000000, kPACKeyDA, kDataPointer) \ + V(DZB, 0x00000000, kPACKeyDB, kDataPointer) +// clang-format on + +void Simulator::VisitDataProcessing1Source(const Instruction* instr) { + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + + switch (instr->Mask(DataProcessing1SourceMask)) { +#define DEFINE_PAUTH_FUNCS(SUFFIX, MOD, KEY, D) \ + case PAC##SUFFIX: { \ + uint64_t ptr = ReadXRegister(dst); \ + WriteXRegister(dst, AddPAC(ptr, MOD, KEY, D)); \ + break; \ + } \ + case AUT##SUFFIX: { \ + uint64_t ptr = ReadXRegister(dst); \ + WriteXRegister(dst, AuthPAC(ptr, MOD, KEY, D)); \ + break; \ + } + + PAUTH_MODES(DEFINE_PAUTH_FUNCS) +#undef DEFINE_PAUTH_FUNCS + + case XPACI: + WriteXRegister(dst, StripPAC(ReadXRegister(dst), kInstructionPointer)); + break; + case XPACD: + WriteXRegister(dst, StripPAC(ReadXRegister(dst), kDataPointer)); + break; + case RBIT_w: + WriteWRegister(dst, ReverseBits(ReadWRegister(src))); + break; + case RBIT_x: + WriteXRegister(dst, ReverseBits(ReadXRegister(src))); + break; + case REV16_w: + WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 1)); + break; + case REV16_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 1)); + break; + case REV_w: + WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 2)); + break; + case REV32_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 2)); + break; + case REV_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 3)); + break; + case CLZ_w: + WriteWRegister(dst, CountLeadingZeros(ReadWRegister(src))); + break; + case CLZ_x: + WriteXRegister(dst, CountLeadingZeros(ReadXRegister(src))); + break; + case CLS_w: + WriteWRegister(dst, CountLeadingSignBits(ReadWRegister(src))); + break; + case CLS_x: + WriteXRegister(dst, CountLeadingSignBits(ReadXRegister(src))); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) { + VIXL_ASSERT((n > 32) && (n <= 64)); + for (unsigned i = (n - 1); i >= 32; i--) { + if (((data >> i) & 1) != 0) { + uint64_t polysh32 = (uint64_t)poly << (i - 32); + uint64_t mask = (UINT64_C(1) << i) - 1; + data = ((data & mask) ^ polysh32); + } + } + return data & 0xffffffff; +} + + +template +uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) { + unsigned size = sizeof(val) * 8; // Number of bits in type T. + VIXL_ASSERT((size == 8) || (size == 16) || (size == 32)); + uint64_t tempacc = static_cast(ReverseBits(acc)) << size; + uint64_t tempval = static_cast(ReverseBits(val)) << 32; + return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly)); +} + + +uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) { + // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute + // the CRC of each 32-bit word sequentially. 
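+  // The low word is folded in first, matching the instruction's view of the
+  // register as a little-endian byte stream: CRC32X is equivalent to CRC32W
+  // on bits [31:0] followed by CRC32W on bits [63:32].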
+ acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly); + return Crc32Checksum(acc, (uint32_t)(val >> 32), poly); +} + + +void Simulator::VisitDataProcessing2Source(const Instruction* instr) { + Shift shift_op = NO_SHIFT; + int64_t result = 0; + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + + switch (instr->Mask(DataProcessing2SourceMask)) { + case SDIV_w: { + int32_t rn = ReadWRegister(instr->GetRn()); + int32_t rm = ReadWRegister(instr->GetRm()); + if ((rn == kWMinInt) && (rm == -1)) { + result = kWMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case SDIV_x: { + int64_t rn = ReadXRegister(instr->GetRn()); + int64_t rm = ReadXRegister(instr->GetRm()); + if ((rn == kXMinInt) && (rm == -1)) { + result = kXMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_w: { + uint32_t rn = static_cast(ReadWRegister(instr->GetRn())); + uint32_t rm = static_cast(ReadWRegister(instr->GetRm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_x: { + uint64_t rn = static_cast(ReadXRegister(instr->GetRn())); + uint64_t rm = static_cast(ReadXRegister(instr->GetRm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case LSLV_w: + case LSLV_x: + shift_op = LSL; + break; + case LSRV_w: + case LSRV_x: + shift_op = LSR; + break; + case ASRV_w: + case ASRV_x: + shift_op = ASR; + break; + case RORV_w: + case RORV_x: + shift_op = ROR; + break; + case PACGA: { + uint64_t dst = static_cast(ReadXRegister(instr->GetRn())); + uint64_t src = static_cast( + ReadXRegister(instr->GetRm(), Reg31IsStackPointer)); + uint64_t code = ComputePAC(dst, src, kPACKeyGA); + result = code & 0xffffffff00000000; + break; + } + case CRC32B: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint8_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32H: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint16_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32W: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint32_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32X: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint64_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + reg_size = kWRegSize; + break; + } + case CRC32CB: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint8_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CH: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint16_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CW: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint32_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CX: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint64_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + reg_size = kWRegSize; + break; + } + default: + 
VIXL_UNIMPLEMENTED(); + } + + if (shift_op != NO_SHIFT) { + // Shift distance encoded in the least-significant five/six bits of the + // register. + int mask = (instr->GetSixtyFourBits() == 1) ? 0x3f : 0x1f; + unsigned shift = ReadWRegister(instr->GetRm()) & mask; + result = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRn()), + shift_op, + shift); + } + WriteRegister(reg_size, instr->GetRd(), result); +} + + +// The algorithm used is adapted from the one described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +template +static int64_t MultiplyHigh(T u, T v) { + uint64_t u0, v0, w0, u1, v1, w1, w2, t; + uint64_t sign_mask = UINT64_C(0x8000000000000000); + uint64_t sign_ext = 0; + if (std::numeric_limits::is_signed) { + sign_ext = UINT64_C(0xffffffff00000000); + } + + VIXL_ASSERT(sizeof(u) == sizeof(uint64_t)); + VIXL_ASSERT(sizeof(u) == sizeof(u0)); + + u0 = u & 0xffffffff; + u1 = u >> 32 | (((u & sign_mask) != 0) ? sign_ext : 0); + v0 = v & 0xffffffff; + v1 = v >> 32 | (((v & sign_mask) != 0) ? sign_ext : 0); + + w0 = u0 * v0; + t = u1 * v0 + (w0 >> 32); + + w1 = t & 0xffffffff; + w2 = t >> 32 | (((t & sign_mask) != 0) ? sign_ext : 0); + w1 = u0 * v1 + w1; + w1 = w1 >> 32 | (((w1 & sign_mask) != 0) ? sign_ext : 0); + + uint64_t value = u1 * v1 + w2 + w1; + int64_t result; + memcpy(&result, &value, sizeof(result)); + return result; +} + + +void Simulator::VisitDataProcessing3Source(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + + uint64_t result = 0; + // Extract and sign- or zero-extend 32-bit arguments for widening operations. + uint64_t rn_u32 = ReadRegister(instr->GetRn()); + uint64_t rm_u32 = ReadRegister(instr->GetRm()); + int64_t rn_s32 = ReadRegister(instr->GetRn()); + int64_t rm_s32 = ReadRegister(instr->GetRm()); + uint64_t rn_u64 = ReadXRegister(instr->GetRn()); + uint64_t rm_u64 = ReadXRegister(instr->GetRm()); + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: + result = ReadXRegister(instr->GetRa()) + (rn_u64 * rm_u64); + break; + case MSUB_w: + case MSUB_x: + result = ReadXRegister(instr->GetRa()) - (rn_u64 * rm_u64); + break; + case SMADDL_x: + result = ReadXRegister(instr->GetRa()) + + static_cast(rn_s32 * rm_s32); + break; + case SMSUBL_x: + result = ReadXRegister(instr->GetRa()) - + static_cast(rn_s32 * rm_s32); + break; + case UMADDL_x: + result = ReadXRegister(instr->GetRa()) + (rn_u32 * rm_u32); + break; + case UMSUBL_x: + result = ReadXRegister(instr->GetRa()) - (rn_u32 * rm_u32); + break; + case UMULH_x: + result = MultiplyHigh(ReadRegister(instr->GetRn()), + ReadRegister(instr->GetRm())); + break; + case SMULH_x: + result = MultiplyHigh(ReadXRegister(instr->GetRn()), + ReadXRegister(instr->GetRm())); + break; + default: + VIXL_UNIMPLEMENTED(); + } + WriteRegister(reg_size, instr->GetRd(), result); +} + + +void Simulator::VisitBitfield(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t reg_mask = instr->GetSixtyFourBits() ? kXRegMask : kWRegMask; + int R = instr->GetImmR(); + int S = instr->GetImmS(); + int diff = S - R; + uint64_t mask; + if (diff >= 0) { + mask = ~UINT64_C(0) >> (64 - (diff + 1)); + mask = (static_cast(diff) < (reg_size - 1)) ? mask : reg_mask; + } else { + mask = ~UINT64_C(0) >> (64 - (S + 1)); + mask = RotateRight(mask, R, reg_size); + diff += reg_size; + } + + // inzero indicates if the extracted bitfield is inserted into the + // destination register value or in zero. 
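+  // (UBFM/SBFM build into zero; BFM inserts into the existing value. As a
+  // worked example, `ubfx x0, x1, #8, #4` decodes as R = 8, S = 11, giving
+  // diff = 3 and mask = 0xf, so the result is (x1 >> 8) & 0xf built into
+  // zero.)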
+ // If extend is true, extend the sign of the extracted bitfield. + bool inzero = false; + bool extend = false; + switch (instr->Mask(BitfieldMask)) { + case BFM_x: + case BFM_w: + break; + case SBFM_x: + case SBFM_w: + inzero = true; + extend = true; + break; + case UBFM_x: + case UBFM_w: + inzero = true; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + uint64_t dst = inzero ? 0 : ReadRegister(reg_size, instr->GetRd()); + uint64_t src = ReadRegister(reg_size, instr->GetRn()); + // Rotate source bitfield into place. + uint64_t result = RotateRight(src, R, reg_size); + // Determine the sign extension. + uint64_t topbits = (diff == 63) ? 0 : (~UINT64_C(0) << (diff + 1)); + uint64_t signbits = extend && ((src >> S) & 1) ? topbits : 0; + + // Merge sign extension, dest/zero and bitfield. + result = signbits | (result & mask) | (dst & ~mask); + + WriteRegister(reg_size, instr->GetRd(), result); +} + + +void Simulator::VisitExtract(const Instruction* instr) { + unsigned lsb = instr->GetImmS(); + unsigned reg_size = (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize; + uint64_t low_res = + static_cast(ReadRegister(reg_size, instr->GetRm())) >> lsb; + uint64_t high_res = + (lsb == 0) ? 0 : ReadRegister(reg_size, instr->GetRn()) + << (reg_size - lsb); + WriteRegister(reg_size, instr->GetRd(), low_res | high_res); +} + + +void Simulator::VisitFPImmediate(const Instruction* instr) { + AssertSupportedFPCR(); + unsigned dest = instr->GetRd(); + switch (instr->Mask(FPImmediateMask)) { + case FMOV_h_imm: + WriteHRegister(dest, Float16ToRawbits(instr->GetImmFP16())); + break; + case FMOV_s_imm: + WriteSRegister(dest, instr->GetImmFP32()); + break; + case FMOV_d_imm: + WriteDRegister(dest, instr->GetImmFP64()); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPIntegerConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + + FPRounding round = ReadRMode(); + + switch (instr->Mask(FPIntegerConvertMask)) { + case FCVTAS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieAway)); + break; + case FCVTAS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieAway)); + break; + case FCVTAS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieAway)); + break; + case FCVTAS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieAway)); + break; + case FCVTAS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieAway)); + break; + case FCVTAS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieAway)); + break; + case FCVTAU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieAway)); + break; + case FCVTAU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieAway)); + break; + case FCVTAU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieAway)); + break; + case FCVTAU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieAway)); + break; + case FCVTAU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieAway)); + break; + case FCVTAU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieAway)); + break; + case FCVTMS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPNegativeInfinity)); + break; + case 
FCVTMS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTPS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTNS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieEven)); + break; + case FCVTNS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieEven)); + break; + case FCVTNS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieEven)); + break; + case FCVTNS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieEven)); + break; + case FCVTNS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieEven)); + break; + case FCVTNS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieEven)); + break; + case FCVTNU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieEven)); + break; + case FCVTNU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieEven)); + break; + case FCVTNU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieEven)); + break; + case FCVTNU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieEven)); + break; + case FCVTNU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieEven)); + break; + case FCVTNU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieEven)); + break; + case FCVTZS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPZero)); + break; + case FCVTZS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPZero)); + break; + case FCVTZS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPZero)); + break; + case FCVTZS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPZero)); + break; + case FCVTZS_wd: + WriteWRegister(dst, 
FPToInt32(ReadDRegister(src), FPZero)); + break; + case FCVTZS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPZero)); + break; + case FCVTZU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPZero)); + break; + case FCVTZU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPZero)); + break; + case FCVTZU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPZero)); + break; + case FCVTZU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPZero)); + break; + case FCVTZU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPZero)); + break; + case FCVTZU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPZero)); + break; + case FJCVTZS: + WriteWRegister(dst, FPToFixedJS(ReadDRegister(src))); + break; + case FMOV_hw: + WriteHRegister(dst, ReadWRegister(src) & kHRegMask); + break; + case FMOV_wh: + WriteWRegister(dst, ReadHRegisterBits(src)); + break; + case FMOV_xh: + WriteXRegister(dst, ReadHRegisterBits(src)); + break; + case FMOV_hx: + WriteHRegister(dst, ReadXRegister(src) & kHRegMask); + break; + case FMOV_ws: + WriteWRegister(dst, ReadSRegisterBits(src)); + break; + case FMOV_xd: + WriteXRegister(dst, ReadDRegisterBits(src)); + break; + case FMOV_sw: + WriteSRegisterBits(dst, ReadWRegister(src)); + break; + case FMOV_dx: + WriteDRegisterBits(dst, ReadXRegister(src)); + break; + case FMOV_d1_x: + LogicVRegister(ReadVRegister(dst)) + .SetUint(kFormatD, 1, ReadXRegister(src)); + break; + case FMOV_x_d1: + WriteXRegister(dst, LogicVRegister(ReadVRegister(src)).Uint(kFormatD, 1)); + break; + + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion. + case SCVTF_dx: + WriteDRegister(dst, FixedToDouble(ReadXRegister(src), 0, round)); + break; + case SCVTF_dw: + WriteDRegister(dst, FixedToDouble(ReadWRegister(src), 0, round)); + break; + case UCVTF_dx: + WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), 0, round)); + break; + case UCVTF_dw: { + WriteDRegister(dst, + UFixedToDouble(ReadRegister(src), 0, round)); + break; + } + case SCVTF_sx: + WriteSRegister(dst, FixedToFloat(ReadXRegister(src), 0, round)); + break; + case SCVTF_sw: + WriteSRegister(dst, FixedToFloat(ReadWRegister(src), 0, round)); + break; + case UCVTF_sx: + WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), 0, round)); + break; + case UCVTF_sw: { + WriteSRegister(dst, UFixedToFloat(ReadRegister(src), 0, round)); + break; + } + case SCVTF_hx: + WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), 0, round)); + break; + case SCVTF_hw: + WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), 0, round)); + break; + case UCVTF_hx: + WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), 0, round)); + break; + case UCVTF_hw: { + WriteHRegister(dst, + UFixedToFloat16(ReadRegister(src), 0, round)); + break; + } + + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPFixedPointConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + int fbits = 64 - instr->GetFPScale(); + + FPRounding round = ReadRMode(); + + switch (instr->Mask(FPFixedPointConvertMask)) { + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion. 
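+    // `fbits` counts fractional bits: SCVTF/UCVTF divide the integer source
+    // by 2^fbits during the conversion, while the FCVTZ* cases below multiply
+    // by 2^fbits before truncating towards zero.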
+ case SCVTF_dx_fixed: + WriteDRegister(dst, FixedToDouble(ReadXRegister(src), fbits, round)); + break; + case SCVTF_dw_fixed: + WriteDRegister(dst, FixedToDouble(ReadWRegister(src), fbits, round)); + break; + case UCVTF_dx_fixed: + WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), fbits, round)); + break; + case UCVTF_dw_fixed: { + WriteDRegister(dst, + UFixedToDouble(ReadRegister(src), fbits, round)); + break; + } + case SCVTF_sx_fixed: + WriteSRegister(dst, FixedToFloat(ReadXRegister(src), fbits, round)); + break; + case SCVTF_sw_fixed: + WriteSRegister(dst, FixedToFloat(ReadWRegister(src), fbits, round)); + break; + case UCVTF_sx_fixed: + WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), fbits, round)); + break; + case UCVTF_sw_fixed: { + WriteSRegister(dst, + UFixedToFloat(ReadRegister(src), fbits, round)); + break; + } + case SCVTF_hx_fixed: + WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), fbits, round)); + break; + case SCVTF_hw_fixed: + WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), fbits, round)); + break; + case UCVTF_hx_fixed: + WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), fbits, round)); + break; + case UCVTF_hw_fixed: { + WriteHRegister(dst, + UFixedToFloat16(ReadRegister(src), + fbits, + round)); + break; + } + case FCVTZS_xd_fixed: + WriteXRegister(dst, + FPToInt64(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZS_wd_fixed: + WriteWRegister(dst, + FPToInt32(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZU_xd_fixed: + WriteXRegister(dst, + FPToUInt64(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZU_wd_fixed: + WriteWRegister(dst, + FPToUInt32(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZS_xs_fixed: + WriteXRegister(dst, + FPToInt64(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZS_ws_fixed: + WriteWRegister(dst, + FPToInt32(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZU_xs_fixed: + WriteXRegister(dst, + FPToUInt64(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZU_ws_fixed: + WriteWRegister(dst, + FPToUInt32(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZS_xh_fixed: { + double output = + static_cast(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteXRegister(dst, FPToInt64(output, FPZero)); + break; + } + case FCVTZS_wh_fixed: { + double output = + static_cast(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteWRegister(dst, FPToInt32(output, FPZero)); + break; + } + case FCVTZU_xh_fixed: { + double output = + static_cast(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteXRegister(dst, FPToUInt64(output, FPZero)); + break; + } + case FCVTZU_wh_fixed: { + double output = + static_cast(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteWRegister(dst, FPToUInt32(output, FPZero)); + break; + } + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPCompareMask)) { + case FCMPE_h: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_h: + FPCompare(ReadHRegister(instr->GetRn()), + ReadHRegister(instr->GetRm()), + trap); + break; + case FCMPE_s: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_s: + FPCompare(ReadSRegister(instr->GetRn()), + ReadSRegister(instr->GetRm()), + trap); + break; + case FCMPE_d: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_d: + 
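+      // FCMPE (trap = EnableTrap above) raises Invalid Operation for quiet
+      // NaN operands as well; plain FCMP only signals on signaling NaNs.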
FPCompare(ReadDRegister(instr->GetRn()), + ReadDRegister(instr->GetRm()), + trap); + break; + case FCMPE_h_zero: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_h_zero: + FPCompare(ReadHRegister(instr->GetRn()), SimFloat16(0.0), trap); + break; + case FCMPE_s_zero: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_s_zero: + FPCompare(ReadSRegister(instr->GetRn()), 0.0f, trap); + break; + case FCMPE_d_zero: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_d_zero: + FPCompare(ReadDRegister(instr->GetRn()), 0.0, trap); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMPE_h: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_h: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadHRegister(instr->GetRn()), + ReadHRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + case FCCMPE_s: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_s: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadSRegister(instr->GetRn()), + ReadSRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + case FCCMPE_d: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_d: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadDRegister(instr->GetRn()), + ReadDRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalSelect(const Instruction* instr) { + AssertSupportedFPCR(); + + Instr selected; + if (ConditionPassed(instr->GetCondition())) { + selected = instr->GetRn(); + } else { + selected = instr->GetRm(); + } + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_h: + WriteHRegister(instr->GetRd(), ReadHRegister(selected)); + break; + case FCSEL_s: + WriteSRegister(instr->GetRd(), ReadSRegister(selected)); + break; + case FCSEL_d: + WriteDRegister(instr->GetRd(), ReadDRegister(selected)); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) { + AssertSupportedFPCR(); + + FPRounding fpcr_rounding = static_cast(ReadFpcr().GetRMode()); + VectorFormat vform; + switch (instr->Mask(FPTypeMask)) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case FP64: + vform = kFormatD; + break; + case FP32: + vform = kFormatS; + break; + case FP16: + vform = kFormatH; + break; + } + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + bool inexact_exception = false; + + unsigned fd = instr->GetRd(); + unsigned fn = instr->GetRn(); + + switch (instr->Mask(FPDataProcessing1SourceMask)) { + case FMOV_h: + WriteHRegister(fd, ReadHRegister(fn)); + return; + case FMOV_s: + WriteSRegister(fd, ReadSRegister(fn)); + return; + case FMOV_d: + WriteDRegister(fd, ReadDRegister(fn)); + return; + case FABS_h: + case FABS_s: + case FABS_d: + fabs_(vform, ReadVRegister(fd), ReadVRegister(fn)); + // Explicitly log the register update whilst we have type information. 
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FNEG_h: + case FNEG_s: + case FNEG_d: + fneg(vform, ReadVRegister(fd), ReadVRegister(fn)); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FCVT_ds: + WriteDRegister(fd, FPToDouble(ReadSRegister(fn), ReadDN())); + return; + case FCVT_sd: + WriteSRegister(fd, FPToFloat(ReadDRegister(fn), FPTieEven, ReadDN())); + return; + case FCVT_hs: + WriteHRegister(fd, + Float16ToRawbits( + FPToFloat16(ReadSRegister(fn), FPTieEven, ReadDN()))); + return; + case FCVT_sh: + WriteSRegister(fd, FPToFloat(ReadHRegister(fn), ReadDN())); + return; + case FCVT_dh: + WriteDRegister(fd, FPToDouble(ReadHRegister(fn), ReadDN())); + return; + case FCVT_hd: + WriteHRegister(fd, + Float16ToRawbits( + FPToFloat16(ReadDRegister(fn), FPTieEven, ReadDN()))); + return; + case FSQRT_h: + case FSQRT_s: + case FSQRT_d: + fsqrt(vform, rd, rn); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FRINTI_h: + case FRINTI_s: + case FRINTI_d: + break; // Use FPCR rounding mode. + case FRINTX_h: + case FRINTX_s: + case FRINTX_d: + inexact_exception = true; + break; + case FRINTA_h: + case FRINTA_s: + case FRINTA_d: + fpcr_rounding = FPTieAway; + break; + case FRINTM_h: + case FRINTM_s: + case FRINTM_d: + fpcr_rounding = FPNegativeInfinity; + break; + case FRINTN_h: + case FRINTN_s: + case FRINTN_d: + fpcr_rounding = FPTieEven; + break; + case FRINTP_h: + case FRINTP_s: + case FRINTP_d: + fpcr_rounding = FPPositiveInfinity; + break; + case FRINTZ_h: + case FRINTZ_s: + case FRINTZ_d: + fpcr_rounding = FPZero; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + // Only FRINT* instructions fall through the switch above. + frint(vform, rd, rn, fpcr_rounding, inexact_exception); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); +} + + +void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) { + AssertSupportedFPCR(); + + VectorFormat vform; + switch (instr->Mask(FPTypeMask)) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case FP64: + vform = kFormatD; + break; + case FP32: + vform = kFormatS; + break; + case FP16: + vform = kFormatH; + break; + } + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FADD_h: + case FADD_s: + case FADD_d: + fadd(vform, rd, rn, rm); + break; + case FSUB_h: + case FSUB_s: + case FSUB_d: + fsub(vform, rd, rn, rm); + break; + case FMUL_h: + case FMUL_s: + case FMUL_d: + fmul(vform, rd, rn, rm); + break; + case FNMUL_h: + case FNMUL_s: + case FNMUL_d: + fnmul(vform, rd, rn, rm); + break; + case FDIV_h: + case FDIV_s: + case FDIV_d: + fdiv(vform, rd, rn, rm); + break; + case FMAX_h: + case FMAX_s: + case FMAX_d: + fmax(vform, rd, rn, rm); + break; + case FMIN_h: + case FMIN_s: + case FMIN_d: + fmin(vform, rd, rn, rm); + break; + case FMAXNM_h: + case FMAXNM_s: + case FMAXNM_d: + fmaxnm(vform, rd, rn, rm); + break; + case FMINNM_h: + case FMINNM_s: + case FMINNM_d: + fminnm(vform, rd, rn, rm); + break; + default: + VIXL_UNREACHABLE(); + } + // Explicitly log the register update whilst we have type information. 
+  LogVRegister(instr->GetRd(), GetPrintRegisterFormatFP(vform));
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
+  AssertSupportedFPCR();
+
+  unsigned fd = instr->GetRd();
+  unsigned fn = instr->GetRn();
+  unsigned fm = instr->GetRm();
+  unsigned fa = instr->GetRa();
+
+  switch (instr->Mask(FPDataProcessing3SourceMask)) {
+    // fd = fa +/- (fn * fm)
+    case FMADD_h:
+      WriteHRegister(fd,
+                     FPMulAdd(ReadHRegister(fa),
+                              ReadHRegister(fn),
+                              ReadHRegister(fm)));
+      break;
+    case FMSUB_h:
+      WriteHRegister(fd,
+                     FPMulAdd(ReadHRegister(fa),
+                              -ReadHRegister(fn),
+                              ReadHRegister(fm)));
+      break;
+    case FMADD_s:
+      WriteSRegister(fd,
+                     FPMulAdd(ReadSRegister(fa),
+                              ReadSRegister(fn),
+                              ReadSRegister(fm)));
+      break;
+    case FMSUB_s:
+      WriteSRegister(fd,
+                     FPMulAdd(ReadSRegister(fa),
+                              -ReadSRegister(fn),
+                              ReadSRegister(fm)));
+      break;
+    case FMADD_d:
+      WriteDRegister(fd,
+                     FPMulAdd(ReadDRegister(fa),
+                              ReadDRegister(fn),
+                              ReadDRegister(fm)));
+      break;
+    case FMSUB_d:
+      WriteDRegister(fd,
+                     FPMulAdd(ReadDRegister(fa),
+                              -ReadDRegister(fn),
+                              ReadDRegister(fm)));
+      break;
+    // Negated variants of the above.
+    case FNMADD_h:
+      WriteHRegister(fd,
+                     FPMulAdd(-ReadHRegister(fa),
+                              -ReadHRegister(fn),
+                              ReadHRegister(fm)));
+      break;
+    case FNMSUB_h:
+      WriteHRegister(fd,
+                     FPMulAdd(-ReadHRegister(fa),
+                              ReadHRegister(fn),
+                              ReadHRegister(fm)));
+      break;
+    case FNMADD_s:
+      WriteSRegister(fd,
+                     FPMulAdd(-ReadSRegister(fa),
+                              -ReadSRegister(fn),
+                              ReadSRegister(fm)));
+      break;
+    case FNMSUB_s:
+      WriteSRegister(fd,
+                     FPMulAdd(-ReadSRegister(fa),
+                              ReadSRegister(fn),
+                              ReadSRegister(fm)));
+      break;
+    case FNMADD_d:
+      WriteDRegister(fd,
+                     FPMulAdd(-ReadDRegister(fa),
+                              -ReadDRegister(fn),
+                              ReadDRegister(fm)));
+      break;
+    case FNMSUB_d:
+      WriteDRegister(fd,
+                     FPMulAdd(-ReadDRegister(fa),
+                              ReadDRegister(fn),
+                              ReadDRegister(fm)));
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+bool Simulator::FPProcessNaNs(const Instruction* instr) {
+  unsigned fd = instr->GetRd();
+  unsigned fn = instr->GetRn();
+  unsigned fm = instr->GetRm();
+  bool done = false;
+
+  if (instr->Mask(FP64) == FP64) {
+    double result = FPProcessNaNs(ReadDRegister(fn), ReadDRegister(fm));
+    if (IsNaN(result)) {
+      WriteDRegister(fd, result);
+      done = true;
+    }
+  } else if (instr->Mask(FP32) == FP32) {
+    float result = FPProcessNaNs(ReadSRegister(fn), ReadSRegister(fm));
+    if (IsNaN(result)) {
+      WriteSRegister(fd, result);
+      done = true;
+    }
+  } else {
+    VIXL_ASSERT(instr->Mask(FP16) == FP16);
+    VIXL_UNIMPLEMENTED();
+  }
+
+  return done;
+}
+
+
+void Simulator::SysOp_W(int op, int64_t val) {
+  switch (op) {
+    case IVAU:
+    case CVAC:
+    case CVAU:
+    case CIVAC: {
+      // Perform a dummy memory access to ensure that we have read access
+      // to the specified address.
+      volatile uint8_t y = Memory::Read<uint8_t>(val);
+      USE(y);
+      // TODO: Implement "case ZVA:".
+      break;
+    }
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+// clang-format off
+#define PAUTH_SYSTEM_MODES(V)                                     \
+  V(A1716, 17, ReadXRegister(16),                      kPACKeyIA) \
+  V(B1716, 17, ReadXRegister(16),                      kPACKeyIB) \
+  V(AZ,    30, 0x00000000,                             kPACKeyIA) \
+  V(BZ,    30, 0x00000000,                             kPACKeyIB) \
+  V(ASP,   30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIA) \
+  V(BSP,   30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIB)
+// clang-format on
+
+
+void Simulator::VisitSystem(const Instruction* instr) {
+  // Some system instructions hijack their Op and Cp fields to represent a
+  // range of immediates instead of indicating a different instruction. This
+  // makes the decoding tricky.
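+  // (Editor's note, not part of the upstream VIXL source.) For example, NOP,
+  // ESB and CSDB all share the HINT encoding and differ only in their
+  // immediate, which is why they are matched below via SystemHintFMask and
+  // then dispatched on GetImmHint() rather than on a per-instruction mask.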
+ if (instr->GetInstructionBits() == XPACLRI) { + WriteXRegister(30, StripPAC(ReadXRegister(30), kInstructionPointer)); + } else if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) { + switch (instr->Mask(SystemPAuthMask)) { +#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \ + case PACI##SUFFIX: \ + WriteXRegister(DST, \ + AddPAC(ReadXRegister(DST), MOD, KEY, kInstructionPointer)); \ + break; \ + case AUTI##SUFFIX: \ + WriteXRegister(DST, \ + AuthPAC(ReadXRegister(DST), \ + MOD, \ + KEY, \ + kInstructionPointer)); \ + break; + + PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS) +#undef DEFINE_PAUTH_FUNCS + } + } else if (instr->Mask(SystemExclusiveMonitorFMask) == + SystemExclusiveMonitorFixed) { + VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX); + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + PrintExclusiveAccessWarning(); + ClearLocalMonitor(); + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + switch (instr->GetImmSystemRegister()) { + case NZCV: + WriteXRegister(instr->GetRt(), ReadNzcv().GetRawValue()); + break; + case FPCR: + WriteXRegister(instr->GetRt(), ReadFpcr().GetRawValue()); + break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + case MSR: { + switch (instr->GetImmSystemRegister()) { + case NZCV: + ReadNzcv().SetRawValue(ReadWRegister(instr->GetRt())); + LogSystemRegister(NZCV); + break; + case FPCR: + ReadFpcr().SetRawValue(ReadWRegister(instr->GetRt())); + LogSystemRegister(FPCR); + break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT); + switch (instr->GetImmHint()) { + case NOP: + case ESB: + case CSDB: + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + __sync_synchronize(); + } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) { + switch (instr->Mask(SystemSysMask)) { + case SYS: + SysOp_W(instr->GetSysOp(), ReadXRegister(instr->GetRt())); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitException(const Instruction* instr) { + switch (instr->Mask(ExceptionMask)) { + case HLT: + switch (instr->GetImmException()) { + case kUnreachableOpcode: + DoUnreachable(instr); + return; + case kTraceOpcode: + DoTrace(instr); + return; + case kLogOpcode: + DoLog(instr); + return; + case kPrintfOpcode: + DoPrintf(instr); + return; + case kRuntimeCallOpcode: + DoRuntimeCall(instr); + return; + case kSetCPUFeaturesOpcode: + case kEnableCPUFeaturesOpcode: + case kDisableCPUFeaturesOpcode: + DoConfigureCPUFeatures(instr); + return; + case kSaveCPUFeaturesOpcode: + DoSaveCPUFeatures(instr); + return; + case kRestoreCPUFeaturesOpcode: + DoRestoreCPUFeatures(instr); + return; + default: + HostBreakpoint(); + return; + } + case BRK: + HostBreakpoint(); + return; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitCrypto2RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCrypto3RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCryptoAES(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitNEON2RegMisc(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + static const NEONFormatMap map_lp = + {{23, 22, 30}, {NF_4H, NF_8H, 
+                                          NF_2S, NF_4S, NF_1D, NF_2D}};
+  VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
+
+  static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
+  VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
+
+  static const NEONFormatMap map_fcvtn = {{22, 30},
+                                          {NF_4H, NF_8H, NF_2S, NF_4S}};
+  VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+
+  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+    // These instructions all use a two bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+    switch (instr->Mask(NEON2RegMiscMask)) {
+      case NEON_REV64:
+        rev64(vf, rd, rn);
+        break;
+      case NEON_REV32:
+        rev32(vf, rd, rn);
+        break;
+      case NEON_REV16:
+        rev16(vf, rd, rn);
+        break;
+      case NEON_SUQADD:
+        suqadd(vf, rd, rn);
+        break;
+      case NEON_USQADD:
+        usqadd(vf, rd, rn);
+        break;
+      case NEON_CLS:
+        cls(vf, rd, rn);
+        break;
+      case NEON_CLZ:
+        clz(vf, rd, rn);
+        break;
+      case NEON_CNT:
+        cnt(vf, rd, rn);
+        break;
+      case NEON_SQABS:
+        abs(vf, rd, rn).SignedSaturate(vf);
+        break;
+      case NEON_SQNEG:
+        neg(vf, rd, rn).SignedSaturate(vf);
+        break;
+      case NEON_CMGT_zero:
+        cmp(vf, rd, rn, 0, gt);
+        break;
+      case NEON_CMGE_zero:
+        cmp(vf, rd, rn, 0, ge);
+        break;
+      case NEON_CMEQ_zero:
+        cmp(vf, rd, rn, 0, eq);
+        break;
+      case NEON_CMLE_zero:
+        cmp(vf, rd, rn, 0, le);
+        break;
+      case NEON_CMLT_zero:
+        cmp(vf, rd, rn, 0, lt);
+        break;
+      case NEON_ABS:
+        abs(vf, rd, rn);
+        break;
+      case NEON_NEG:
+        neg(vf, rd, rn);
+        break;
+      case NEON_SADDLP:
+        saddlp(vf_lp, rd, rn);
+        break;
+      case NEON_UADDLP:
+        uaddlp(vf_lp, rd, rn);
+        break;
+      case NEON_SADALP:
+        sadalp(vf_lp, rd, rn);
+        break;
+      case NEON_UADALP:
+        uadalp(vf_lp, rd, rn);
+        break;
+      case NEON_RBIT_NOT:
+        vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+        switch (instr->GetFPType()) {
+          case 0:
+            not_(vf, rd, rn);
+            break;
+          case 1:
+            rbit(vf, rd, rn);
+            break;
+          default:
+            VIXL_UNIMPLEMENTED();
+        }
+        break;
+    }
+  } else {
+    VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
+    FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+    bool inexact_exception = false;
+
+    // These instructions all use a one bit size field, except XTN, SQXTUN,
+    // SHLL, SQXTN and UQXTN, which use a two bit size field.
+    switch (instr->Mask(NEON2RegMiscFPMask)) {
+      case NEON_FABS:
+        fabs_(fpf, rd, rn);
+        return;
+      case NEON_FNEG:
+        fneg(fpf, rd, rn);
+        return;
+      case NEON_FSQRT:
+        fsqrt(fpf, rd, rn);
+        return;
+      case NEON_FCVTL:
+        if (instr->Mask(NEON_Q)) {
+          fcvtl2(vf_fcvtl, rd, rn);
+        } else {
+          fcvtl(vf_fcvtl, rd, rn);
+        }
+        return;
+      case NEON_FCVTN:
+        if (instr->Mask(NEON_Q)) {
+          fcvtn2(vf_fcvtn, rd, rn);
+        } else {
+          fcvtn(vf_fcvtn, rd, rn);
+        }
+        return;
+      case NEON_FCVTXN:
+        if (instr->Mask(NEON_Q)) {
+          fcvtxn2(vf_fcvtn, rd, rn);
+        } else {
+          fcvtxn(vf_fcvtn, rd, rn);
+        }
+        return;
+
+      // The following instructions break from the switch statement, rather
+      // than return.
+      case NEON_FRINTI:
+        break;  // Use FPCR rounding mode.
+ case NEON_FRINTX: + inexact_exception = true; + break; + case NEON_FRINTA: + fpcr_rounding = FPTieAway; + break; + case NEON_FRINTM: + fpcr_rounding = FPNegativeInfinity; + break; + case NEON_FRINTN: + fpcr_rounding = FPTieEven; + break; + case NEON_FRINTP: + fpcr_rounding = FPPositiveInfinity; + break; + case NEON_FRINTZ: + fpcr_rounding = FPZero; + break; + + case NEON_FCVTNS: + fcvts(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTNU: + fcvtu(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTPS: + fcvts(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTPU: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTMS: + fcvts(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTMU: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTZS: + fcvts(fpf, rd, rn, FPZero); + return; + case NEON_FCVTZU: + fcvtu(fpf, rd, rn, FPZero); + return; + case NEON_FCVTAS: + fcvts(fpf, rd, rn, FPTieAway); + return; + case NEON_FCVTAU: + fcvtu(fpf, rd, rn, FPTieAway); + return; + case NEON_SCVTF: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_UCVTF: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_URSQRTE: + ursqrte(fpf, rd, rn); + return; + case NEON_URECPE: + urecpe(fpf, rd, rn); + return; + case NEON_FRSQRTE: + frsqrte(fpf, rd, rn); + return; + case NEON_FRECPE: + frecpe(fpf, rd, rn, fpcr_rounding); + return; + case NEON_FCMGT_zero: + fcmp_zero(fpf, rd, rn, gt); + return; + case NEON_FCMGE_zero: + fcmp_zero(fpf, rd, rn, ge); + return; + case NEON_FCMEQ_zero: + fcmp_zero(fpf, rd, rn, eq); + return; + case NEON_FCMLE_zero: + fcmp_zero(fpf, rd, rn, le); + return; + case NEON_FCMLT_zero: + fcmp_zero(fpf, rd, rn, lt); + return; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + xtn(vf, rd, rn); + return; + case NEON_SQXTN: + sqxtn(vf, rd, rn); + return; + case NEON_UQXTN: + uqxtn(vf, rd, rn); + return; + case NEON_SQXTUN: + sqxtun(vf, rd, rn); + return; + case NEON_SHLL: + vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + if (instr->Mask(NEON_Q)) { + shll2(vf, rd, rn); + } else { + shll(vf, rd, rn); + } + return; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } + } + + // Only FRINT* instructions fall through the switch above. 
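+    // (Editor's note, not part of the upstream VIXL source.) For an input of
+    // 2.5: FRINTN (FPTieEven) gives 2.0, FRINTA (FPTieAway) gives 3.0,
+    // FRINTM (FPNegativeInfinity) gives 2.0, FRINTP (FPPositiveInfinity)
+    // gives 3.0 and FRINTZ (FPZero) gives 2.0.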
+    frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
+  }
+}
+
+
+void Simulator::VisitNEON2RegMiscFP16(const Instruction* instr) {
+  static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
+  NEONFormatDecoder nfd(instr);
+  VectorFormat fpf = nfd.GetVectorFormat(&map_half);
+
+  FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+
+  switch (instr->Mask(NEON2RegMiscFP16Mask)) {
+    case NEON_SCVTF_H:
+      scvtf(fpf, rd, rn, 0, fpcr_rounding);
+      return;
+    case NEON_UCVTF_H:
+      ucvtf(fpf, rd, rn, 0, fpcr_rounding);
+      return;
+    case NEON_FCVTNS_H:
+      fcvts(fpf, rd, rn, FPTieEven);
+      return;
+    case NEON_FCVTNU_H:
+      fcvtu(fpf, rd, rn, FPTieEven);
+      return;
+    case NEON_FCVTPS_H:
+      fcvts(fpf, rd, rn, FPPositiveInfinity);
+      return;
+    case NEON_FCVTPU_H:
+      fcvtu(fpf, rd, rn, FPPositiveInfinity);
+      return;
+    case NEON_FCVTMS_H:
+      fcvts(fpf, rd, rn, FPNegativeInfinity);
+      return;
+    case NEON_FCVTMU_H:
+      fcvtu(fpf, rd, rn, FPNegativeInfinity);
+      return;
+    case NEON_FCVTZS_H:
+      fcvts(fpf, rd, rn, FPZero);
+      return;
+    case NEON_FCVTZU_H:
+      fcvtu(fpf, rd, rn, FPZero);
+      return;
+    case NEON_FCVTAS_H:
+      fcvts(fpf, rd, rn, FPTieAway);
+      return;
+    case NEON_FCVTAU_H:
+      fcvtu(fpf, rd, rn, FPTieAway);
+      return;
+    case NEON_FRINTI_H:
+      frint(fpf, rd, rn, fpcr_rounding, false);
+      return;
+    case NEON_FRINTX_H:
+      frint(fpf, rd, rn, fpcr_rounding, true);
+      return;
+    case NEON_FRINTA_H:
+      frint(fpf, rd, rn, FPTieAway, false);
+      return;
+    case NEON_FRINTM_H:
+      frint(fpf, rd, rn, FPNegativeInfinity, false);
+      return;
+    case NEON_FRINTN_H:
+      frint(fpf, rd, rn, FPTieEven, false);
+      return;
+    case NEON_FRINTP_H:
+      frint(fpf, rd, rn, FPPositiveInfinity, false);
+      return;
+    case NEON_FRINTZ_H:
+      frint(fpf, rd, rn, FPZero, false);
+      return;
+    case NEON_FABS_H:
+      fabs_(fpf, rd, rn);
+      return;
+    case NEON_FNEG_H:
+      fneg(fpf, rd, rn);
+      return;
+    case NEON_FSQRT_H:
+      fsqrt(fpf, rd, rn);
+      return;
+    case NEON_FRSQRTE_H:
+      frsqrte(fpf, rd, rn);
+      return;
+    case NEON_FRECPE_H:
+      frecpe(fpf, rd, rn, fpcr_rounding);
+      return;
+    case NEON_FCMGT_H_zero:
+      fcmp_zero(fpf, rd, rn, gt);
+      return;
+    case NEON_FCMGE_H_zero:
+      fcmp_zero(fpf, rd, rn, ge);
+      return;
+    case NEON_FCMEQ_H_zero:
+      fcmp_zero(fpf, rd, rn, eq);
+      return;
+    case NEON_FCMLE_H_zero:
+      fcmp_zero(fpf, rd, rn, le);
+      return;
+    case NEON_FCMLT_H_zero:
+      fcmp_zero(fpf, rd, rn, lt);
+      return;
+    default:
+      VIXL_UNIMPLEMENTED();
+      return;
+  }
+}
+
+
+void Simulator::VisitNEON3Same(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr);
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  SimVRegister& rm = ReadVRegister(instr->GetRm());
+
+  if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+    VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+    switch (instr->Mask(NEON3SameLogicalMask)) {
+      case NEON_AND:
+        and_(vf, rd, rn, rm);
+        break;
+      case NEON_ORR:
+        orr(vf, rd, rn, rm);
+        break;
+      case NEON_ORN:
+        orn(vf, rd, rn, rm);
+        break;
+      case NEON_EOR:
+        eor(vf, rd, rn, rm);
+        break;
+      case NEON_BIC:
+        bic(vf, rd, rn, rm);
+        break;
+      case NEON_BIF:
+        bif(vf, rd, rn, rm);
+        break;
+      case NEON_BIT:
+        bit(vf, rd, rn, rm);
+        break;
+      case NEON_BSL:
+        bsl(vf, rd, rn, rm);
+        break;
+      default:
+        VIXL_UNIMPLEMENTED();
+    }
+  } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+    VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+    switch (instr->Mask(NEON3SameFPMask)) {
case NEON_FADD: + fadd(vf, rd, rn, rm); + break; + case NEON_FSUB: + fsub(vf, rd, rn, rm); + break; + case NEON_FMUL: + fmul(vf, rd, rn, rm); + break; + case NEON_FDIV: + fdiv(vf, rd, rn, rm); + break; + case NEON_FMAX: + fmax(vf, rd, rn, rm); + break; + case NEON_FMIN: + fmin(vf, rd, rn, rm); + break; + case NEON_FMAXNM: + fmaxnm(vf, rd, rn, rm); + break; + case NEON_FMINNM: + fminnm(vf, rd, rn, rm); + break; + case NEON_FMLA: + fmla(vf, rd, rn, rm); + break; + case NEON_FMLS: + fmls(vf, rd, rn, rm); + break; + case NEON_FMULX: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD: + fabd(vf, rd, rn, rm); + break; + case NEON_FADDP: + faddp(vf, rd, rn, rm); + break; + case NEON_FMAXP: + fmaxp(vf, rd, rn, rm); + break; + case NEON_FMAXNMP: + fmaxnmp(vf, rd, rn, rm); + break; + case NEON_FMINP: + fminp(vf, rd, rn, rm); + break; + case NEON_FMINNMP: + fminnmp(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + switch (instr->Mask(NEON3SameMask)) { + case NEON_ADD: + add(vf, rd, rn, rm); + break; + case NEON_ADDP: + addp(vf, rd, rn, rm); + break; + case NEON_CMEQ: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST: + cmptst(vf, rd, rn, rm); + break; + case NEON_MLS: + mls(vf, rd, rn, rm); + break; + case NEON_MLA: + mla(vf, rd, rn, rm); + break; + case NEON_MUL: + mul(vf, rd, rn, rm); + break; + case NEON_PMUL: + pmul(vf, rd, rn, rm); + break; + case NEON_SMAX: + smax(vf, rd, rn, rm); + break; + case NEON_SMAXP: + smaxp(vf, rd, rn, rm); + break; + case NEON_SMIN: + smin(vf, rd, rn, rm); + break; + case NEON_SMINP: + sminp(vf, rd, rn, rm); + break; + case NEON_SUB: + sub(vf, rd, rn, rm); + break; + case NEON_UMAX: + umax(vf, rd, rn, rm); + break; + case NEON_UMAXP: + umaxp(vf, rd, rn, rm); + break; + case NEON_UMIN: + umin(vf, rd, rn, rm); + break; + case NEON_UMINP: + uminp(vf, rd, rn, rm); + break; + case NEON_SSHL: + sshl(vf, rd, rn, rm); + break; + case NEON_USHL: + ushl(vf, rd, rn, rm); + break; + case NEON_SABD: + absdiff(vf, rd, rn, rm, true); + break; + case NEON_UABD: + absdiff(vf, rd, rn, rm, false); + break; + case NEON_SABA: + saba(vf, rd, rn, rm); + break; + case NEON_UABA: + uaba(vf, rd, rn, rm); + break; + case NEON_UQADD: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_SQDMULH: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQSHL: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL: + ushl(vf, rd, rn, 
rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + case NEON_UHADD: + add(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_URHADD: + add(vf, rd, rn, rm).Uhalve(vf).Round(vf); + break; + case NEON_SHADD: + add(vf, rd, rn, rm).Halve(vf); + break; + case NEON_SRHADD: + add(vf, rd, rn, rm).Halve(vf).Round(vf); + break; + case NEON_UHSUB: + sub(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_SHSUB: + sub(vf, rd, rn, rm).Halve(vf); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEON3SameFP16(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + VectorFormat vf = nfd.GetVectorFormat(nfd.FP16FormatMap()); + switch (instr->Mask(NEON3SameFP16Mask)) { +#define SIM_FUNC(A, B) \ + case NEON_##A##_H: \ + B(vf, rd, rn, rm); \ + break; + SIM_FUNC(FMAXNM, fmaxnm); + SIM_FUNC(FMLA, fmla); + SIM_FUNC(FADD, fadd); + SIM_FUNC(FMULX, fmulx); + SIM_FUNC(FMAX, fmax); + SIM_FUNC(FRECPS, frecps); + SIM_FUNC(FMINNM, fminnm); + SIM_FUNC(FMLS, fmls); + SIM_FUNC(FSUB, fsub); + SIM_FUNC(FMIN, fmin); + SIM_FUNC(FRSQRTS, frsqrts); + SIM_FUNC(FMAXNMP, fmaxnmp); + SIM_FUNC(FADDP, faddp); + SIM_FUNC(FMUL, fmul); + SIM_FUNC(FMAXP, fmaxp); + SIM_FUNC(FDIV, fdiv); + SIM_FUNC(FMINNMP, fminnmp); + SIM_FUNC(FABD, fabd); + SIM_FUNC(FMINP, fminp); +#undef SIM_FUNC + case NEON_FCMEQ_H: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE_H: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGE_H: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT_H: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FACGT_H: + fabscmp(vf, rd, rn, rm, gt); + break; + default: + VIXL_UNIMPLEMENTED(); + break; + } +} + +void Simulator::VisitNEON3SameExtra(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + int rot = 0; + VectorFormat vf = nfd.GetVectorFormat(); + if (instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) { + rot = instr->GetImmRotFcmlaVec(); + fcmla(vf, rd, rn, rm, rot); + } else if (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD) { + rot = instr->GetImmRotFcadd(); + fcadd(vf, rd, rn, rm, rot); + } else { + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + sdot(vf, rd, rn, rm); + break; + case NEON_SQRDMLAH: + sqrdmlah(vf, rd, rn, rm); + break; + case NEON_UDOT: + udot(vf, rd, rn, rm); + break; + case NEON_SQRDMLSH: + sqrdmlsh(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + break; + } + } +} + + +void Simulator::VisitNEON3Different(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEON3DifferentMask)) { + case NEON_PMULL: + pmull(vf_l, rd, rn, rm); + break; + case NEON_PMULL2: + pmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDL: + uaddl(vf_l, rd, rn, rm); + break; + case NEON_UADDL2: + uaddl2(vf_l, rd, rn, rm); + break; + case NEON_SADDL: + saddl(vf_l, rd, rn, rm); + break; + case NEON_SADDL2: + saddl2(vf_l, rd, rn, rm); + break; + case NEON_USUBL: + 
usubl(vf_l, rd, rn, rm); + break; + case NEON_USUBL2: + usubl2(vf_l, rd, rn, rm); + break; + case NEON_SSUBL: + ssubl(vf_l, rd, rn, rm); + break; + case NEON_SSUBL2: + ssubl2(vf_l, rd, rn, rm); + break; + case NEON_SABAL: + sabal(vf_l, rd, rn, rm); + break; + case NEON_SABAL2: + sabal2(vf_l, rd, rn, rm); + break; + case NEON_UABAL: + uabal(vf_l, rd, rn, rm); + break; + case NEON_UABAL2: + uabal2(vf_l, rd, rn, rm); + break; + case NEON_SABDL: + sabdl(vf_l, rd, rn, rm); + break; + case NEON_SABDL2: + sabdl2(vf_l, rd, rn, rm); + break; + case NEON_UABDL: + uabdl(vf_l, rd, rn, rm); + break; + case NEON_UABDL2: + uabdl2(vf_l, rd, rn, rm); + break; + case NEON_SMLAL: + smlal(vf_l, rd, rn, rm); + break; + case NEON_SMLAL2: + smlal2(vf_l, rd, rn, rm); + break; + case NEON_UMLAL: + umlal(vf_l, rd, rn, rm); + break; + case NEON_UMLAL2: + umlal2(vf_l, rd, rn, rm); + break; + case NEON_SMLSL: + smlsl(vf_l, rd, rn, rm); + break; + case NEON_SMLSL2: + smlsl2(vf_l, rd, rn, rm); + break; + case NEON_UMLSL: + umlsl(vf_l, rd, rn, rm); + break; + case NEON_UMLSL2: + umlsl2(vf_l, rd, rn, rm); + break; + case NEON_SMULL: + smull(vf_l, rd, rn, rm); + break; + case NEON_SMULL2: + smull2(vf_l, rd, rn, rm); + break; + case NEON_UMULL: + umull(vf_l, rd, rn, rm); + break; + case NEON_UMULL2: + umull2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL: + sqdmlal(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL2: + sqdmlal2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL: + sqdmlsl(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL2: + sqdmlsl2(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL: + sqdmull(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL2: + sqdmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDW: + uaddw(vf_l, rd, rn, rm); + break; + case NEON_UADDW2: + uaddw2(vf_l, rd, rn, rm); + break; + case NEON_SADDW: + saddw(vf_l, rd, rn, rm); + break; + case NEON_SADDW2: + saddw2(vf_l, rd, rn, rm); + break; + case NEON_USUBW: + usubw(vf_l, rd, rn, rm); + break; + case NEON_USUBW2: + usubw2(vf_l, rd, rn, rm); + break; + case NEON_SSUBW: + ssubw(vf_l, rd, rn, rm); + break; + case NEON_SSUBW2: + ssubw2(vf_l, rd, rn, rm); + break; + case NEON_ADDHN: + addhn(vf, rd, rn, rm); + break; + case NEON_ADDHN2: + addhn2(vf, rd, rn, rm); + break; + case NEON_RADDHN: + raddhn(vf, rd, rn, rm); + break; + case NEON_RADDHN2: + raddhn2(vf, rd, rn, rm); + break; + case NEON_SUBHN: + subhn(vf, rd, rn, rm); + break; + case NEON_SUBHN2: + subhn2(vf, rd, rn, rm); + break; + case NEON_RSUBHN: + rsubhn(vf, rd, rn, rm); + break; + case NEON_RSUBHN2: + rsubhn2(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONAcrossLanes(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + VectorFormat vf = nfd.GetVectorFormat(&map_half); + switch (instr->Mask(NEONAcrossLanesFP16Mask)) { + case NEON_FMAXV_H: + fmaxv(vf, rd, rn); + break; + case NEON_FMINV_H: + fminv(vf, rd, rn); + break; + case NEON_FMAXNMV_H: + fmaxnmv(vf, rd, rn); + break; + case NEON_FMINNMV_H: + fminnmv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + // The input operand's VectorFormat is passed for these instructions. 
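+    // (Editor's note, not part of the upstream VIXL source.) Across-lanes
+    // reductions read a whole vector but produce a single scalar result, so
+    // the format below describes the source lanes (e.g. 4S for FMAXV on a
+    // 128-bit register) rather than the scalar destination.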
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + fmaxv(vf, rd, rn); + break; + case NEON_FMINV: + fminv(vf, rd, rn); + break; + case NEON_FMAXNMV: + fmaxnmv(vf, rd, rn); + break; + case NEON_FMINNMV: + fminnmv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + addv(vf, rd, rn); + break; + case NEON_SMAXV: + smaxv(vf, rd, rn); + break; + case NEON_SMINV: + sminv(vf, rd, rn); + break; + case NEON_UMAXV: + umaxv(vf, rd, rn); + break; + case NEON_UMINV: + uminv(vf, rd, rn); + break; + case NEON_SADDLV: + saddlv(vf, rd, rn); + break; + case NEON_UADDLV: + uaddlv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEONByIndexedElement(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + VectorFormat vf_r = nfd.GetVectorFormat(); + VectorFormat vf_half = nfd.GetVectorFormat(&map_half); + VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + ByElementOp Op = NULL; + + int rm_reg = instr->GetRm(); + int index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + if (instr->GetNEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->GetNEONM(); + } + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_MUL_byelement: + Op = &Simulator::mul; + vf = vf_r; + break; + case NEON_MLA_byelement: + Op = &Simulator::mla; + vf = vf_r; + break; + case NEON_MLS_byelement: + Op = &Simulator::mls; + vf = vf_r; + break; + case NEON_SQDMULH_byelement: + Op = &Simulator::sqdmulh; + vf = vf_r; + break; + case NEON_SQRDMULH_byelement: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + case NEON_SDOT_byelement: + Op = &Simulator::sdot; + vf = vf_r; + break; + case NEON_SQRDMLAH_byelement: + Op = &Simulator::sqrdmlah; + vf = vf_r; + break; + case NEON_UDOT_byelement: + Op = &Simulator::udot; + vf = vf_r; + break; + case NEON_SQRDMLSH_byelement: + Op = &Simulator::sqrdmlsh; + vf = vf_r; + break; + case NEON_SMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smull2; + } else { + Op = &Simulator::smull; + } + break; + case NEON_UMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umull2; + } else { + Op = &Simulator::umull; + } + break; + case NEON_SMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlal2; + } else { + Op = &Simulator::smlal; + } + break; + case NEON_UMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlal2; + } else { + Op = &Simulator::umlal; + } + break; + case NEON_SMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlsl2; + } else { + Op = &Simulator::smlsl; + } + break; + case NEON_UMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlsl2; + } else { + Op = &Simulator::umlsl; + } + break; + case NEON_SQDMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmull2; + } else { + Op = &Simulator::sqdmull; + } + break; + case NEON_SQDMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlal2; + } else { + Op = &Simulator::sqdmlal; + } + break; + case NEON_SQDMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlsl2; + } else { + Op = &Simulator::sqdmlsl; + } + break; + default: + index = instr->GetNEONH(); + if 
(instr->GetFPType() == 0) { + rm_reg &= 0xf; + index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM(); + } else if ((instr->GetFPType() & 1) == 0) { + index = (index << 1) | instr->GetNEONL(); + } + + vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMUL_byelement: + Op = &Simulator::fmul; + break; + case NEON_FMLA_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement: + Op = &Simulator::fmla; + break; + case NEON_FMLS_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMLS_byelement: + Op = &Simulator::fmls; + break; + case NEON_FMULX_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMULX_byelement: + Op = &Simulator::fmulx; + break; + default: + if (instr->GetNEONSize() == 2) + index = instr->GetNEONH(); + else + index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + vf = vf_r; + fcmla(vf, + rd, + rn, + ReadVRegister(instr->GetRm()), + index, + instr->GetImmRotFcmlaSca()); + return; + default: + VIXL_UNIMPLEMENTED(); + } + } + } + + (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index); +} + + +void Simulator::VisitNEONCopy(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + int imm5 = instr->GetImmNEON5(); + int tz = CountTrailingZeros(imm5, 32); + int reg_index = imm5 >> (tz + 1); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + int imm4 = instr->GetImmNEON4(); + int rn_index = imm4 >> tz; + ins_element(vf, rd, reg_index, rn, rn_index); + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + ins_immediate(vf, rd, reg_index, ReadXRegister(instr->GetRn())); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + uint64_t value = LogicVRegister(rn).Uint(vf, reg_index); + value &= MaxUintFromFormat(vf); + WriteXRegister(instr->GetRd(), value); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) { + int64_t value = LogicVRegister(rn).Int(vf, reg_index); + if (instr->GetNEONQ()) { + WriteXRegister(instr->GetRd(), value); + } else { + WriteWRegister(instr->GetRd(), (int32_t)value); + } + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + dup_element(vf, rd, rn, reg_index); + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + dup_immediate(vf, rd, ReadXRegister(instr->GetRn())); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONExtract(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + int index = instr->GetImmNEONExt(); + ext(vf, rd, rn, rm, index); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + uint64_t addr_base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer); + int reg_size = 
RegisterSizeInBytesFromFormat(vf); + + int reg[4]; + uint64_t addr[4]; + for (int i = 0; i < 4; i++) { + reg[i] = (instr->GetRt() + i) % kNumberOfVRegisters; + addr[i] = addr_base + (i * reg_size); + } + int count = 1; + bool log_read = true; + + // Bit 23 determines whether this is an offset or post-index addressing mode. + // In offset mode, bits 20 to 16 should be zero; these bits encode the + // register or immediate in post-index mode. + if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_4v: + case NEON_LD1_4v_post: + ld1(vf, ReadVRegister(reg[3]), addr[3]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_3v: + case NEON_LD1_3v_post: + ld1(vf, ReadVRegister(reg[2]), addr[2]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_2v: + case NEON_LD1_2v_post: + ld1(vf, ReadVRegister(reg[1]), addr[1]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_1v: + case NEON_LD1_1v_post: + ld1(vf, ReadVRegister(reg[0]), addr[0]); + break; + case NEON_ST1_4v: + case NEON_ST1_4v_post: + st1(vf, ReadVRegister(reg[3]), addr[3]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_3v: + case NEON_ST1_3v_post: + st1(vf, ReadVRegister(reg[2]), addr[2]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_2v: + case NEON_ST1_2v_post: + st1(vf, ReadVRegister(reg[1]), addr[1]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_1v: + case NEON_ST1_1v_post: + st1(vf, ReadVRegister(reg[0]), addr[0]); + log_read = false; + break; + case NEON_LD2_post: + case NEON_LD2: + ld2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]); + count = 2; + break; + case NEON_ST2: + case NEON_ST2_post: + st2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]); + count = 2; + log_read = false; + break; + case NEON_LD3_post: + case NEON_LD3: + ld3(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + addr[0]); + count = 3; + break; + case NEON_ST3: + case NEON_ST3_post: + st3(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + addr[0]); + count = 3; + log_read = false; + break; + case NEON_ST4: + case NEON_ST4_post: + st4(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + ReadVRegister(reg[3]), + addr[0]); + count = 4; + log_read = false; + break; + case NEON_LD4_post: + case NEON_LD4: + ld4(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + ReadVRegister(reg[3]), + addr[0]); + count = 4; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + // Explicitly log the register update whilst we have type information. + for (int i = 0; i < count; i++) { + // For de-interleaving loads, only print the base address. + int lane_size = LaneSizeInBytesFromFormat(vf); + PrintRegisterFormat format = GetPrintRegisterFormatTryFP( + GetPrintRegisterFormatForSize(reg_size, lane_size)); + if (log_read) { + LogVRead(addr_base, reg[i], format); + } else { + LogVWrite(addr_base, reg[i], format); + } + } + + if (addr_mode == PostIndex) { + int rm = instr->GetRm(); + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_base += (rm == 31) ? 
RegisterSizeInBytesFromFormat(vf) * count + : ReadXRegister(rm); + WriteXRegister(instr->GetRn(), addr_base); + } else { + VIXL_ASSERT(addr_mode == Offset); + } +} + + +void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, PostIndex); +} + + +void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode) { + uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer); + int rt = instr->GetRt(); + + // Bit 23 determines whether this is an offset or post-index addressing mode. + // In offset mode, bits 20 to 16 should be zero; these bits encode the + // register or immediate in post-index mode. + if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. + bool do_load = false; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf_t = nfd.GetVectorFormat(); + + VectorFormat vf = kFormat16B; + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b: + case NEON_LD1_b_post: + case NEON_LD2_b: + case NEON_LD2_b_post: + case NEON_LD3_b: + case NEON_LD3_b_post: + case NEON_LD4_b: + case NEON_LD4_b_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_b: + case NEON_ST1_b_post: + case NEON_ST2_b: + case NEON_ST2_b_post: + case NEON_ST3_b: + case NEON_ST3_b_post: + case NEON_ST4_b: + case NEON_ST4_b_post: + break; + + case NEON_LD1_h: + case NEON_LD1_h_post: + case NEON_LD2_h: + case NEON_LD2_h_post: + case NEON_LD3_h: + case NEON_LD3_h_post: + case NEON_LD4_h: + case NEON_LD4_h_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_h: + case NEON_ST1_h_post: + case NEON_ST2_h: + case NEON_ST2_h_post: + case NEON_ST3_h: + case NEON_ST3_h_post: + case NEON_ST4_h: + case NEON_ST4_h_post: + vf = kFormat8H; + break; + case NEON_LD1_s: + case NEON_LD1_s_post: + case NEON_LD2_s: + case NEON_LD2_s_post: + case NEON_LD3_s: + case NEON_LD3_s_post: + case NEON_LD4_s: + case NEON_LD4_s_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_s: + case NEON_ST1_s_post: + case NEON_ST2_s: + case NEON_ST2_s_post: + case NEON_ST3_s: + case NEON_ST3_s_post: + case NEON_ST4_s: + case NEON_ST4_s_post: { + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + VIXL_STATIC_ASSERT((NEON_LD1_s_post | (1 << NEONLSSize_offset)) == + NEON_LD1_d_post); + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + VIXL_STATIC_ASSERT((NEON_ST1_s_post | (1 << NEONLSSize_offset)) == + NEON_ST1_d_post); + vf = ((instr->GetNEONLSSize() & 1) == 0) ? 
+                                                     kFormat4S : kFormat2D;
+      break;
+    }
+
+    case NEON_LD1R:
+    case NEON_LD1R_post: {
+      vf = vf_t;
+      ld1r(vf, ReadVRegister(rt), addr);
+      do_load = true;
+      break;
+    }
+
+    case NEON_LD2R:
+    case NEON_LD2R_post: {
+      vf = vf_t;
+      int rt2 = (rt + 1) % kNumberOfVRegisters;
+      ld2r(vf, ReadVRegister(rt), ReadVRegister(rt2), addr);
+      do_load = true;
+      break;
+    }
+
+    case NEON_LD3R:
+    case NEON_LD3R_post: {
+      vf = vf_t;
+      int rt2 = (rt + 1) % kNumberOfVRegisters;
+      int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+      ld3r(vf, ReadVRegister(rt), ReadVRegister(rt2), ReadVRegister(rt3), addr);
+      do_load = true;
+      break;
+    }
+
+    case NEON_LD4R:
+    case NEON_LD4R_post: {
+      vf = vf_t;
+      int rt2 = (rt + 1) % kNumberOfVRegisters;
+      int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+      int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+      ld4r(vf,
+           ReadVRegister(rt),
+           ReadVRegister(rt2),
+           ReadVRegister(rt3),
+           ReadVRegister(rt4),
+           addr);
+      do_load = true;
+      break;
+    }
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+
+  PrintRegisterFormat print_format =
+      GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
+  // Make sure that the print_format only includes a single lane.
+  print_format =
+      static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
+
+  int esize = LaneSizeInBytesFromFormat(vf);
+  int index_shift = LaneSizeInBytesLog2FromFormat(vf);
+  int lane = instr->GetNEONLSIndex(index_shift);
+  int scale = 0;
+  int rt2 = (rt + 1) % kNumberOfVRegisters;
+  int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+  int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+  switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
+    case NEONLoadStoreSingle1:
+      scale = 1;
+      if (do_load) {
+        ld1(vf, ReadVRegister(rt), lane, addr);
+        LogVRead(addr, rt, print_format, lane);
+      } else {
+        st1(vf, ReadVRegister(rt), lane, addr);
+        LogVWrite(addr, rt, print_format, lane);
+      }
+      break;
+    case NEONLoadStoreSingle2:
+      scale = 2;
+      if (do_load) {
+        ld2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
+        LogVRead(addr, rt, print_format, lane);
+        LogVRead(addr + esize, rt2, print_format, lane);
+      } else {
+        st2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
+        LogVWrite(addr, rt, print_format, lane);
+        LogVWrite(addr + esize, rt2, print_format, lane);
+      }
+      break;
+    case NEONLoadStoreSingle3:
+      scale = 3;
+      if (do_load) {
+        ld3(vf,
+            ReadVRegister(rt),
+            ReadVRegister(rt2),
+            ReadVRegister(rt3),
+            lane,
+            addr);
+        LogVRead(addr, rt, print_format, lane);
+        LogVRead(addr + esize, rt2, print_format, lane);
+        LogVRead(addr + (2 * esize), rt3, print_format, lane);
+      } else {
+        st3(vf,
+            ReadVRegister(rt),
+            ReadVRegister(rt2),
+            ReadVRegister(rt3),
+            lane,
+            addr);
+        LogVWrite(addr, rt, print_format, lane);
+        LogVWrite(addr + esize, rt2, print_format, lane);
+        LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+      }
+      break;
+    case NEONLoadStoreSingle4:
+      scale = 4;
+      if (do_load) {
+        ld4(vf,
+            ReadVRegister(rt),
+            ReadVRegister(rt2),
+            ReadVRegister(rt3),
+            ReadVRegister(rt4),
+            lane,
+            addr);
+        LogVRead(addr, rt, print_format, lane);
+        LogVRead(addr + esize, rt2, print_format, lane);
+        LogVRead(addr + (2 * esize), rt3, print_format, lane);
+        LogVRead(addr + (3 * esize), rt4, print_format, lane);
+      } else {
+        st4(vf,
+            ReadVRegister(rt),
+            ReadVRegister(rt2),
+            ReadVRegister(rt3),
+            ReadVRegister(rt4),
+            lane,
+            addr);
+        LogVWrite(addr, rt, print_format, lane);
+        LogVWrite(addr + esize, rt2, print_format, lane);
+        LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+        LogVWrite(addr + (3 * esize), rt4, print_format, lane);
+      }
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+
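+  // (Editor's note, not part of the upstream VIXL source.) In the post-index
+  // forms handled below, Rm == 31 selects the immediate variant: the base is
+  // advanced by the total number of bytes transferred, e.g. an LD3 of one
+  // S-sized lane advances it by 3 * 4 = 12 bytes.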
if (addr_mode == PostIndex) { + int rm = instr->GetRm(); + int lane_size = LaneSizeInBytesFromFormat(vf); + WriteXRegister(instr->GetRn(), + addr + + ((rm == 31) ? (scale * lane_size) : ReadXRegister(rm))); + } +} + + +void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, PostIndex); +} + + +void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) { + SimVRegister& rd = ReadVRegister(instr->GetRd()); + int cmode = instr->GetNEONCmode(); + int cmode_3_1 = (cmode >> 1) & 7; + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int half_enc = instr->ExtractBit(11); + int q = instr->GetNEONQ(); + int op_bit = instr->GetNEONModImmOp(); + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + // Find the format and immediate value + uint64_t imm = 0; + VectorFormat vform = kFormatUndefined; + switch (cmode_3_1) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + vform = (q == 1) ? kFormat4S : kFormat2S; + imm = imm8 << (8 * cmode_3_1); + break; + case 0x4: + case 0x5: + vform = (q == 1) ? kFormat8H : kFormat4H; + imm = imm8 << (8 * cmode_1); + break; + case 0x6: + vform = (q == 1) ? kFormat4S : kFormat2S; + if (cmode_0 == 0) { + imm = imm8 << 8 | 0x000000ff; + } else { + imm = imm8 << 16 | 0x0000ffff; + } + break; + case 0x7: + if (cmode_0 == 0 && op_bit == 0) { + vform = q ? kFormat16B : kFormat8B; + imm = imm8; + } else if (cmode_0 == 0 && op_bit == 1) { + vform = q ? kFormat2D : kFormat1D; + imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + } else { // cmode_0 == 1, cmode == 0xf. + if (half_enc == 1) { + vform = q ? kFormat8H : kFormat4H; + imm = Float16ToRawbits(instr->GetImmNEONFP16()); + } else if (op_bit == 0) { + vform = q ? kFormat4S : kFormat2S; + imm = FloatToRawbits(instr->GetImmNEONFP32()); + } else if (q == 1) { + vform = kFormat2D; + imm = DoubleToRawbits(instr->GetImmNEONFP64()); + } else { + VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf)); + VisitUnallocated(instr); + } + } + break; + default: + VIXL_UNREACHABLE(); + break; + } + + // Find the operation + NEONModifiedImmediateOp op; + if (cmode_3 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<3> == '1' + if (cmode_2 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<2> == '1' + if (cmode_1 == 0) { + op = op_bit ? 
+                      NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+      } else {  // cmode<1> == '1'
+        if (cmode_0 == 0) {
+          op = NEONModifiedImmediate_MOVI;
+        } else {  // cmode<0> == '1'
+          op = NEONModifiedImmediate_MOVI;
+        }
+      }
+    }
+  }
+
+  // Call the logic function
+  if (op == NEONModifiedImmediate_ORR) {
+    orr(vform, rd, rd, imm);
+  } else if (op == NEONModifiedImmediate_BIC) {
+    bic(vform, rd, rd, imm);
+  } else if (op == NEONModifiedImmediate_MOVI) {
+    movi(vform, rd, imm);
+  } else if (op == NEONModifiedImmediate_MVNI) {
+    mvni(vform, rd, imm);
+  } else {
+    VisitUnimplemented(instr);
+  }
+}
+
+
+void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+
+  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+    // These instructions all use a two bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+    switch (instr->Mask(NEONScalar2RegMiscMask)) {
+      case NEON_CMEQ_zero_scalar:
+        cmp(vf, rd, rn, 0, eq);
+        break;
+      case NEON_CMGE_zero_scalar:
+        cmp(vf, rd, rn, 0, ge);
+        break;
+      case NEON_CMGT_zero_scalar:
+        cmp(vf, rd, rn, 0, gt);
+        break;
+      case NEON_CMLT_zero_scalar:
+        cmp(vf, rd, rn, 0, lt);
+        break;
+      case NEON_CMLE_zero_scalar:
+        cmp(vf, rd, rn, 0, le);
+        break;
+      case NEON_ABS_scalar:
+        abs(vf, rd, rn);
+        break;
+      case NEON_SQABS_scalar:
+        abs(vf, rd, rn).SignedSaturate(vf);
+        break;
+      case NEON_NEG_scalar:
+        neg(vf, rd, rn);
+        break;
+      case NEON_SQNEG_scalar:
+        neg(vf, rd, rn).SignedSaturate(vf);
+        break;
+      case NEON_SUQADD_scalar:
+        suqadd(vf, rd, rn);
+        break;
+      case NEON_USQADD_scalar:
+        usqadd(vf, rd, rn);
+        break;
+      default:
+        VIXL_UNIMPLEMENTED();
+        break;
+    }
+  } else {
+    VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+    FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+
+    // These instructions all use a one bit size field, except SQXTUN, SQXTN
+    // and UQXTN, which use a two bit size field.
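+    // (Editor's note, not part of the upstream VIXL source.) The
+    // two-bit-size narrowing ops (SQXTN, UQXTN, SQXTUN) therefore land in
+    // the default branch of the switch below, where they are decoded with
+    // NEONScalar2RegMiscMask instead.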
+    switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+      case NEON_FRECPE_scalar:
+        frecpe(fpf, rd, rn, fpcr_rounding);
+        break;
+      case NEON_FRECPX_scalar:
+        frecpx(fpf, rd, rn);
+        break;
+      case NEON_FRSQRTE_scalar:
+        frsqrte(fpf, rd, rn);
+        break;
+      case NEON_FCMGT_zero_scalar:
+        fcmp_zero(fpf, rd, rn, gt);
+        break;
+      case NEON_FCMGE_zero_scalar:
+        fcmp_zero(fpf, rd, rn, ge);
+        break;
+      case NEON_FCMEQ_zero_scalar:
+        fcmp_zero(fpf, rd, rn, eq);
+        break;
+      case NEON_FCMLE_zero_scalar:
+        fcmp_zero(fpf, rd, rn, le);
+        break;
+      case NEON_FCMLT_zero_scalar:
+        fcmp_zero(fpf, rd, rn, lt);
+        break;
+      case NEON_SCVTF_scalar:
+        scvtf(fpf, rd, rn, 0, fpcr_rounding);
+        break;
+      case NEON_UCVTF_scalar:
+        ucvtf(fpf, rd, rn, 0, fpcr_rounding);
+        break;
+      case NEON_FCVTNS_scalar:
+        fcvts(fpf, rd, rn, FPTieEven);
+        break;
+      case NEON_FCVTNU_scalar:
+        fcvtu(fpf, rd, rn, FPTieEven);
+        break;
+      case NEON_FCVTPS_scalar:
+        fcvts(fpf, rd, rn, FPPositiveInfinity);
+        break;
+      case NEON_FCVTPU_scalar:
+        fcvtu(fpf, rd, rn, FPPositiveInfinity);
+        break;
+      case NEON_FCVTMS_scalar:
+        fcvts(fpf, rd, rn, FPNegativeInfinity);
+        break;
+      case NEON_FCVTMU_scalar:
+        fcvtu(fpf, rd, rn, FPNegativeInfinity);
+        break;
+      case NEON_FCVTZS_scalar:
+        fcvts(fpf, rd, rn, FPZero);
+        break;
+      case NEON_FCVTZU_scalar:
+        fcvtu(fpf, rd, rn, FPZero);
+        break;
+      case NEON_FCVTAS_scalar:
+        fcvts(fpf, rd, rn, FPTieAway);
+        break;
+      case NEON_FCVTAU_scalar:
+        fcvtu(fpf, rd, rn, FPTieAway);
+        break;
+      case NEON_FCVTXN_scalar:
+        // Unlike all of the other FP instructions above, fcvtxn encodes dest
+        // size S as size<0>=1. There's only one case, so we ignore the form.
+        VIXL_ASSERT(instr->ExtractBit(22) == 1);
+        fcvtxn(kFormatS, rd, rn);
+        break;
+      default:
+        switch (instr->Mask(NEONScalar2RegMiscMask)) {
+          case NEON_SQXTN_scalar:
+            sqxtn(vf, rd, rn);
+            break;
+          case NEON_UQXTN_scalar:
+            uqxtn(vf, rd, rn);
+            break;
+          case NEON_SQXTUN_scalar:
+            sqxtun(vf, rd, rn);
+            break;
+          default:
+            VIXL_UNIMPLEMENTED();
+        }
+    }
+  }
+}
+
+
+void Simulator::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
+  VectorFormat fpf = kFormatH;
+  FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+
+  switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) {
+    case NEON_FRECPE_H_scalar:
+      frecpe(fpf, rd, rn, fpcr_rounding);
+      break;
+    case NEON_FRECPX_H_scalar:
+      frecpx(fpf, rd, rn);
+      break;
+    case NEON_FRSQRTE_H_scalar:
+      frsqrte(fpf, rd, rn);
+      break;
+    case NEON_FCMGT_H_zero_scalar:
+      fcmp_zero(fpf, rd, rn, gt);
+      break;
+    case NEON_FCMGE_H_zero_scalar:
+      fcmp_zero(fpf, rd, rn, ge);
+      break;
+    case NEON_FCMEQ_H_zero_scalar:
+      fcmp_zero(fpf, rd, rn, eq);
+      break;
+    case NEON_FCMLE_H_zero_scalar:
+      fcmp_zero(fpf, rd, rn, le);
+      break;
+    case NEON_FCMLT_H_zero_scalar:
+      fcmp_zero(fpf, rd, rn, lt);
+      break;
+    case NEON_SCVTF_H_scalar:
+      scvtf(fpf, rd, rn, 0, fpcr_rounding);
+      break;
+    case NEON_UCVTF_H_scalar:
+      ucvtf(fpf, rd, rn, 0, fpcr_rounding);
+      break;
+    case NEON_FCVTNS_H_scalar:
+      fcvts(fpf, rd, rn, FPTieEven);
+      break;
+    case NEON_FCVTNU_H_scalar:
+      fcvtu(fpf, rd, rn, FPTieEven);
+      break;
+    case NEON_FCVTPS_H_scalar:
+      fcvts(fpf, rd, rn, FPPositiveInfinity);
+      break;
+    case NEON_FCVTPU_H_scalar:
+      fcvtu(fpf, rd, rn, FPPositiveInfinity);
+      break;
+    case NEON_FCVTMS_H_scalar:
+      fcvts(fpf, rd, rn, FPNegativeInfinity);
+      break;
+    case NEON_FCVTMU_H_scalar:
+      fcvtu(fpf, rd, rn, FPNegativeInfinity);
+      break;
+    case NEON_FCVTZS_H_scalar:
+      fcvts(fpf, rd,
rn, FPZero); + break; + case NEON_FCVTZU_H_scalar: + fcvtu(fpf, rd, rn, FPZero); + break; + case NEON_FCVTAS_H_scalar: + fcvts(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTAU_H_scalar: + fcvtu(fpf, rd, rn, FPTieAway); + break; + } +} + + +void Simulator::VisitNEONScalar3Diff(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + sqdmlal(vf, rd, rn, rm); + break; + case NEON_SQDMLSL_scalar: + sqdmlsl(vf, rd, rn, rm); + break; + case NEON_SQDMULL_scalar: + sqdmull(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalar3Same(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FMULX_scalar: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE_scalar: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT_scalar: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ_scalar: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE_scalar: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT_scalar: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS_scalar: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS_scalar: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD_scalar: + fabd(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + add(vf, rd, rn, rm); + break; + case NEON_SUB_scalar: + sub(vf, rd, rn, rm); + break; + case NEON_CMEQ_scalar: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE_scalar: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT_scalar: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI_scalar: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS_scalar: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST_scalar: + cmptst(vf, rd, rn, rm); + break; + case NEON_USHL_scalar: + ushl(vf, rd, rn, rm); + break; + case NEON_SSHL_scalar: + sshl(vf, rd, rn, rm); + break; + case NEON_SQDMULH_scalar: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH_scalar: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQADD_scalar: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD_scalar: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB_scalar: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB_scalar: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSHL_scalar: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL_scalar: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + 
+
+void Simulator::VisitNEONScalar3SameFP16(const Instruction* instr) {
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  SimVRegister& rm = ReadVRegister(instr->GetRm());
+
+  switch (instr->Mask(NEONScalar3SameFP16Mask)) {
+    case NEON_FABD_H_scalar:
+      fabd(kFormatH, rd, rn, rm);
+      break;
+    case NEON_FMULX_H_scalar:
+      fmulx(kFormatH, rd, rn, rm);
+      break;
+    case NEON_FCMEQ_H_scalar:
+      fcmp(kFormatH, rd, rn, rm, eq);
+      break;
+    case NEON_FCMGE_H_scalar:
+      fcmp(kFormatH, rd, rn, rm, ge);
+      break;
+    case NEON_FCMGT_H_scalar:
+      fcmp(kFormatH, rd, rn, rm, gt);
+      break;
+    case NEON_FACGE_H_scalar:
+      fabscmp(kFormatH, rd, rn, rm, ge);
+      break;
+    case NEON_FACGT_H_scalar:
+      fabscmp(kFormatH, rd, rn, rm, gt);
+      break;
+    case NEON_FRECPS_H_scalar:
+      frecps(kFormatH, rd, rn, rm);
+      break;
+    case NEON_FRSQRTS_H_scalar:
+      frsqrts(kFormatH, rd, rn, rm);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+}
+
+
+void Simulator::VisitNEONScalar3SameExtra(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  SimVRegister& rm = ReadVRegister(instr->GetRm());
+
+  switch (instr->Mask(NEONScalar3SameExtraMask)) {
+    case NEON_SQRDMLAH_scalar:
+      sqrdmlah(vf, rd, rn, rm);
+      break;
+    case NEON_SQRDMLSH_scalar:
+      sqrdmlsh(vf, rd, rn, rm);
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+  VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  ByElementOp Op = NULL;
+
+  int rm_reg = instr->GetRm();
+  int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
+  if (instr->GetNEONSize() == 1) {
+    rm_reg &= 0xf;
+    index = (index << 1) | instr->GetNEONM();
+  }
+
+  switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+    case NEON_SQDMULL_byelement_scalar:
+      Op = &Simulator::sqdmull;
+      break;
+    case NEON_SQDMLAL_byelement_scalar:
+      Op = &Simulator::sqdmlal;
+      break;
+    case NEON_SQDMLSL_byelement_scalar:
+      Op = &Simulator::sqdmlsl;
+      break;
+    case NEON_SQDMULH_byelement_scalar:
+      Op = &Simulator::sqdmulh;
+      vf = vf_r;
+      break;
+    case NEON_SQRDMULH_byelement_scalar:
+      Op = &Simulator::sqrdmulh;
+      vf = vf_r;
+      break;
+    case NEON_SQRDMLAH_byelement_scalar:
+      Op = &Simulator::sqrdmlah;
+      vf = vf_r;
+      break;
+    case NEON_SQRDMLSH_byelement_scalar:
+      Op = &Simulator::sqrdmlsh;
+      vf = vf_r;
+      break;
+    default:
+      vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+      index = instr->GetNEONH();
+      if (instr->GetFPType() == 0) {
+        index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM();
+        rm_reg &= 0xf;
+        vf = kFormatH;
+      } else if ((instr->GetFPType() & 1) == 0) {
+        index = (index << 1) | instr->GetNEONL();
+      }
+      switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+        case NEON_FMUL_H_byelement_scalar:
+        case NEON_FMUL_byelement_scalar:
+          Op = &Simulator::fmul;
+          break;
+        case NEON_FMLA_H_byelement_scalar:
+        case NEON_FMLA_byelement_scalar:
+          Op = &Simulator::fmla;
+          break;
+        case NEON_FMLS_H_byelement_scalar:
+        case NEON_FMLS_byelement_scalar:
+          Op = &Simulator::fmls;
+          break;
+        case NEON_FMULX_H_byelement_scalar:
+        case NEON_FMULX_byelement_scalar:
+          Op = &Simulator::fmulx;
+          break;
+        default:
+          VIXL_UNIMPLEMENTED();
+      }
+  }
+
+  (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index);
+}
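+
+// NOTE (worked example, not upstream VIXL code): for the integer by-element
+// forms the element index is built from the H:L(:M) bits. With
+// GetNEONSize() == 1 (16-bit elements), H:L:M form a 3-bit lane index and
+// only V0-V15 are addressable, hence rm_reg &= 0xf. For instance H = 1,
+// L = 0, M = 1 yields index = ((1 << 1) | 0) and then
+// index = (index << 1) | 1 = 5, i.e. lane 5 of the Rm register.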
+
+
+void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+
+  if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+    int imm5 = instr->GetImmNEON5();
+    int tz = CountTrailingZeros(imm5, 32);
+    int rn_index = imm5 >> (tz + 1);
+    dup_element(vf, rd, rn, rn_index);
+  } else {
+    VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarPairwiseFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  switch (instr->Mask(NEONScalarPairwiseMask)) {
+    case NEON_ADDP_scalar: {
+      // All pairwise operations except ADDP use bit U to differentiate FP16
+      // from FP32/FP64 variations.
+      NEONFormatDecoder nfd_addp(instr, NEONFormatDecoder::FPScalarFormatMap());
+      addp(nfd_addp.GetVectorFormat(), rd, rn);
+      break;
+    }
+    case NEON_FADDP_h_scalar:
+    case NEON_FADDP_scalar:
+      faddp(vf, rd, rn);
+      break;
+    case NEON_FMAXP_h_scalar:
+    case NEON_FMAXP_scalar:
+      fmaxp(vf, rd, rn);
+      break;
+    case NEON_FMAXNMP_h_scalar:
+    case NEON_FMAXNMP_scalar:
+      fmaxnmp(vf, rd, rn);
+      break;
+    case NEON_FMINP_h_scalar:
+    case NEON_FMINP_scalar:
+      fminp(vf, rd, rn);
+      break;
+    case NEON_FMINNMP_h_scalar:
+    case NEON_FMINNMP_scalar:
+      fminnmp(vf, rd, rn);
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+
+  static const NEONFormatMap map =
+      {{22, 21, 20, 19},
+       {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+        NF_D,     NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}};
+  NEONFormatDecoder nfd(instr, &map);
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh());
+  int immhimmb = instr->GetImmNEONImmhImmb();
+  int right_shift = (16 << highestSetBit) - immhimmb;
+  int left_shift = immhimmb - (8 << highestSetBit);
+  switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+    case NEON_SHL_scalar:
+      shl(vf, rd, rn, left_shift);
+      break;
+    case NEON_SLI_scalar:
+      sli(vf, rd, rn, left_shift);
+      break;
+    case NEON_SQSHL_imm_scalar:
+      sqshl(vf, rd, rn, left_shift);
+      break;
+    case NEON_UQSHL_imm_scalar:
+      uqshl(vf, rd, rn, left_shift);
+      break;
+    case NEON_SQSHLU_scalar:
+      sqshlu(vf, rd, rn, left_shift);
+      break;
+    case NEON_SRI_scalar:
+      sri(vf, rd, rn, right_shift);
+      break;
+    case NEON_SSHR_scalar:
+      sshr(vf, rd, rn, right_shift);
+      break;
+    case NEON_USHR_scalar:
+      ushr(vf, rd, rn, right_shift);
+      break;
+    case NEON_SRSHR_scalar:
+      sshr(vf, rd, rn, right_shift).Round(vf);
+      break;
+    case NEON_URSHR_scalar:
+      ushr(vf, rd, rn, right_shift).Round(vf);
+      break;
+    case NEON_SSRA_scalar:
+      ssra(vf, rd, rn, right_shift);
+      break;
+    case NEON_USRA_scalar:
+      usra(vf, rd, rn, right_shift);
+      break;
+    case NEON_SRSRA_scalar:
+      srsra(vf, rd, rn, right_shift);
+      break;
+    case NEON_URSRA_scalar:
+      ursra(vf, rd, rn, right_shift);
+      break;
+    case NEON_UQSHRN_scalar:
+      uqshrn(vf, rd, rn, right_shift);
+      break;
+    case NEON_UQRSHRN_scalar:
+      uqrshrn(vf, rd, rn, right_shift);
+      break;
+    case NEON_SQSHRN_scalar:
+      sqshrn(vf, rd, rn, right_shift);
+      break;
+    case NEON_SQRSHRN_scalar:
+      sqrshrn(vf, rd, rn, right_shift);
+      break;
+    case NEON_SQSHRUN_scalar:
+      sqshrun(vf, rd, rn, right_shift);
+      break;
+    case NEON_SQRSHRUN_scalar:
+      sqrshrun(vf, rd, rn, right_shift);
+      break;
+    case NEON_FCVTZS_imm_scalar:
+      fcvts(vf, rd, rn, FPZero, right_shift);
+      break;
+    case NEON_FCVTZU_imm_scalar:
+      fcvtu(vf, rd, rn, FPZero, right_shift);
+      break;
+    case NEON_SCVTF_imm_scalar:
+      scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+      break;
+    case NEON_UCVTF_imm_scalar:
+      ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
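+
+// NOTE (worked example, not upstream VIXL code): immh:immb encode both the
+// element size and the shift amount. With highestSetBit = HSB(immh), a
+// right shift is (16 << HSB) - immh:immb and a left shift is
+// immh:immb - (8 << HSB). E.g. for immh = 0b0100 (S elements, HSB = 2) and
+// immh:immb = 0b0100110 = 38, right_shift = 64 - 38 = 26 and
+// left_shift = 38 - 32 = 6; the opcode decides which of the two applies.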
+
+
+void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
+
+  // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+  // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+  static const NEONFormatMap map =
+      {{22, 21, 20, 19, 30},
+       {NF_UNDEF, NF_UNDEF, NF_8B,    NF_16B, NF_4H,    NF_8H, NF_4H,    NF_8H,
+        NF_2S,    NF_4S,    NF_2S,    NF_4S,  NF_2S,    NF_4S, NF_2S,    NF_4S,
+        NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,  NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+        NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,  NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
+  NEONFormatDecoder nfd(instr, &map);
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+  static const NEONFormatMap map_l =
+      {{22, 21, 20, 19},
+       {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
+  VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
+
+  int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh());
+  int immhimmb = instr->GetImmNEONImmhImmb();
+  int right_shift = (16 << highestSetBit) - immhimmb;
+  int left_shift = immhimmb - (8 << highestSetBit);
+
+  switch (instr->Mask(NEONShiftImmediateMask)) {
+    case NEON_SHL:
+      shl(vf, rd, rn, left_shift);
+      break;
+    case NEON_SLI:
+      sli(vf, rd, rn, left_shift);
+      break;
+    case NEON_SQSHLU:
+      sqshlu(vf, rd, rn, left_shift);
+      break;
+    case NEON_SRI:
+      sri(vf, rd, rn, right_shift);
+      break;
+    case NEON_SSHR:
+      sshr(vf, rd, rn, right_shift);
+      break;
+    case NEON_USHR:
+      ushr(vf, rd, rn, right_shift);
+      break;
+    case NEON_SRSHR:
+      sshr(vf, rd, rn, right_shift).Round(vf);
+      break;
+    case NEON_URSHR:
+      ushr(vf, rd, rn, right_shift).Round(vf);
+      break;
+    case NEON_SSRA:
+      ssra(vf, rd, rn, right_shift);
+      break;
+    case NEON_USRA:
+      usra(vf, rd, rn, right_shift);
+      break;
+    case NEON_SRSRA:
+      srsra(vf, rd, rn, right_shift);
+      break;
+    case NEON_URSRA:
+      ursra(vf, rd, rn, right_shift);
+      break;
+    case NEON_SQSHL_imm:
+      sqshl(vf, rd, rn, left_shift);
+      break;
+    case NEON_UQSHL_imm:
+      uqshl(vf, rd, rn, left_shift);
+      break;
+    case NEON_SCVTF_imm:
+      scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+      break;
+    case NEON_UCVTF_imm:
+      ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+      break;
+    case NEON_FCVTZS_imm:
+      fcvts(vf, rd, rn, FPZero, right_shift);
+      break;
+    case NEON_FCVTZU_imm:
+      fcvtu(vf, rd, rn, FPZero, right_shift);
+      break;
+    case NEON_SSHLL:
+      vf = vf_l;
+      if (instr->Mask(NEON_Q)) {
+        sshll2(vf, rd, rn, left_shift);
+      } else {
+        sshll(vf, rd, rn, left_shift);
+      }
+      break;
+    case NEON_USHLL:
+      vf = vf_l;
+      if (instr->Mask(NEON_Q)) {
+        ushll2(vf, rd, rn, left_shift);
+      } else {
+        ushll(vf, rd, rn, left_shift);
+      }
+      break;
+    case NEON_SHRN:
+      if (instr->Mask(NEON_Q)) {
+        shrn2(vf, rd, rn, right_shift);
+      } else {
+        shrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_RSHRN:
+      if (instr->Mask(NEON_Q)) {
+        rshrn2(vf, rd, rn, right_shift);
+      } else {
+        rshrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_UQSHRN:
+      if (instr->Mask(NEON_Q)) {
+        uqshrn2(vf, rd, rn, right_shift);
+      } else {
+        uqshrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_UQRSHRN:
+      if (instr->Mask(NEON_Q)) {
+        uqrshrn2(vf, rd, rn, right_shift);
+      } else {
+        uqrshrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_SQSHRN:
+      if (instr->Mask(NEON_Q)) {
+        sqshrn2(vf, rd, rn, right_shift);
+      } else {
+        sqshrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_SQRSHRN:
+      if (instr->Mask(NEON_Q)) {
+        sqrshrn2(vf, rd, rn, right_shift);
+      } else {
+        sqrshrn(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_SQSHRUN:
+      if (instr->Mask(NEON_Q)) {
+        sqshrun2(vf, rd, rn, right_shift);
+      } else {
+        sqshrun(vf, rd, rn, right_shift);
+      }
+      break;
+    case NEON_SQRSHRUN:
+      if (instr->Mask(NEON_Q)) {
+        sqrshrun2(vf, rd, rn, right_shift);
+      } else {
+        sqrshrun(vf, rd, rn, right_shift);
+      }
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
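+
+// NOTE (illustrative, not upstream VIXL code): a NEONFormatMap lists the
+// instruction bits to concatenate (MSB first) and a table indexed by the
+// resulting value. For the map above, bits {22, 21, 20, 19, 30} = 0b00101
+// select table entry 5, i.e. NF_8H: immh = 0b0010 with Q = 1 decodes as an
+// 8H arrangement, matching the "001x1->8H" comment.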
+
+
+void Simulator::VisitNEONTable(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  SimVRegister& rn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfVRegisters);
+  SimVRegister& rn3 = ReadVRegister((instr->GetRn() + 2) % kNumberOfVRegisters);
+  SimVRegister& rn4 = ReadVRegister((instr->GetRn() + 3) % kNumberOfVRegisters);
+  SimVRegister& rm = ReadVRegister(instr->GetRm());
+
+  switch (instr->Mask(NEONTableMask)) {
+    case NEON_TBL_1v:
+      tbl(vf, rd, rn, rm);
+      break;
+    case NEON_TBL_2v:
+      tbl(vf, rd, rn, rn2, rm);
+      break;
+    case NEON_TBL_3v:
+      tbl(vf, rd, rn, rn2, rn3, rm);
+      break;
+    case NEON_TBL_4v:
+      tbl(vf, rd, rn, rn2, rn3, rn4, rm);
+      break;
+    case NEON_TBX_1v:
+      tbx(vf, rd, rn, rm);
+      break;
+    case NEON_TBX_2v:
+      tbx(vf, rd, rn, rn2, rm);
+      break;
+    case NEON_TBX_3v:
+      tbx(vf, rd, rn, rn2, rn3, rm);
+      break;
+    case NEON_TBX_4v:
+      tbx(vf, rd, rn, rn2, rn3, rn4, rm);
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+void Simulator::VisitNEONPerm(const Instruction* instr) {
+  NEONFormatDecoder nfd(instr);
+  VectorFormat vf = nfd.GetVectorFormat();
+
+  SimVRegister& rd = ReadVRegister(instr->GetRd());
+  SimVRegister& rn = ReadVRegister(instr->GetRn());
+  SimVRegister& rm = ReadVRegister(instr->GetRm());
+
+  switch (instr->Mask(NEONPermMask)) {
+    case NEON_TRN1:
+      trn1(vf, rd, rn, rm);
+      break;
+    case NEON_TRN2:
+      trn2(vf, rd, rn, rm);
+      break;
+    case NEON_UZP1:
+      uzp1(vf, rd, rn, rm);
+      break;
+    case NEON_UZP2:
+      uzp2(vf, rd, rn, rm);
+      break;
+    case NEON_ZIP1:
+      zip1(vf, rd, rn, rm);
+      break;
+    case NEON_ZIP2:
+      zip2(vf, rd, rn, rm);
+      break;
+    default:
+      VIXL_UNIMPLEMENTED();
+  }
+}
+
+
+void Simulator::DoUnreachable(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kUnreachableOpcode));
+
+  fprintf(stream_,
+          "Hit UNREACHABLE marker at pc=%p.\n",
+          reinterpret_cast<const void*>(instr));
+  abort();
+}
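+
+// NOTE (editorial sketch, not upstream VIXL code): the Do* handlers below
+// implement simulator pseudo-instructions. Each one is an HLT with a
+// reserved immediate (kUnreachableOpcode, kTraceOpcode, ...), optionally
+// followed by raw data words embedded directly in the instruction stream;
+// the handler reads that data with memcpy and then skips past it by writing
+// the PC explicitly.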
+
+
+void Simulator::DoTrace(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kTraceOpcode));
+
+  // Read the arguments encoded inline in the instruction stream.
+  uint32_t parameters;
+  uint32_t command;
+
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+  memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
+
+  switch (command) {
+    case TRACE_ENABLE:
+      SetTraceParameters(GetTraceParameters() | parameters);
+      break;
+    case TRACE_DISABLE:
+      SetTraceParameters(GetTraceParameters() & ~parameters);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  WritePc(instr->GetInstructionAtOffset(kTraceLength));
+}
+
+
+void Simulator::DoLog(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kLogOpcode));
+
+  // Read the arguments encoded inline in the instruction stream.
+  uint32_t parameters;
+
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+
+  // We don't support a one-shot LOG_DISASM.
+  VIXL_ASSERT((parameters & LOG_DISASM) == 0);
+  // Print the requested information.
+  if (parameters & LOG_SYSREGS) PrintSystemRegisters();
+  if (parameters & LOG_REGS) PrintRegisters();
+  if (parameters & LOG_VREGS) PrintVRegisters();
+
+  WritePc(instr->GetInstructionAtOffset(kLogLength));
+}
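+
+// NOTE (illustrative layout, not upstream VIXL code; the offsets are the
+// kTrace* constants above, not literals): a trace marker in the generated
+// code looks roughly like
+//
+//   hlt #kTraceOpcode
+//   .word parameters   // at kTraceParamsOffset
+//   .word command      // at kTraceCommandOffset
+//
+// so `instr + offset` addresses plain data. This is why sizeof(*instr) must
+// be 1: the pointer arithmetic has to be byte-granular.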
+
+
+void Simulator::DoPrintf(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kPrintfOpcode));
+
+  // Read the arguments encoded inline in the instruction stream.
+  uint32_t arg_count;
+  uint32_t arg_pattern_list;
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
+  memcpy(&arg_pattern_list,
+         instr + kPrintfArgPatternListOffset,
+         sizeof(arg_pattern_list));
+
+  VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+  VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+  // We need to call the host printf function with a set of arguments defined
+  // by arg_pattern_list. Because we don't know the types and sizes of the
+  // arguments, this is very difficult to do in a robust and portable way. To
+  // work around the problem, we pick apart the format string, and print one
+  // format placeholder at a time.
+
+  // Allocate space for the format string. We take a copy, so we can modify
+  // it. Leave enough space for one extra character per expected argument
+  // (plus the '\0' termination).
+  const char* format_base = ReadRegister<const char*>(0);
+  VIXL_ASSERT(format_base != NULL);
+  size_t length = strlen(format_base) + 1;
+  char* const format = new char[length + arg_count];
+
+  // A list of chunks, each with exactly one format placeholder.
+  const char* chunks[kPrintfMaxArgCount];
+
+  // Copy the format string and search for format placeholders.
+  uint32_t placeholder_count = 0;
+  char* format_scratch = format;
+  for (size_t i = 0; i < length; i++) {
+    if (format_base[i] != '%') {
+      *format_scratch++ = format_base[i];
+    } else {
+      if (format_base[i + 1] == '%') {
+        // Ignore explicit "%%" sequences.
+        *format_scratch++ = format_base[i];
+        i++;
+        // Chunks after the first are passed as format strings to printf, so
+        // we need to escape '%' characters in those chunks.
+        if (placeholder_count > 0) *format_scratch++ = format_base[i];
+      } else {
+        VIXL_CHECK(placeholder_count < arg_count);
+        // Insert '\0' before placeholders, and store their locations.
+        *format_scratch++ = '\0';
+        chunks[placeholder_count++] = format_scratch;
+        *format_scratch++ = format_base[i];
+      }
+    }
+  }
+  VIXL_CHECK(placeholder_count == arg_count);
+
+  // Finally, call printf with each chunk, passing the appropriate register
+  // argument. Normally, printf returns the number of bytes transmitted, so
+  // we can emulate a single printf call by adding the result from each
+  // chunk. If any call returns a negative (error) value, though, just return
+  // that value.
+
+  printf("%s", clr_printf);
+
+  // Because '\0' is inserted before each placeholder, the first string in
+  // 'format' contains no format placeholders and should be printed
+  // literally.
+  int result = printf("%s", format);
+  int pcs_r = 1;  // Start at x1. x0 holds the format string.
+  int pcs_f = 0;  // Start at d0.
+  if (result >= 0) {
+    for (uint32_t i = 0; i < placeholder_count; i++) {
+      int part_result = -1;
+
+      uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+      arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+      switch (arg_pattern) {
+        case kPrintfArgW:
+          part_result = printf(chunks[i], ReadWRegister(pcs_r++));
+          break;
+        case kPrintfArgX:
+          part_result = printf(chunks[i], ReadXRegister(pcs_r++));
+          break;
+        case kPrintfArgD:
+          part_result = printf(chunks[i], ReadDRegister(pcs_f++));
+          break;
+        default:
+          VIXL_UNREACHABLE();
+      }
+
+      if (part_result < 0) {
+        // Handle error values.
+        result = part_result;
+        break;
+      }
+
+      result += part_result;
+    }
+  }
+
+  printf("%s", clr_normal);
+
+  // Printf returns its result in x0 (just like the C library's printf).
+  WriteXRegister(0, result);
+
+  // The printf parameters are inlined in the code, so skip them.
+  WritePc(instr->GetInstructionAtOffset(kPrintfLength));
+
+  // Set LR as if we'd just called a native printf function.
+  WriteLr(ReadPc());
+
+  delete[] format;
+}
+
+
+#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+void Simulator::DoRuntimeCall(const Instruction* instr) {
+  VIXL_STATIC_ASSERT(kRuntimeCallAddressSize == sizeof(uintptr_t));
+  // The appropriate `Simulator::SimulateRuntimeCall()` wrapper and the
+  // function to call are passed inlined in the assembly.
+  uintptr_t call_wrapper_address =
+      Memory::Read<uintptr_t>(instr + kRuntimeCallWrapperOffset);
+  uintptr_t function_address =
+      Memory::Read<uintptr_t>(instr + kRuntimeCallFunctionOffset);
+  RuntimeCallType call_type = static_cast<RuntimeCallType>(
+      Memory::Read<uint32_t>(instr + kRuntimeCallTypeOffset));
+  auto runtime_call_wrapper =
+      reinterpret_cast<void (*)(Simulator*, uintptr_t)>(call_wrapper_address);
+
+  if (call_type == kCallRuntime) {
+    WriteRegister(kLinkRegCode,
+                  instr->GetInstructionAtOffset(kRuntimeCallLength));
+  }
+  runtime_call_wrapper(this, function_address);
+  // Read the return address from `lr` and write it into `pc`.
+  WritePc(ReadRegister<Instruction*>(kLinkRegCode));
+}
+#else
+void Simulator::DoRuntimeCall(const Instruction* instr) {
+  USE(instr);
+  VIXL_UNREACHABLE();
+}
+#endif
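+
+// NOTE (sketch under assumptions, not upstream VIXL code): the wrapper whose
+// address is read above is expected to be a templated trampoline, roughly:
+//
+//   template <typename R, typename... A>
+//   static void SimulateRuntimeCall(Simulator* sim, uintptr_t target) {
+//     // Unpack arguments from the simulated x0..x7/d0..d7, invoke the
+//     // native function at `target`, and write the result back into the
+//     // simulated return registers.
+//   }
+//
+// Only its address is embedded in the instruction stream, so the simulator
+// itself stays agnostic of the callee's signature.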
+
+
+void Simulator::DoConfigureCPUFeatures(const Instruction* instr) {
+  VIXL_ASSERT(instr->Mask(ExceptionMask) == HLT);
+
+  typedef ConfigureCPUFeaturesElementType ElementType;
+  VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <
+              std::numeric_limits<ElementType>::max());
+
+  // k{Set,Enable,Disable}CPUFeatures have the same parameter encoding.
+
+  size_t element_size = sizeof(ElementType);
+  size_t offset = kConfigureCPUFeaturesListOffset;
+
+  // Read the kNone-terminated list of features.
+  CPUFeatures parameters;
+  while (true) {
+    ElementType feature = Memory::Read<ElementType>(instr + offset);
+    offset += element_size;
+    if (feature == static_cast<ElementType>(CPUFeatures::kNone)) break;
+    parameters.Combine(static_cast<CPUFeatures::Feature>(feature));
+  }
+
+  switch (instr->GetImmException()) {
+    case kSetCPUFeaturesOpcode:
+      SetCPUFeatures(parameters);
+      break;
+    case kEnableCPUFeaturesOpcode:
+      GetCPUFeatures()->Combine(parameters);
+      break;
+    case kDisableCPUFeaturesOpcode:
+      GetCPUFeatures()->Remove(parameters);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+      break;
+  }
+
+  WritePc(instr->GetInstructionAtOffset(AlignUp(offset, kInstructionSize)));
+}
+
+
+void Simulator::DoSaveCPUFeatures(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kSaveCPUFeaturesOpcode));
+  USE(instr);
+
+  saved_cpu_features_.push_back(*GetCPUFeatures());
+}
+
+
+void Simulator::DoRestoreCPUFeatures(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kRestoreCPUFeaturesOpcode));
+  USE(instr);
+
+  SetCPUFeatures(saved_cpu_features_.back());
+  saved_cpu_features_.pop_back();
+}
+
+
+}  // namespace aarch64
+}  // namespace vixl
+
+#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
diff --git a/dep/vixl/src/code-buffer-vixl.cc b/dep/vixl/src/code-buffer-vixl.cc
new file mode 100644
index 000000000..0fdd373fe
--- /dev/null
+++ b/dep/vixl/src/code-buffer-vixl.cc
@@ -0,0 +1,178 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+extern "C" {
+#include <sys/mman.h>
+}
+
+#include "code-buffer-vixl.h"
+#include "utils-vixl.h"
+
+namespace vixl {
+
+
+CodeBuffer::CodeBuffer(size_t capacity)
+    : buffer_(NULL),
+      managed_(true),
+      cursor_(NULL),
+      dirty_(false),
+      capacity_(capacity) {
+  if (capacity_ == 0) {
+    return;
+  }
+#ifdef VIXL_CODE_BUFFER_MALLOC
+  buffer_ = reinterpret_cast<byte*>(malloc(capacity_));
+#elif defined(VIXL_CODE_BUFFER_MMAP)
+  buffer_ = reinterpret_cast<byte*>(mmap(NULL,
+                                         capacity,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_PRIVATE | MAP_ANONYMOUS,
+                                         -1,
+                                         0));
+#else
+#error Unknown code buffer allocator.
+#endif
+  VIXL_CHECK(buffer_ != NULL);
+  // AArch64 instructions must be word aligned; we assert that the default
+  // allocator always returns word-aligned memory.
+  VIXL_ASSERT(IsWordAligned(buffer_));
+
+  cursor_ = buffer_;
+}
+
+
+CodeBuffer::CodeBuffer(byte* buffer, size_t capacity)
+    : buffer_(reinterpret_cast<byte*>(buffer)),
+      managed_(false),
+      cursor_(reinterpret_cast<byte*>(buffer)),
+      dirty_(false),
+      capacity_(capacity) {
+  VIXL_ASSERT(buffer_ != NULL);
+}
+
+
+CodeBuffer::~CodeBuffer() {
+  VIXL_ASSERT(!IsDirty());
+  if (managed_) {
+#ifdef VIXL_CODE_BUFFER_MALLOC
+    free(buffer_);
+#elif defined(VIXL_CODE_BUFFER_MMAP)
+    munmap(buffer_, capacity_);
+#else
+#error Unknown code buffer allocator.
+#endif
+  }
+}
+
+
+#ifdef VIXL_CODE_BUFFER_MMAP
+void CodeBuffer::SetExecutable() {
+  int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC);
+  VIXL_CHECK(ret == 0);
+}
+#endif
+
+
+#ifdef VIXL_CODE_BUFFER_MMAP
+void CodeBuffer::SetWritable() {
+  int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE);
+  VIXL_CHECK(ret == 0);
+}
+#endif
+
+
+void CodeBuffer::EmitString(const char* string) {
+  VIXL_ASSERT(HasSpaceFor(strlen(string) + 1));
+  char* dst = reinterpret_cast<char*>(cursor_);
+  dirty_ = true;
+  char* null_char = stpcpy(dst, string);
+  cursor_ = reinterpret_cast<byte*>(null_char) + 1;
+}
+
+
+void CodeBuffer::EmitData(const void* data, size_t size) {
+  VIXL_ASSERT(HasSpaceFor(size));
+  dirty_ = true;
+  memcpy(cursor_, data, size);
+  cursor_ = cursor_ + size;
+}
+
+
+void CodeBuffer::UpdateData(size_t offset, const void* data, size_t size) {
+  dirty_ = true;
+  byte* dst = buffer_ + offset;
+  VIXL_ASSERT(dst + size <= cursor_);
+  memcpy(dst, data, size);
+}
+
+
+void CodeBuffer::Align() {
+  byte* end = AlignUp(cursor_, 4);
+  const size_t padding_size = end - cursor_;
+  VIXL_ASSERT(padding_size <= 4);
+  EmitZeroedBytes(static_cast<int>(padding_size));
+}
+
+void CodeBuffer::EmitZeroedBytes(int n) {
+  EnsureSpaceFor(n);
+  dirty_ = true;
+  memset(cursor_, 0, n);
+  cursor_ += n;
+}
+
+void CodeBuffer::Reset() {
+#ifdef VIXL_DEBUG
+  if (managed_) {
+    // Fill with zeros (there is no useful value common to A32 and T32).
+    memset(buffer_, 0, capacity_);
+  }
+#endif
+  cursor_ = buffer_;
+  SetClean();
+}
+
+
+void CodeBuffer::Grow(size_t new_capacity) {
+  VIXL_ASSERT(managed_);
+  VIXL_ASSERT(new_capacity > capacity_);
+  ptrdiff_t cursor_offset = GetCursorOffset();
+#ifdef VIXL_CODE_BUFFER_MALLOC
+  buffer_ = static_cast<byte*>(realloc(buffer_, new_capacity));
+  VIXL_CHECK(buffer_ != NULL);
+#elif defined(VIXL_CODE_BUFFER_MMAP)
+  buffer_ = static_cast<byte*>(
+      mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE));
+  VIXL_CHECK(buffer_ != MAP_FAILED);
+#else
+#error Unknown code buffer allocator.
+#endif
+
+  cursor_ = buffer_ + cursor_offset;
+  capacity_ = new_capacity;
+}
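+
+// NOTE (usage sketch, not upstream VIXL code): with the mmap backend a
+// typical lifecycle alternates the page protections, roughly:
+//
+//   CodeBuffer buf(4096);      // RW anonymous mapping
+//   buf.EmitData(code, size);  // write instructions (marks the buffer dirty)
+//   buf.SetExecutable();       // flip to RX before running the code
+//   // ... execute ...
+//   buf.SetWritable();         // flip back to RW before patching
+//
+// (This assumes the buffer has been marked clean, e.g. by the assembler's
+// finalization step, before it is destroyed.)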
+
+
+}  // namespace vixl
diff --git a/dep/vixl/src/compiler-intrinsics-vixl.cc b/dep/vixl/src/compiler-intrinsics-vixl.cc
new file mode 100644
index 000000000..ae182c7d8
--- /dev/null
+++ b/dep/vixl/src/compiler-intrinsics-vixl.cc
@@ -0,0 +1,144 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "compiler-intrinsics-vixl.h"
+
+namespace vixl {
+
+
+int CountLeadingSignBitsFallBack(int64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+  if (value >= 0) {
+    return CountLeadingZeros(value, width) - 1;
+  } else {
+    return CountLeadingZeros(~value, width) - 1;
+  }
+}
+
+
+int CountLeadingZerosFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+  if (value == 0) {
+    return width;
+  }
+  int count = 0;
+  value = value << (64 - width);
+  if ((value & UINT64_C(0xffffffff00000000)) == 0) {
+    count += 32;
+    value = value << 32;
+  }
+  if ((value & UINT64_C(0xffff000000000000)) == 0) {
+    count += 16;
+    value = value << 16;
+  }
+  if ((value & UINT64_C(0xff00000000000000)) == 0) {
+    count += 8;
+    value = value << 8;
+  }
+  if ((value & UINT64_C(0xf000000000000000)) == 0) {
+    count += 4;
+    value = value << 4;
+  }
+  if ((value & UINT64_C(0xc000000000000000)) == 0) {
+    count += 2;
+    value = value << 2;
+  }
+  if ((value & UINT64_C(0x8000000000000000)) == 0) {
+    count += 1;
+  }
+  count += (value == 0);
+  return count;
+}
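+
+// NOTE (worked example, not upstream VIXL code): the fallback narrows the
+// search by halving the probe window each step. For value = 1, width = 32:
+// the value is first shifted into the top 32 bits, the 32-bit probe fails
+// (the set bit is already in the upper half), and the remaining probes add
+// 16 + 8 + 4 + 2 + 1 = 31, which is indeed CountLeadingZeros(1, 32).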
+
+
+int CountSetBitsFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+
+  // Mask out unused bits to ensure that they are not counted.
+  value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));
+
+  // Add up the set bits.
+  // The algorithm works by adding pairs of bit fields together iteratively,
+  // where the size of each bit field doubles each time.
+  // An example for an 8-bit value:
+  //   Bits:   h   g   f   e   d   c   b   a
+  //            \  |    \  |    \  |    \  |
+  //   value =  h+g      f+e     d+c     b+a
+  //              \       |       \       |
+  //   value =     h+g+f+e         d+c+b+a
+  //                  \                |
+  //   value =         h+g+f+e+d+c+b+a
+  const uint64_t kMasks[] = {
+      UINT64_C(0x5555555555555555),
+      UINT64_C(0x3333333333333333),
+      UINT64_C(0x0f0f0f0f0f0f0f0f),
+      UINT64_C(0x00ff00ff00ff00ff),
+      UINT64_C(0x0000ffff0000ffff),
+      UINT64_C(0x00000000ffffffff),
+  };
+
+  for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
+    int shift = 1 << i;
+    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
+  }
+
+  return static_cast<int>(value);
+}
+
+
+int CountTrailingZerosFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+  int count = 0;
+  value = value << (64 - width);
+  if ((value & UINT64_C(0xffffffff)) == 0) {
+    count += 32;
+    value = value >> 32;
+  }
+  if ((value & 0xffff) == 0) {
+    count += 16;
+    value = value >> 16;
+  }
+  if ((value & 0xff) == 0) {
+    count += 8;
+    value = value >> 8;
+  }
+  if ((value & 0xf) == 0) {
+    count += 4;
+    value = value >> 4;
+  }
+  if ((value & 0x3) == 0) {
+    count += 2;
+    value = value >> 2;
+  }
+  if ((value & 0x1) == 0) {
+    count += 1;
+  }
+  count += (value == 0);
+  return count - (64 - width);
+}
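+
+// NOTE (worked example, not upstream VIXL code): for the 8-bit value
+// 0b01101110 (five set bits), the pairwise pass yields the 2-bit fields
+// 01 01 10 01, the next pass the nibble sums 0010 and 0011, and the final
+// pass 00000101 = 5. CountSetBitsFallBack applies exactly this scheme with
+// the 64-bit masks in kMasks.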
+
+
+}  // namespace vixl
diff --git a/dep/vixl/src/cpu-features.cc b/dep/vixl/src/cpu-features.cc
new file mode 100644
index 000000000..c3666700e
--- /dev/null
+++ b/dep/vixl/src/cpu-features.cc
@@ -0,0 +1,211 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <ostream>
+
+#include "cpu-features.h"
+#include "globals-vixl.h"
+#include "utils-vixl.h"
+
+namespace vixl {
+
+static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
+  if (feature == CPUFeatures::kNone) {
+    return 0;
+  } else {
+    // Check that the shift is well-defined, and that the feature is valid.
+    VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
+                       (sizeof(uint64_t) * 8));
+    VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
+    return UINT64_C(1) << feature;
+  }
+}
+
+CPUFeatures::CPUFeatures(Feature feature0,
+                         Feature feature1,
+                         Feature feature2,
+                         Feature feature3)
+    : features_(0) {
+  Combine(feature0, feature1, feature2, feature3);
+}
+
+CPUFeatures CPUFeatures::All() {
+  CPUFeatures all;
+  // Check that the shift is well-defined.
+  VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
+  all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
+  return all;
+}
+
+CPUFeatures CPUFeatures::InferFromOS() {
+  // TODO: Actually infer features from the OS.
+  return CPUFeatures();
+}
+
+void CPUFeatures::Combine(const CPUFeatures& other) {
+  features_ |= other.features_;
+}
+
+void CPUFeatures::Combine(Feature feature0,
+                          Feature feature1,
+                          Feature feature2,
+                          Feature feature3) {
+  features_ |= MakeFeatureMask(feature0);
+  features_ |= MakeFeatureMask(feature1);
+  features_ |= MakeFeatureMask(feature2);
+  features_ |= MakeFeatureMask(feature3);
+}
+
+void CPUFeatures::Remove(const CPUFeatures& other) {
+  features_ &= ~other.features_;
+}
+
+void CPUFeatures::Remove(Feature feature0,
+                         Feature feature1,
+                         Feature feature2,
+                         Feature feature3) {
+  features_ &= ~MakeFeatureMask(feature0);
+  features_ &= ~MakeFeatureMask(feature1);
+  features_ &= ~MakeFeatureMask(feature2);
+  features_ &= ~MakeFeatureMask(feature3);
+}
+
+CPUFeatures CPUFeatures::With(const CPUFeatures& other) const {
+  CPUFeatures f(*this);
+  f.Combine(other);
+  return f;
+}
+
+CPUFeatures CPUFeatures::With(Feature feature0,
+                              Feature feature1,
+                              Feature feature2,
+                              Feature feature3) const {
+  CPUFeatures f(*this);
+  f.Combine(feature0, feature1, feature2, feature3);
+  return f;
+}
+
+CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const {
+  CPUFeatures f(*this);
+  f.Remove(other);
+  return f;
+}
+
+CPUFeatures CPUFeatures::Without(Feature feature0,
+                                 Feature feature1,
+                                 Feature feature2,
+                                 Feature feature3) const {
+  CPUFeatures f(*this);
+  f.Remove(feature0, feature1, feature2, feature3);
+  return f;
+}
+
+bool CPUFeatures::Has(const CPUFeatures& other) const {
+  return (features_ & other.features_) == other.features_;
+}
+
+bool CPUFeatures::Has(Feature feature0,
+                      Feature feature1,
+                      Feature feature2,
+                      Feature feature3) const {
+  uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) |
+                  MakeFeatureMask(feature2) | MakeFeatureMask(feature3);
+  return (features_ & mask) == mask;
+}
+
+size_t CPUFeatures::Count() const { return CountSetBits(features_); }
+
+std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature feature) {
+  // clang-format off
+  switch (feature) {
+#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \
+    case CPUFeatures::SYMBOL:                      \
+      return os << NAME;
+VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE)
+#undef VIXL_FORMAT_FEATURE
+    case CPUFeatures::kNone:
+      return os << "none";
+    case CPUFeatures::kNumberOfFeatures:
+      VIXL_UNREACHABLE();
+  }
+  // clang-format on
+  VIXL_UNREACHABLE();
+  return os;
+}
+
+CPUFeatures::const_iterator CPUFeatures::begin() const {
+  if (features_ == 0) return const_iterator(this, kNone);
+
+  int feature_number = CountTrailingZeros(features_);
+  vixl::CPUFeatures::Feature feature =
+      static_cast<CPUFeatures::Feature>(feature_number);
+  return const_iterator(this, feature);
+}
+
+CPUFeatures::const_iterator CPUFeatures::end() const {
+  return const_iterator(this, kNone);
+}
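+
+// NOTE (usage sketch, not upstream VIXL code): CPUFeatures behaves as a
+// small value-type bit set, e.g.:
+//
+//   CPUFeatures f(CPUFeatures::kFP, CPUFeatures::kNEON);
+//   f.Combine(CPUFeatures::kCRC32);
+//   if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { /* ... */ }
+//   for (CPUFeatures::Feature feature : f) { /* visit each set feature */ }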
+
+std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) {
+  CPUFeatures::const_iterator it = features.begin();
+  while (it != features.end()) {
+    os << *it;
+    ++it;
+    if (it != features.end()) os << ", ";
+  }
+  return os;
+}
+
+bool CPUFeaturesConstIterator::operator==(
+    const CPUFeaturesConstIterator& other) const {
+  VIXL_ASSERT(IsValid());
+  return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_);
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++() {  // Prefix
+  VIXL_ASSERT(IsValid());
+  do {
+    // Find the next feature. The order is unspecified.
+    feature_ = static_cast<CPUFeatures::Feature>(feature_ + 1);
+    if (feature_ == CPUFeatures::kNumberOfFeatures) {
+      feature_ = CPUFeatures::kNone;
+      VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1);
+    }
+    VIXL_ASSERT(CPUFeatures::kNone <= feature_);
+    VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures);
+    // cpu_features_->Has(kNone) is always true, so this will terminate even
+    // if the features list is empty.
+  } while (!cpu_features_->Has(feature_));
+  return feature_;
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) {  // Postfix
+  CPUFeatures::Feature result = feature_;
+  ++(*this);
+  return result;
+}
+
+}  // namespace vixl
diff --git a/dep/vixl/src/utils-vixl.cc b/dep/vixl/src/utils-vixl.cc
new file mode 100644
index 000000000..41b558686
--- /dev/null
+++ b/dep/vixl/src/utils-vixl.cc
@@ -0,0 +1,555 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdio>
+
+#include "utils-vixl.h"
+
+namespace vixl {
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
+const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
+
+// Floating-point zero values.
+const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
+const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
+
+// Floating-point infinity values.
+const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
+const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
+const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
+const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
+const double kFP64PositiveInfinity =
+    RawbitsToDouble(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+    RawbitsToDouble(UINT64_C(0xfff0000000000000));
+
+bool IsZero(Float16 value) {
+  uint16_t bits = Float16ToRawbits(value);
+  return (bits == Float16ToRawbits(kFP16PositiveZero) ||
+          bits == Float16ToRawbits(kFP16NegativeZero));
+}
+
+uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
+
+uint32_t FloatToRawbits(float value) {
+  uint32_t bits = 0;
+  memcpy(&bits, &value, 4);
+  return bits;
+}
+
+
+uint64_t DoubleToRawbits(double value) {
+  uint64_t bits = 0;
+  memcpy(&bits, &value, 8);
+  return bits;
+}
+
+
+Float16 RawbitsToFloat16(uint16_t bits) {
+  Float16 f;
+  f.rawbits_ = bits;
+  return f;
+}
+
+
+float RawbitsToFloat(uint32_t bits) {
+  float value = 0.0;
+  memcpy(&value, &bits, 4);
+  return value;
+}
+
+
+double RawbitsToDouble(uint64_t bits) {
+  double value = 0.0;
+  memcpy(&value, &bits, 8);
+  return value;
+}
+
+
+uint32_t Float16Sign(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(15, 15, rawbits);
+}
+
+
+uint32_t Float16Exp(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(14, 10, rawbits);
+}
+
+uint32_t Float16Mantissa(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(9, 0, rawbits);
+}
+
+
+uint32_t FloatSign(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(31, 31, rawbits);
+}
+
+
+uint32_t FloatExp(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(30, 23, rawbits);
+}
+
+
+uint32_t FloatMantissa(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(22, 0, rawbits);
+}
+
+
+uint32_t DoubleSign(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
+}
+
+
+uint32_t DoubleExp(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
+}
+
+
+uint64_t DoubleMantissa(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return ExtractUnsignedBitfield64(51, 0, rawbits);
+}
+
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+                                 uint16_t exp,
+                                 uint16_t mantissa) {
+  uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
+  return RawbitsToFloat16(bits);
+}
+
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+  return RawbitsToFloat(bits);
+}
+
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+  return RawbitsToDouble(bits);
+}
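+
+// NOTE (worked example, not upstream VIXL code): the Pack helpers simply
+// reassemble the IEEE-754 fields. FloatPack(0, 127, 0) rebuilds 0x3f800000,
+// i.e. 1.0f (biased exponent 127, empty mantissa), and
+// FloatPack(1, 128, 1 << 22) is 0xc0400000, i.e. -3.0f.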
+
+
+int Float16Classify(Float16 value) {
+  uint16_t bits = Float16ToRawbits(value);
+  uint16_t exponent_max = (1 << 5) - 1;
+  uint16_t exponent_mask = exponent_max << 10;
+  uint16_t mantissa_mask = (1 << 10) - 1;
+
+  uint16_t exponent = (bits & exponent_mask) >> 10;
+  uint16_t mantissa = bits & mantissa_mask;
+  if (exponent == 0) {
+    if (mantissa == 0) {
+      return FP_ZERO;
+    }
+    return FP_SUBNORMAL;
+  } else if (exponent == exponent_max) {
+    if (mantissa == 0) {
+      return FP_INFINITE;
+    }
+    return FP_NAN;
+  }
+  return FP_NORMAL;
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+  VIXL_ASSERT((reg_size % 8) == 0);
+  int count = 0;
+  for (unsigned i = 0; i < (reg_size / 16); i++) {
+    if ((imm & 0xffff) == 0) {
+      count++;
+    }
+    imm >>= 16;
+  }
+  return count;
+}
+
+
+int BitCount(uint64_t value) { return CountSetBits(value); }
+
+// Float16 definitions.
+
+Float16::Float16(double dvalue) {
+  rawbits_ =
+      Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
+}
+
+namespace internal {
+
+SimFloat16 SimFloat16::operator-() const {
+  return RawbitsToFloat16(rawbits_ ^ 0x8000);
+}
+
+// SimFloat16 definitions.
+SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
+  return static_cast<float>(*this) + static_cast<float>(rhs);
+}
+
+SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
+  return static_cast<float>(*this) - static_cast<float>(rhs);
+}
+
+SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
+  return static_cast<float>(*this) * static_cast<float>(rhs);
+}
+
+SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
+  return static_cast<float>(*this) / static_cast<float>(rhs);
+}
+
+bool SimFloat16::operator<(SimFloat16 rhs) const {
+  return static_cast<float>(*this) < static_cast<float>(rhs);
+}
+
+bool SimFloat16::operator>(SimFloat16 rhs) const {
+  return static_cast<float>(*this) > static_cast<float>(rhs);
+}
+
+bool SimFloat16::operator==(SimFloat16 rhs) const {
+  if (IsNaN(*this) || IsNaN(rhs)) {
+    return false;
+  } else if (IsZero(rhs) && IsZero(*this)) {
+    // +0 and -0 should be treated as equal.
+    return true;
+  }
+  return this->rawbits_ == rhs.rawbits_;
+}
+
+bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
+
+bool SimFloat16::operator==(double rhs) const {
+  return static_cast<double>(*this) == static_cast<double>(rhs);
+}
+
+SimFloat16::operator double() const {
+  return FPToDouble(*this, kIgnoreDefaultNaN);
+}
+
+Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
+
+}  // namespace internal
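+
+// NOTE (illustrative, not upstream VIXL code): SimFloat16 has no native
+// half-precision arithmetic; every operator round-trips through float or
+// double via the conversion operator, so roughly:
+//
+//   SimFloat16 one = ...;        // 1.0 in half precision (bits 0x3c00)
+//   SimFloat16 two = one + one;  // evaluated as 1.0f + 1.0f, then narrowed
+//
+// with the result converted back to half precision when it is stored.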
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
+  uint16_t bits = Float16ToRawbits(value);
+  uint32_t sign = bits >> 15;
+  uint32_t exponent =
+      ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
+                                kFloat16MantissaBits,
+                                bits);
+  uint32_t mantissa =
+      ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
+
+  switch (Float16Classify(value)) {
+    case FP_ZERO:
+      return (sign == 0) ? 0.0f : -0.0f;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+    case FP_SUBNORMAL: {
+      // Calculate shift required to put mantissa into the most-significant
+      // bits of the destination mantissa.
+      int shift = CountLeadingZeros(mantissa << (32 - 10));
+
+      // Shift mantissa and discard implicit '1'.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+      mantissa &= (1 << kFloatMantissaBits) - 1;
+
+      // Adjust the exponent for the shift applied, and rebias.
+      exponent = exponent - shift + (-15 + 127);
+      break;
+    }
+
+    case FP_NAN:
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred entirely, except that the
+      //    top bit is forced to '1', making the result a quiet NaN. The
+      //    unused (low-order) payload bits are set to 0.
+      exponent = (1 << kFloatExponentBits) - 1;
+
+      // Increase bits in mantissa, making low-order bits 0.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+      mantissa |= 1 << 22;  // Force a quiet NaN.
+      break;
+
+    case FP_NORMAL:
+      // Increase bits in mantissa, making low-order bits 0.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+      // Change exponent bias.
+      exponent += (-15 + 127);
+      break;
+
+    default:
+      VIXL_UNREACHABLE();
+  }
+  return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
+                        mantissa);
+}
+
+
+float FPToFloat(double value,
+                FPRounding round_mode,
+                UseDefaultNaN DN,
+                bool* exception) {
+  // Only the FPTieEven rounding mode is implemented.
+  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+  USE(round_mode);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint64_t raw = DoubleToRawbits(value);
+
+      uint32_t sign = raw >> 63;
+      uint32_t exponent = (1 << 8) - 1;
+      uint32_t payload =
+          static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
+      payload |= (1 << 22);  // Force a quiet NaN.
+
+      return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_INFINITE: {
+      // In a C++ cast, any value representable in the target type will be
+      // unchanged. This is always the case for +/-0.0 and infinities.
+      return static_cast<float>(value);
+    }
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert double-to-float as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+      uint64_t raw = DoubleToRawbits(value);
+      // Extract the IEEE-754 double components.
+      uint32_t sign = raw >> 63;
+      // Extract the exponent and remove the IEEE-754 encoding bias.
+      int32_t exponent =
+          static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
+      // Extract the mantissa and add the implicit '1' bit.
+      uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+      if (std::fpclassify(value) == FP_NORMAL) {
+        mantissa |= (UINT64_C(1) << 52);
+      }
+      return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return value;
+}
+
+// TODO: We should consider implementing a full FPToDouble(Float16)
+// conversion function (for performance reasons).
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
+  // We can rely on implicit float to double conversion here.
+  return FPToFloat(value, DN, exception);
+}
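+
+// NOTE (worked example, not upstream VIXL code): for the smallest half
+// subnormal, raw 0x0001 (mantissa = 1, exponent field = 0), the
+// FP_SUBNORMAL path above computes shift = 9, leaving a zero float mantissa
+// and a rebiased exponent of 0 - 9 + (-15 + 127) = 103, i.e.
+// 2^(103 - 127) = 2^-24, exactly the value the half encoding represents.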
+
+
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred entirely, except that the
+      //    top bit is forced to '1', making the result a quiet NaN. The
+      //    unused (low-order) payload bits are set to 0.
+      uint32_t raw = FloatToRawbits(value);
+
+      uint64_t sign = raw >> 31;
+      uint64_t exponent = (1 << 11) - 1;
+      uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
+      payload <<= (52 - 23);           // The unused low-order bits should be 0.
+      payload |= (UINT64_C(1) << 51);  // Force a quiet NaN.
+
+      return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_NORMAL:
+    case FP_SUBNORMAL:
+    case FP_INFINITE: {
+      // All other inputs are preserved in a standard cast, because every
+      // value representable using an IEEE-754 float is also representable
+      // using an IEEE-754 double.
+      return static_cast<double>(value);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return static_cast<double>(value);
+}
+
+
+Float16 FPToFloat16(float value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception) {
+  // Only the FPTieEven rounding mode is implemented.
+  VIXL_ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  uint32_t raw = FloatToRawbits(value);
+  int32_t sign = raw >> 31;
+  int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
+  uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+                                    : Float16ToRawbits(kFP16NegativeInfinity);
+      result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+      return RawbitsToFloat16(result);
+    }
+
+    case FP_ZERO:
+      return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert float-to-half as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+
+      // Add the implicit '1' bit to the mantissa.
+      mantissa += (1 << 23);
+      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return kFP16PositiveZero;
+}
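+
+// NOTE (illustrative, under assumptions; not upstream VIXL code):
+// FPRoundToFloat16 is expected to apply round-to-nearest-even to the
+// extended mantissa. For example 1.0f enters the FP_NORMAL path with
+// sign = 0, unbiased exponent = 0 and mantissa = 1 << 23 (after the implicit
+// bit is added), and rounds to the half-precision bit pattern 0x3c00.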
+
+
+Float16 FPToFloat16(double value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception) {
+  // Only the FPTieEven rounding mode is implemented.
+  VIXL_ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  uint64_t raw = DoubleToRawbits(value);
+  int32_t sign = raw >> 63;
+  int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
+  uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+                                    : Float16ToRawbits(kFP16NegativeInfinity);
+      result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+      return RawbitsToFloat16(result);
+    }
+
+    case FP_ZERO:
+      return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert double-to-half as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+
+      // Add the implicit '1' bit to the mantissa.
+      mantissa += (UINT64_C(1) << 52);
+      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return kFP16PositiveZero;
+}
+
+}  // namespace vixl