From c36d27296e3d78d007d8cc0dad2da359a9ef768d Mon Sep 17 00:00:00 2001 From: Anthony Pesch Date: Wed, 21 Jun 2017 23:54:37 -0400 Subject: [PATCH] added vixl to build --- CMakeLists.txt | 52 +- deps/vixl/AUTHORS | 8 + deps/vixl/CPPLINT.cfg | 38 + deps/vixl/LICENCE | 30 + deps/vixl/README.md | 186 + deps/vixl/SConstruct | 574 + deps/vixl/src/aarch32/assembler-aarch32.cc | 27824 +++++++ deps/vixl/src/aarch32/assembler-aarch32.h | 6126 ++ deps/vixl/src/aarch32/constants-aarch32.cc | 855 + deps/vixl/src/aarch32/constants-aarch32.h | 541 + deps/vixl/src/aarch32/disasm-aarch32.cc | 68002 ++++++++++++++++ deps/vixl/src/aarch32/disasm-aarch32.h | 2616 + deps/vixl/src/aarch32/instructions-aarch32.cc | 743 + deps/vixl/src/aarch32/instructions-aarch32.h | 1355 + deps/vixl/src/aarch32/location-aarch32.cc | 152 + deps/vixl/src/aarch32/location-aarch32.h | 409 + .../src/aarch32/macro-assembler-aarch32.cc | 2312 + .../src/aarch32/macro-assembler-aarch32.h | 10907 +++ deps/vixl/src/aarch32/operands-aarch32.cc | 562 + deps/vixl/src/aarch32/operands-aarch32.h | 927 + deps/vixl/src/aarch64/abi-aarch64.h | 167 + deps/vixl/src/aarch64/assembler-aarch64.cc | 4848 ++ deps/vixl/src/aarch64/assembler-aarch64.h | 3386 + deps/vixl/src/aarch64/constants-aarch64.h | 2129 + deps/vixl/src/aarch64/cpu-aarch64.cc | 178 + deps/vixl/src/aarch64/cpu-aarch64.h | 86 + deps/vixl/src/aarch64/debugger-aarch64.cc | 1635 + deps/vixl/src/aarch64/debugger-aarch64.h | 109 + deps/vixl/src/aarch64/decoder-aarch64.cc | 898 + deps/vixl/src/aarch64/decoder-aarch64.h | 275 + deps/vixl/src/aarch64/disasm-aarch64.cc | 4911 ++ deps/vixl/src/aarch64/disasm-aarch64.h | 183 + deps/vixl/src/aarch64/instructions-aarch64.cc | 706 + deps/vixl/src/aarch64/instructions-aarch64.h | 872 + deps/vixl/src/aarch64/instrument-aarch64.cc | 857 + deps/vixl/src/aarch64/instrument-aarch64.h | 117 + deps/vixl/src/aarch64/logic-aarch64.cc | 4962 ++ .../src/aarch64/macro-assembler-aarch64.cc | 2904 + .../src/aarch64/macro-assembler-aarch64.h | 3499 + deps/vixl/src/aarch64/operands-aarch64.cc | 528 + deps/vixl/src/aarch64/operands-aarch64.h | 961 + deps/vixl/src/aarch64/simulator-aarch64.cc | 5403 ++ deps/vixl/src/aarch64/simulator-aarch64.h | 3212 + .../src/aarch64/simulator-constants-aarch64.h | 157 + deps/vixl/src/assembler-base-vixl.h | 101 + deps/vixl/src/code-buffer-vixl.cc | 178 + deps/vixl/src/code-buffer-vixl.h | 189 + deps/vixl/src/code-generation-scopes-vixl.h | 322 + deps/vixl/src/compiler-intrinsics-vixl.cc | 144 + deps/vixl/src/compiler-intrinsics-vixl.h | 160 + deps/vixl/src/globals-vixl.h | 287 + deps/vixl/src/invalset-vixl.h | 915 + deps/vixl/src/macro-assembler-interface.h | 75 + deps/vixl/src/platform-vixl.h | 39 + deps/vixl/src/pool-manager-impl.h | 522 + deps/vixl/src/pool-manager.h | 556 + deps/vixl/src/utils-vixl.cc | 150 + deps/vixl/src/utils-vixl.h | 889 + 58 files changed, 171708 insertions(+), 21 deletions(-) create mode 100644 deps/vixl/AUTHORS create mode 100644 deps/vixl/CPPLINT.cfg create mode 100644 deps/vixl/LICENCE create mode 100644 deps/vixl/README.md create mode 100644 deps/vixl/SConstruct create mode 100644 deps/vixl/src/aarch32/assembler-aarch32.cc create mode 100644 deps/vixl/src/aarch32/assembler-aarch32.h create mode 100644 deps/vixl/src/aarch32/constants-aarch32.cc create mode 100644 deps/vixl/src/aarch32/constants-aarch32.h create mode 100644 deps/vixl/src/aarch32/disasm-aarch32.cc create mode 100644 deps/vixl/src/aarch32/disasm-aarch32.h create mode 100644 deps/vixl/src/aarch32/instructions-aarch32.cc create mode 100644 
deps/vixl/src/aarch32/instructions-aarch32.h create mode 100644 deps/vixl/src/aarch32/location-aarch32.cc create mode 100644 deps/vixl/src/aarch32/location-aarch32.h create mode 100644 deps/vixl/src/aarch32/macro-assembler-aarch32.cc create mode 100644 deps/vixl/src/aarch32/macro-assembler-aarch32.h create mode 100644 deps/vixl/src/aarch32/operands-aarch32.cc create mode 100644 deps/vixl/src/aarch32/operands-aarch32.h create mode 100644 deps/vixl/src/aarch64/abi-aarch64.h create mode 100644 deps/vixl/src/aarch64/assembler-aarch64.cc create mode 100644 deps/vixl/src/aarch64/assembler-aarch64.h create mode 100644 deps/vixl/src/aarch64/constants-aarch64.h create mode 100644 deps/vixl/src/aarch64/cpu-aarch64.cc create mode 100644 deps/vixl/src/aarch64/cpu-aarch64.h create mode 100644 deps/vixl/src/aarch64/debugger-aarch64.cc create mode 100644 deps/vixl/src/aarch64/debugger-aarch64.h create mode 100644 deps/vixl/src/aarch64/decoder-aarch64.cc create mode 100644 deps/vixl/src/aarch64/decoder-aarch64.h create mode 100644 deps/vixl/src/aarch64/disasm-aarch64.cc create mode 100644 deps/vixl/src/aarch64/disasm-aarch64.h create mode 100644 deps/vixl/src/aarch64/instructions-aarch64.cc create mode 100644 deps/vixl/src/aarch64/instructions-aarch64.h create mode 100644 deps/vixl/src/aarch64/instrument-aarch64.cc create mode 100644 deps/vixl/src/aarch64/instrument-aarch64.h create mode 100644 deps/vixl/src/aarch64/logic-aarch64.cc create mode 100644 deps/vixl/src/aarch64/macro-assembler-aarch64.cc create mode 100644 deps/vixl/src/aarch64/macro-assembler-aarch64.h create mode 100644 deps/vixl/src/aarch64/operands-aarch64.cc create mode 100644 deps/vixl/src/aarch64/operands-aarch64.h create mode 100644 deps/vixl/src/aarch64/simulator-aarch64.cc create mode 100644 deps/vixl/src/aarch64/simulator-aarch64.h create mode 100644 deps/vixl/src/aarch64/simulator-constants-aarch64.h create mode 100644 deps/vixl/src/assembler-base-vixl.h create mode 100644 deps/vixl/src/code-buffer-vixl.cc create mode 100644 deps/vixl/src/code-buffer-vixl.h create mode 100644 deps/vixl/src/code-generation-scopes-vixl.h create mode 100644 deps/vixl/src/compiler-intrinsics-vixl.cc create mode 100644 deps/vixl/src/compiler-intrinsics-vixl.h create mode 100644 deps/vixl/src/globals-vixl.h create mode 100644 deps/vixl/src/invalset-vixl.h create mode 100644 deps/vixl/src/macro-assembler-interface.h create mode 100644 deps/vixl/src/platform-vixl.h create mode 100644 deps/vixl/src/pool-manager-impl.h create mode 100644 deps/vixl/src/pool-manager.h create mode 100644 deps/vixl/src/utils-vixl.cc create mode 100644 deps/vixl/src/utils-vixl.h diff --git a/CMakeLists.txt b/CMakeLists.txt index d04010cb..3a5aef07 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,8 +99,19 @@ add_library(inih STATIC deps/inih/ini.c) list(APPEND RELIB_INCLUDES deps/inih) list(APPEND RELIB_LIBS inih) +# vixl +if(ARCH_A64) + file(GLOB VIXL_SOURCES deps/vixl/src/*.cc deps/vixl/src/aarch64/*.cc) + add_library(vixl STATIC ${VIXL_SOURCES}) + target_compile_definitions(vixl PRIVATE -DVIXL_CODE_BUFFER_MALLOC) + list(APPEND RELIB_INCLUDES deps/vixl/src) + list(APPEND RELIB_LIBS vixl) +endif() + # xbyak -list(APPEND RELIB_INCLUDES deps/xbyak-4.901) +if(ARCH_X64) + list(APPEND RELIB_INCLUDES deps/xbyak-4.901) +endif() #-------------------------------------------------- # optional libs @@ -123,28 +134,28 @@ list(APPEND RELIB_INCLUDES deps/microprofile) # sdl2 if(NOT BUILD_LIBRETRO) -set(DIRECTX OFF CACHE BOOL "") -set(RENDER_D3D OFF CACHE BOOL "") -set(SDL_ATOMIC OFF 
CACHE BOOL "") -set(SDL_CPUINFO ON CACHE BOOL "") -set(SDL_FILESYSTEM OFF CACHE BOOL "") -set(SDL_HAPTIC OFF CACHE BOOL "") -set(SDL_POWER OFF CACHE BOOL "") -set(SDL_RENDER OFF CACHE BOOL "") -set(SDL_SHARED OFF CACHE BOOL "") -set(SDL_STATIC ON CACHE BOOL "") + set(DIRECTX OFF CACHE BOOL "") + set(RENDER_D3D OFF CACHE BOOL "") + set(SDL_ATOMIC OFF CACHE BOOL "") + set(SDL_CPUINFO ON CACHE BOOL "") + set(SDL_FILESYSTEM OFF CACHE BOOL "") + set(SDL_HAPTIC OFF CACHE BOOL "") + set(SDL_POWER OFF CACHE BOOL "") + set(SDL_RENDER OFF CACHE BOOL "") + set(SDL_SHARED OFF CACHE BOOL "") + set(SDL_STATIC ON CACHE BOOL "") -if(PLATFORM_DARWIN) - set(SDL_FRAMEWORK_CARBON 1) -endif() + if(PLATFORM_DARWIN) + set(SDL_FRAMEWORK_CARBON 1) + endif() -add_subdirectory(deps/sdl2-2.0.5 EXCLUDE_FROM_ALL) -list(APPEND RELIB_INCLUDES deps/sdl2-2.0.5/include) + add_subdirectory(deps/sdl2-2.0.5 EXCLUDE_FROM_ALL) + list(APPEND RELIB_INCLUDES deps/sdl2-2.0.5/include) -if(MINGW) - list(APPEND SDL_LIBS mingw32) -endif() -list(APPEND SDL_LIBS SDL2main SDL2-static) + if(MINGW) + list(APPEND SDL_LIBS mingw32) + endif() + list(APPEND SDL_LIBS SDL2main SDL2-static) endif() #-------------------------------------------------- @@ -158,7 +169,6 @@ file(GLOB_RECURSE CLANG_FORMAT_ARGS "src/*.c" "src/*.cc" "src/*.h" "test/*.c" "t add_custom_target(format ${CLANG_FORMAT_EXECUTABLE} -i ${CLANG_FORMAT_ARGS}) endif() - #-------------------------------------------------- # redream sources, includes and libs, common to multiple projects #-------------------------------------------------- diff --git a/deps/vixl/AUTHORS b/deps/vixl/AUTHORS new file mode 100644 index 00000000..257ec9d3 --- /dev/null +++ b/deps/vixl/AUTHORS @@ -0,0 +1,8 @@ +# Below is a list of people and organisations that have contributed to the VIXL +# project. Entries should be added to the list as: +# +# Name/Organization + +ARM Ltd. <*@arm.com> +Google Inc. <*@google.com> +Linaro <*@linaro.org> diff --git a/deps/vixl/CPPLINT.cfg b/deps/vixl/CPPLINT.cfg new file mode 100644 index 00000000..93f11aa4 --- /dev/null +++ b/deps/vixl/CPPLINT.cfg @@ -0,0 +1,38 @@ +# Stop cpplint for looking for CPPLINT.cfg outside of vixl. +set noparent +filter=+build/class +filter=+build/deprecated +filter=+build/forward_decl +filter=+build/include_order +filter=+build/printf_format +filter=+build/storage_class +filter=+legal/copyright +filter=+readability/boost +filter=+readability/braces +filter=+readability/casting +filter=+readability/constructors +filter=+readability/fn_size +filter=+readability/function +filter=+readability/multiline_comment +filter=+readability/multiline_string +filter=+readability/streams +filter=+readability/utf8 +filter=+runtime/arrays +filter=+runtime/casting +filter=+runtime/deprecated_fn +filter=+runtime/explicit +filter=+runtime/int +filter=+runtime/memset +filter=+runtime/mutex +filter=+runtime/nonconf +filter=+runtime/printf +filter=+runtime/printf_format +filter=+runtime/references +filter=+runtime/rtti +filter=+runtime/sizeof +filter=+runtime/string +filter=+runtime/virtual +filter=+runtime/vlog +# cpplint.py enables these filters in reversed order. +filter=- +linelength=80 diff --git a/deps/vixl/LICENCE b/deps/vixl/LICENCE new file mode 100644 index 00000000..0acd8ebd --- /dev/null +++ b/deps/vixl/LICENCE @@ -0,0 +1,30 @@ +LICENCE +======= + +The software in this repository is covered by the following licence. + +// Copyright 2015, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/deps/vixl/README.md b/deps/vixl/README.md new file mode 100644 index 00000000..d421a37a --- /dev/null +++ b/deps/vixl/README.md @@ -0,0 +1,186 @@ +VIXL: ARMv8 Runtime Code Generation Library, Development Version +================================================================ + +Contents: + + * Overview + * Licence + * Requirements + * Known limitations + * Usage + + +Overview +======== + +VIXL contains three components. + + 1. Programmatic **assemblers** to generate A64, A32 or T32 code at runtime. The + assemblers abstract some of the constraints of each ISA; for example, most + instructions support any immediate. + 2. **Disassemblers** that can print any instruction emitted by the assemblers. + 3. A **simulator** that can simulate any instruction emitted by the A64 + assembler. The simulator allows generated code to be run on another + architecture without the need for a full ISA model. + +The VIXL git repository can be found [on 'https://git.linaro.org'][vixl]. + +Changes from previous versions of VIXL can be found in the +[Changelog](doc/changelog.md). + + +Licence +======= + +This software is covered by the licence described in the [LICENCE](LICENCE) +file. + + +Requirements +============ + +To build VIXL the following software is required: + + 1. Python 2.7 + 2. SCons 2.0 + 3. GCC 4.8+ or Clang 3.4+ + +A 64-bit host machine is required, implementing an LP64 data model. VIXL has +been tested using GCC on AArch64 Debian, GCC and Clang on amd64 Ubuntu +systems. + +To run the linter and code formatting stages of the tests, the following +software is also required: + + 1. Git + 2. [Google's `cpplint.py`][cpplint] + 3. clang-format-3.6 + +Refer to the 'Usage' section for details. + + +Known Limitations for AArch64 code generation +============================================= + +VIXL was developed for JavaScript engines so a number of features from A64 were +deemed unnecessary: + + * Limited rounding mode support for floating point. + * Limited support for synchronisation instructions. 
+ * Limited support for system instructions. + * A few miscellaneous integer and floating point instructions are missing. + +The VIXL simulator supports only those instructions that the VIXL assembler can +generate. The `doc` directory contains a +[list of supported A64 instructions](doc/aarch64/supported-instructions-aarch64.md). + +The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it +builds and mostly works for 32-bit x86 platforms, there are a number of +floating-point operations which do not work correctly, and a number of tests +fail as a result. + +VIXL may not build using Clang 3.7, due to a compiler warning. A workaround is +to disable conversion of warnings to errors, or to delete the offending +`return` statement reported and rebuild. This problem will be fixed in the next +release. + +Debug Builds +------------ + +Your project's build system must define `VIXL_DEBUG` (eg. `-DVIXL_DEBUG`) +when using a VIXL library that has been built with debug enabled. + +Some classes defined in VIXL header files contain fields that are only present +in debug builds, so if `VIXL_DEBUG` is defined when the library is built, but +not defined for the header files included in your project, you will see runtime +failures. + +Exclusive-Access Instructions +----------------------------- + +All exclusive-access instructions are supported, but the simulator cannot +accurately simulate their behaviour as described in the ARMv8 Architecture +Reference Manual. + + * A local monitor is simulated, so simulated exclusive loads and stores execute + as expected in a single-threaded environment. + * The global monitor is simulated by occasionally causing exclusive-access + instructions to fail regardless of the local monitor state. + * Load-acquire, store-release semantics are approximated by issuing a host + memory barrier after loads or before stores. The built-in + `__sync_synchronize()` is used for this purpose. + +The simulator tries to be strict, and implements the following restrictions that +the ARMv8 ARM allows: + + * A pair of load-/store-exclusive instructions will only succeed if they have + the same address and access size. + * Most of the time, cache-maintenance operations or explicit memory accesses + will clear the exclusive monitor. + * To ensure that simulated code does not depend on this behaviour, the + exclusive monitor will sometimes be left intact after these instructions. + +Instructions affected by these limitations: + `stxrb`, `stxrh`, `stxr`, `ldxrb`, `ldxrh`, `ldxr`, `stxp`, `ldxp`, `stlxrb`, + `stlxrh`, `stlxr`, `ldaxrb`, `ldaxrh`, `ldaxr`, `stlxp`, `ldaxp`, `stlrb`, + `stlrh`, `stlr`, `ldarb`, `ldarh`, `ldar`, `clrex`. + + +Usage +===== + +Running all Tests +----------------- + +The helper script `tools/test.py` will build and run every test that is provided +with VIXL, in both release and debug mode. It is a useful script for verifying +that all of VIXL's dependencies are in place and that VIXL is working as it +should. + +By default, the `tools/test.py` script runs a linter to check that the source +code conforms with the code style guide, and to detect several common errors +that the compiler may not warn about. This is most useful for VIXL developers. +The linter has the following dependencies: + + 1. Git must be installed, and the VIXL project must be in a valid Git + repository, such as one produced using `git clone`. + 2. `cpplint.py`, [as provided by Google][cpplint], must be available (and + executable) on the `PATH`. 
+ +It is possible to tell `tools/test.py` to skip the linter stage by passing +`--nolint`. This removes the dependency on `cpplint.py` and Git. The `--nolint` +option is implied if the VIXL project is a snapshot (with no `.git` directory). + +Additionally, `tools/test.py` tests code formatting using `clang-format-3.6`. +If you don't have `clang-format-3.6`, disable the test using the +`--noclang-format` option. + +Also note that the tests for the tracing features depend upon external `diff` +and `sed` tools. If these tools are not available in `PATH`, these tests will +fail. + +Getting Started +--------------- + +We have separate guides for introducing VIXL, depending on what architecture you +are targeting. A guide for working with AArch32 can be found +[here][getting-started-aarch32], while the AArch64 guide is +[here][getting-started-aarch64]. Example source code is provided in the +[examples](examples) directory. You can build examples with either `scons +aarch32_examples` or `scons aarch64_examples` from the root directory, or use +`scons --help` to get a detailed list of available build targets. + + + + +[cpplint]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py + "Google's cpplint.py script." + +[vixl]: https://git.linaro.org/arm/vixl.git + "The VIXL repository at 'https://git.linaro.org'." + +[getting-started-aarch32]: doc/aarch32/getting-started-aarch32.md + "Introduction to VIXL for AArch32." + +[getting-started-aarch64]: doc/aarch64/getting-started-aarch64.md + "Introduction to VIXL for AArch64." diff --git a/deps/vixl/SConstruct b/deps/vixl/SConstruct new file mode 100644 index 00000000..f89274aa --- /dev/null +++ b/deps/vixl/SConstruct @@ -0,0 +1,574 @@ +# Copyright 2015, VIXL authors +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of ARM Limited nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
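+
+# Typical invocations (as described in README.md and by the help text
+# generated below):
+#
+#   scons                     # build the VIXL library (the default target)
+#   scons mode=debug all      # build every top-level target in debug mode
+#   scons aarch64_examples    # build the AArch64 examples
+#   scons --help              # list the available build options and targets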
+ +import glob +import itertools +import os +from os.path import join +import platform +import subprocess +import sys +from collections import OrderedDict + +root_dir = os.path.dirname(File('SConstruct').rfile().abspath) +sys.path.insert(0, join(root_dir, 'tools')) +import config +import util + +from SCons.Errors import UserError + + +Help(''' +Build system for the VIXL project. +See README.md for documentation and details about the build system. +''') + + +# We track top-level targets to automatically generate help and alias them. +class VIXLTargets: + def __init__(self): + self.targets = [] + self.help_messages = [] + def Add(self, target, help_message): + self.targets.append(target) + self.help_messages.append(help_message) + def Help(self): + res = "" + for i in range(len(self.targets)): + res += '\t{0:<{1}}{2:<{3}}\n'.format( + 'scons ' + self.targets[i], + len('scons ') + max(map(len, self.targets)), + ' : ' + self.help_messages[i], + len(' : ') + max(map(len, self.help_messages))) + return res + +top_level_targets = VIXLTargets() + + + +# Build options ---------------------------------------------------------------- + +# Store all the options in a dictionary. +# The SConstruct will check the build variables and construct the build +# environment as appropriate. +options = { + 'all' : { # Unconditionally processed. + 'CCFLAGS' : ['-Wall', + '-Werror', + '-fdiagnostics-show-option', + '-Wextra', + '-Wredundant-decls', + '-pedantic', + '-Wwrite-strings', + '-Wunused'], + 'CPPPATH' : [config.dir_src_vixl] + }, +# 'build_option:value' : { +# 'environment_key' : 'values to append' +# }, + 'mode:debug' : { + 'CCFLAGS' : ['-DVIXL_DEBUG', '-O0'] + }, + 'mode:release' : { + 'CCFLAGS' : ['-O3'], + }, + 'simulator:aarch64' : { + 'CCFLAGS' : ['-DVIXL_INCLUDE_SIMULATOR_AARCH64'], + }, + 'symbols:on' : { + 'CCFLAGS' : ['-g'], + 'LINKFLAGS' : ['-g'] + }, + 'negative_testing:on' : { + 'CCFLAGS' : ['-DVIXL_NEGATIVE_TESTING'] + }, + 'code_buffer_allocator:mmap' : { + 'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MMAP'] + }, + 'code_buffer_allocator:malloc' : { + 'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MALLOC'] + } + } + + +# A `DefaultVariable` has a default value that depends on elements not known +# when variables are first evaluated. +# Each `DefaultVariable` has a handler that will compute the default value for +# the given environment. +def modifiable_flags_handler(env): + env['modifiable_flags'] = \ + 'on' if 'mode' in env and env['mode'] == 'debug' else 'off' + + +def symbols_handler(env): + env['symbols'] = 'on' if 'mode' in env and env['mode'] == 'debug' else 'off' + +def Is32BitHost(env): + return env['host_arch'] in ['aarch32', 'i386'] + +def IsAArch64Host(env): + return env['host_arch'] == 'aarch64' + +def CanTargetA32(env): + return 'a32' in env['target'] + +def CanTargetT32(env): + return 't32' in env['target'] + +def CanTargetAArch32(env): + return CanTargetA32(env) or CanTargetT32(env) + +def CanTargetA64(env): + return 'a64' in env['target'] + +def CanTargetAArch64(env): + return CanTargetA64(env) + + +# By default, include the simulator only if AArch64 is targeted and we are not +# building VIXL natively for AArch64. +def simulator_handler(env): + if not IsAArch64Host(env) and CanTargetAArch64(env): + env['simulator'] = 'aarch64' + else: + env['simulator'] = 'none' + + +# 'mmap' is required for use with 'mprotect', which is needed for the tests +# (when running natively), so we use it by default where we can. 
+def code_buffer_allocator_handler(env): + directives = util.GetCompilerDirectives(env) + if '__linux__' in directives: + env['code_buffer_allocator'] = 'mmap' + else: + env['code_buffer_allocator'] = 'malloc' + +# A validator checks the consistency of provided options against the environment. +def default_validator(env): + pass + + +def simulator_validator(env): + if env['simulator'] == 'aarch64' and not CanTargetAArch64(env): + raise UserError('Building an AArch64 simulator implies that VIXL targets ' + 'AArch64. Set `target` to include `aarch64` or `a64`.') + + +# Default variables may depend on each other, therefore we need this dictionnary +# to be ordered. +vars_default_handlers = OrderedDict({ + # variable_name : [ 'default val', 'handler', 'validator'] + 'symbols' : [ 'mode==debug', symbols_handler, default_validator ], + 'modifiable_flags' : [ 'mode==debug', modifiable_flags_handler, default_validator], + 'simulator' : [ 'on if the target architectures include AArch64 but ' + 'the host is not AArch64, else off', + simulator_handler, simulator_validator ], + 'code_buffer_allocator' : [ 'mmap with __linux__, malloc otherwise', + code_buffer_allocator_handler, default_validator ] + }) + + +def DefaultVariable(name, help, allowed_values): + help = '%s (%s)' % (help, '|'.join(allowed_values)) + default_value = vars_default_handlers[name][0] + def validator(name, value, env): + if value != default_value and value not in allowed_values: + raise UserError('Invalid value for option {name}: {value}. ' + 'Valid values are: {allowed_values}'.format( + name, value, allowed_values)) + return (name, help, default_value, validator) + + +def AliasedListVariable(name, help, default_value, allowed_values, aliasing): + help = '%s (all|auto|comma-separated list) (any combination from [%s])' % \ + (help, ', '.join(allowed_values)) + + def validator(name, value, env): + # Here list has been converted to space separated strings. + if value == '': return # auto + for v in value.split(): + if v not in allowed_values: + raise UserError('Invalid value for %s: %s' % (name, value)) + + def converter(value): + if value == 'auto': return [] + if value == 'all': + translated = [aliasing[v] for v in allowed_values] + return list(set(itertools.chain.from_iterable(translated))) + # The validator is run later hence the get. + translated = [aliasing.get(v, v) for v in value.split(',')] + return list(set(itertools.chain.from_iterable(translated))) + + return (name, help, default_value, validator, converter) + + +vars = Variables() +# Define command line build options. +vars.AddVariables( + AliasedListVariable('target', 'Target ISA/Architecture', 'auto', + ['aarch32', 'a32', 't32', 'aarch64', 'a64'], + {'aarch32' : ['a32', 't32'], + 'a32' : ['a32'], 't32' : ['t32'], + 'aarch64' : ['a64'], 'a64' : ['a64']}), + EnumVariable('mode', 'Build mode', + 'release', allowed_values=config.build_options_modes), + EnumVariable('negative_testing', + 'Enable negative testing (needs exceptions)', + 'off', allowed_values=['on', 'off']), + DefaultVariable('symbols', 'Include debugging symbols in the binaries', + ['on', 'off']), + DefaultVariable('simulator', 'Simulators to include', ['aarch64', 'none']), + DefaultVariable('code_buffer_allocator', + 'Configure the allocation mechanism in the CodeBuffer', + ['malloc', 'mmap']), + ('std', 'C++ standard. The standards tested are: %s.' 
% \ + ', '.join(config.tested_cpp_standards)) + ) + +# We use 'variant directories' to avoid recompiling multiple times when build +# options are changed, different build paths are used depending on the options +# set. These are the options that should be reflected in the build directory +# path. +options_influencing_build_path = [ + 'target', 'mode', 'symbols', 'CXX', 'std', 'simulator', 'negative_testing', + 'code_buffer_allocator' +] + + + +# Build helpers ---------------------------------------------------------------- + +def RetrieveEnvironmentVariables(env): + for key in ['CC', 'CXX', 'AR', 'RANLIB', 'LD']: + if os.getenv(key): env[key] = os.getenv(key) + if os.getenv('LD_LIBRARY_PATH'): env['LIBPATH'] = os.getenv('LD_LIBRARY_PATH') + if os.getenv('CCFLAGS'): + env.Append(CCFLAGS = os.getenv('CCFLAGS').split()) + if os.getenv('CXXFLAGS'): + env.Append(CXXFLAGS = os.getenv('CXXFLAGS').split()) + if os.getenv('LINKFLAGS'): + env.Append(LINKFLAGS = os.getenv('LINKFLAGS').split()) + # This allows colors to be displayed when using with clang. + env['ENV']['TERM'] = os.getenv('TERM') + + +# The architecture targeted by default will depend on the compiler being +# used. 'host_arch' is extracted from the compiler while 'target' can be +# set by the user. +# By default, we target both AArch32 and AArch64 unless the compiler targets a +# 32-bit architecture. At the moment, we cannot build VIXL's AArch64 support on +# a 32-bit platform. +# TODO: Port VIXL to build on a 32-bit platform. +def target_handler(env): + # Auto detect + if Is32BitHost(env): + # We use list(set(...)) to keep the same order as if it was specify as + # an option. + env['target'] = list(set(['a32', 't32'])) + else: + env['target'] = list(set(['a64', 'a32', 't32'])) + + +def target_validator(env): + # TODO: Port VIXL64 to work on a 32-bit platform. + if Is32BitHost(env) and CanTargetAArch64(env): + raise UserError('Building VIXL for AArch64 in 32-bit is not supported. Set ' + '`target` to `aarch32`') + + +# The target option is handled differently from the rest. +def ProcessTargetOption(env): + if env['target'] == []: target_handler(env) + + if 'a32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A32'] + if 't32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_T32'] + if 'a64' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A64'] + + target_validator(env) + + +def ProcessBuildOptions(env): + # 'all' is unconditionally processed. + if 'all' in options: + for var in options['all']: + if var in env and env[var]: + env[var] += options['all'][var] + else: + env[var] = options['all'][var] + + # The target option *must* be processed before the options defined in + # vars_default_handlers. + ProcessTargetOption(env) + + # Other build options must match 'option:value' + env_dict = env.Dictionary() + + # First apply the default variables handlers in order. + for key, value in vars_default_handlers.items(): + default = value[0] + handler = value[1] + if env_dict.get(key) == default: + handler(env_dict) + + # Second, run the series of validators, to check for errors. + for _, value in vars_default_handlers.items(): + validator = value[2] + validator(env) + + for key in env_dict.keys(): + # Then update the environment according to the value of the variable. 
+ key_val_couple = key + ':%s' % env_dict[key] + if key_val_couple in options: + for var in options[key_val_couple]: + env[var] += options[key_val_couple][var] + + +def ConfigureEnvironmentForCompiler(env): + if CanTargetA32(env) and CanTargetT32(env): + # When building for only one aarch32 isa, fixing the no-return is not worth + # the effort. + env.Append(CPPFLAGS = ['-Wmissing-noreturn']) + + compiler = util.CompilerInformation(env) + if compiler == 'clang': + # These warnings only work for Clang. + # -Wimplicit-fallthrough only works when compiling the code base as C++11 or + # newer. The compiler does not complain if the option is passed when + # compiling earlier C++ standards. + env.Append(CPPFLAGS = ['-Wimplicit-fallthrough', '-Wshorten-64-to-32']) + + # The '-Wunreachable-code' flag breaks builds for clang 3.4. + if compiler != 'clang-3.4': + env.Append(CPPFLAGS = ['-Wunreachable-code']) + + # GCC 4.8 has a bug which produces a warning saying that an anonymous Operand + # object might be used uninitialized: + # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57045 + # The bug does not seem to appear in GCC 4.7, or in debug builds with GCC 4.8. + if env['mode'] == 'release': + if compiler == 'gcc-4.8': + env.Append(CPPFLAGS = ['-Wno-maybe-uninitialized']) + + # GCC 6 and higher is able to detect throwing from inside a destructor and + # reports a warning. However, if negative testing is enabled then assertions + # will throw exceptions. + if env['negative_testing'] == 'on' and env['mode'] == 'debug' \ + and compiler >= 'gcc-6': + env.Append(CPPFLAGS = ['-Wno-terminate']) + # The C++11 compatibility warning will also be triggered for this case, as + # the behavior of throwing from desctructors has changed. + if 'std' in env and env['std'] == 'c++98': + env.Append(CPPFLAGS = ['-Wno-c++11-compat']) + + # When compiling with c++98 (the default), allow long long constants. + if 'std' not in env or env['std'] == 'c++98': + env.Append(CPPFLAGS = ['-Wno-long-long']) + # When compiling with c++11, suggest missing override keywords on methods. + if 'std' in env and env['std'] in ['c++11', 'c++14']: + if compiler >= 'gcc-5': + env.Append(CPPFLAGS = ['-Wsuggest-override']) + elif compiler >= 'clang-3.6': + env.Append(CPPFLAGS = ['-Winconsistent-missing-override']) + + +def ConfigureEnvironment(env): + RetrieveEnvironmentVariables(env) + env['host_arch'] = util.GetHostArch(env) + ProcessBuildOptions(env) + if 'std' in env: + env.Append(CPPFLAGS = ['-std=' + env['std']]) + std_path = env['std'] + ConfigureEnvironmentForCompiler(env) + + +def TargetBuildDir(env): + # Build-time option values are embedded in the build path to avoid requiring a + # full build when an option changes. + build_dir = config.dir_build + for option in options_influencing_build_path: + option_value = ''.join(env[option]) if option in env else '' + build_dir = join(build_dir, option + '_'+ option_value) + return build_dir + + +def PrepareVariantDir(location, build_dir): + location_build_dir = join(build_dir, location) + VariantDir(location_build_dir, location) + return location_build_dir + + +def VIXLLibraryTarget(env): + build_dir = TargetBuildDir(env) + # Create a link to the latest build directory. + # Use `-r` to avoid failure when `latest` exists and is a directory. + subprocess.check_call(["rm", "-rf", config.dir_build_latest]) + util.ensure_dir(build_dir) + subprocess.check_call(["ln", "-s", build_dir, config.dir_build_latest]) + # Source files are in `src` and in `src/aarch64/`. 
+ variant_dir_vixl = PrepareVariantDir(join('src'), build_dir) + sources = [Glob(join(variant_dir_vixl, '*.cc'))] + if CanTargetAArch32(env): + variant_dir_aarch32 = PrepareVariantDir(join('src', 'aarch32'), build_dir) + sources.append(Glob(join(variant_dir_aarch32, '*.cc'))) + if CanTargetAArch64(env): + variant_dir_aarch64 = PrepareVariantDir(join('src', 'aarch64'), build_dir) + sources.append(Glob(join(variant_dir_aarch64, '*.cc'))) + return env.Library(join(build_dir, 'vixl'), sources) + + + +# Build ------------------------------------------------------------------------ + +# The VIXL library, built by default. +env = Environment(variables = vars, + BUILDERS = { + 'Markdown': Builder(action = 'markdown $SOURCE > $TARGET', + suffix = '.html') + }) +# Abort the build if any command line option is unknown or invalid. +unknown_build_options = vars.UnknownVariables() +if unknown_build_options: + print 'Unknown build options:', unknown_build_options.keys() + Exit(1) + +ConfigureEnvironment(env) +Help(vars.GenerateHelpText(env)) +libvixl = VIXLLibraryTarget(env) +Default(libvixl) +env.Alias('libvixl', libvixl) +top_level_targets.Add('', 'Build the VIXL library.') + + +# Common test code. +test_build_dir = PrepareVariantDir('test', TargetBuildDir(env)) +test_objects = [env.Object(Glob(join(test_build_dir, '*.cc')))] + +# AArch32 support +if CanTargetAArch32(env): + # The examples. + aarch32_example_names = util.ListCCFilesWithoutExt(config.dir_aarch32_examples) + aarch32_examples_build_dir = PrepareVariantDir('examples/aarch32', TargetBuildDir(env)) + aarch32_example_targets = [] + for example in aarch32_example_names: + prog = env.Program(join(aarch32_examples_build_dir, example), + join(aarch32_examples_build_dir, example + '.cc'), + LIBS=[libvixl]) + aarch32_example_targets.append(prog) + env.Alias('aarch32_examples', aarch32_example_targets) + top_level_targets.Add('aarch32_examples', 'Build the examples for AArch32.') + + # The benchmarks + aarch32_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch32_benchmarks) + aarch32_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch32', TargetBuildDir(env)) + aarch32_benchmark_targets = [] + for bench in aarch32_benchmark_names: + prog = env.Program(join(aarch32_benchmarks_build_dir, bench), + join(aarch32_benchmarks_build_dir, bench + '.cc'), + LIBS=[libvixl]) + aarch32_benchmark_targets.append(prog) + env.Alias('aarch32_benchmarks', aarch32_benchmark_targets) + top_level_targets.Add('aarch32_benchmarks', 'Build the benchmarks for AArch32.') + + # The tests. + test_aarch32_build_dir = PrepareVariantDir(join('test', 'aarch32'), TargetBuildDir(env)) + test_objects.append(env.Object( + Glob(join(test_aarch32_build_dir, '*.cc')), + CPPPATH = env['CPPPATH'] + [config.dir_tests])) + +# AArch64 support +if CanTargetAArch64(env): + # The benchmarks. + aarch64_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch64_benchmarks) + aarch64_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch64', TargetBuildDir(env)) + aarch64_benchmark_targets = [] + for bench in aarch64_benchmark_names: + prog = env.Program(join(aarch64_benchmarks_build_dir, bench), + join(aarch64_benchmarks_build_dir, bench + '.cc'), + LIBS=[libvixl]) + aarch64_benchmark_targets.append(prog) + env.Alias('aarch64_benchmarks', aarch64_benchmark_targets) + top_level_targets.Add('aarch64_benchmarks', 'Build the benchmarks for AArch64.') + + # The examples. 
+ aarch64_example_names = util.ListCCFilesWithoutExt(config.dir_aarch64_examples) + aarch64_examples_build_dir = PrepareVariantDir('examples/aarch64', TargetBuildDir(env)) + aarch64_example_targets = [] + for example in aarch64_example_names: + prog = env.Program(join(aarch64_examples_build_dir, example), + join(aarch64_examples_build_dir, example + '.cc'), + LIBS=[libvixl]) + aarch64_example_targets.append(prog) + env.Alias('aarch64_examples', aarch64_example_targets) + top_level_targets.Add('aarch64_examples', 'Build the examples for AArch64.') + + # The tests. + test_aarch64_build_dir = PrepareVariantDir(join('test', 'aarch64'), TargetBuildDir(env)) + test_objects.append(env.Object( + Glob(join(test_aarch64_build_dir, '*.cc')), + CPPPATH = env['CPPPATH'] + [config.dir_tests])) + + # The test requires building the example files with specific options, so we + # create a separate variant dir for the example objects built this way. + test_aarch64_examples_vdir = join(TargetBuildDir(env), 'test', 'aarch64', 'test_examples') + VariantDir(test_aarch64_examples_vdir, '.') + test_aarch64_examples_obj = env.Object( + [Glob(join(test_aarch64_examples_vdir, join('test', 'aarch64', 'examples/aarch64', '*.cc'))), + Glob(join(test_aarch64_examples_vdir, join('examples/aarch64', '*.cc')))], + CCFLAGS = env['CCFLAGS'] + ['-DTEST_EXAMPLES'], + CPPPATH = env['CPPPATH'] + [config.dir_aarch64_examples] + [config.dir_tests]) + test_objects.append(test_aarch64_examples_obj) + +test = env.Program(join(test_build_dir, 'test-runner'), test_objects, + LIBS=[libvixl]) +env.Alias('tests', test) +top_level_targets.Add('tests', 'Build the tests.') + + +env.Alias('all', top_level_targets.targets) +top_level_targets.Add('all', 'Build all the targets above.') + +Help('\n\nAvailable top level targets:\n' + top_level_targets.Help()) + +extra_targets = VIXLTargets() + +# Build documentation +doc = [ + env.Markdown('README.md'), + env.Markdown('doc/changelog.md'), + env.Markdown('doc/aarch32/getting-started-aarch32.md'), + env.Markdown('doc/aarch32/design/code-generation-aarch32.md'), + env.Markdown('doc/aarch32/design/literal-pool-aarch32.md'), + env.Markdown('doc/aarch64/supported-instructions-aarch64.md'), + env.Markdown('doc/aarch64/getting-started-aarch64.md'), + env.Markdown('doc/aarch64/topics/ycm.md'), + env.Markdown('doc/aarch64/topics/extending-the-disassembler.md'), + env.Markdown('doc/aarch64/topics/index.md'), +] +env.Alias('doc', doc) +extra_targets.Add('doc', 'Convert documentation to HTML (requires the ' + '`markdown` program).') + +Help('\nAvailable extra targets:\n' + extra_targets.Help()) diff --git a/deps/vixl/src/aarch32/assembler-aarch32.cc b/deps/vixl/src/aarch32/assembler-aarch32.cc new file mode 100644 index 00000000..8a858f72 --- /dev/null +++ b/deps/vixl/src/aarch32/assembler-aarch32.cc @@ -0,0 +1,27824 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include +} + +#include +#include +#include +#include +#include + +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" +#include "aarch32/assembler-aarch32.h" + +namespace vixl { +namespace aarch32 { + +void Assembler::EmitT32_16(uint16_t instr) { + VIXL_ASSERT(buffer_.Is16bitAligned()); + buffer_.Emit16(instr); +} + + +void Assembler::EmitT32_32(uint32_t instr) { + VIXL_ASSERT(buffer_.Is16bitAligned()); + buffer_.Emit16(static_cast(instr >> 16)); + buffer_.Emit16(static_cast(instr & 0xffff)); +} + + +void Assembler::EmitA32(uint32_t instr) { + VIXL_ASSERT(buffer_.Is32bitAligned()); + buffer_.Emit32(instr); +} + + +#ifdef VIXL_DEBUG +void Assembler::PerformCheckIT(Condition condition) { + if (it_mask_ == 0) { + VIXL_ASSERT(IsUsingA32() || condition.Is(al)); + } else { + VIXL_ASSERT(condition.Is(first_condition_)); + first_condition_ = + Condition((first_condition_.GetCondition() & 0xe) | (it_mask_ >> 3)); + // For A32, AdavanceIT() is not called by the assembler. We must call it + // in order to check that IT instructions are used consistently with + // the following conditional instructions. + if (IsUsingA32()) AdvanceIT(); + } +} +#endif + + +void Assembler::BindHelper(Label* label) { + VIXL_ASSERT(!label->IsBound()); + label->SetLocation(this, GetCursorOffset()); + label->MarkBound(); +} + +uint32_t Assembler::Link(uint32_t instr, + Location* location, + const Location::EmitOperator& op, + const ReferenceInfo* info) { + location->SetReferenced(); + if (location->IsBound()) { + return op.Encode(instr, GetCursorOffset(), location); + } + location->AddForwardRef(GetCursorOffset(), op, info); + return instr; +} + + +// Start of generated code. 
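+
+// The Dt_* classes below are generated encoding helpers. Each constructor
+// maps a NEON DataType (plus, where applicable, a register lane or an
+// alignment) onto the bit pattern expected by the instruction field named in
+// the class, via SetEncodingValue(); combinations with no valid encoding
+// either hit VIXL_UNREACHABLE() or leave the encoding value unset.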
+class Dt_L_imm6_1 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_L_imm6_1(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_L_imm6_1::Dt_L_imm6_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + case S64: + type_ = 0x0; + SetEncodingValue(0x8); + break; + case U64: + type_ = 0x1; + SetEncodingValue(0x8); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_L_imm6_2 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_L_imm6_2(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_L_imm6_2::Dt_L_imm6_2(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + case S64: + type_ = 0x1; + SetEncodingValue(0x8); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_L_imm6_3 : public EncodingValue { + public: + explicit Dt_L_imm6_3(DataType dt); +}; + +Dt_L_imm6_3::Dt_L_imm6_3(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x1); + break; + case I16: + SetEncodingValue(0x2); + break; + case I32: + SetEncodingValue(0x4); + break; + case I64: + SetEncodingValue(0x8); + break; + default: + break; + } +} + +class Dt_L_imm6_4 : public EncodingValue { + public: + explicit Dt_L_imm6_4(DataType dt); +}; + +Dt_L_imm6_4::Dt_L_imm6_4(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x1); + break; + case Untyped16: + SetEncodingValue(0x2); + break; + case Untyped32: + SetEncodingValue(0x4); + break; + case Untyped64: + SetEncodingValue(0x8); + break; + default: + break; + } +} + +class Dt_imm6_1 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_1(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_1::Dt_imm6_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_2 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_2(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_2::Dt_imm6_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_3 : public EncodingValue { + public: + explicit Dt_imm6_3(DataType dt); +}; + +Dt_imm6_3::Dt_imm6_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + 
SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_imm6_4 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_4(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_4::Dt_imm6_4(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_op_U_size_1 : public EncodingValue { + public: + explicit Dt_op_U_size_1(DataType dt); +}; + +Dt_op_U_size_1::Dt_op_U_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case P8: + SetEncodingValue(0x8); + break; + case P64: + SetEncodingValue(0xa); + break; + default: + break; + } +} + +class Dt_op_size_1 : public EncodingValue { + public: + explicit Dt_op_size_1(DataType dt); +}; + +Dt_op_size_1::Dt_op_size_1(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case P8: + SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_op_size_2 : public EncodingValue { + public: + explicit Dt_op_size_2(DataType dt); +}; + +Dt_op_size_2::Dt_op_size_2(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_op_size_3 : public EncodingValue { + public: + explicit Dt_op_size_3(DataType dt); +}; + +Dt_op_size_3::Dt_op_size_3(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x4); + break; + case U32: + SetEncodingValue(0x5); + break; + case U64: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_imm3H_1 : public EncodingValue { + public: + explicit Dt_U_imm3H_1(DataType dt); +}; + +Dt_U_imm3H_1::Dt_U_imm3H_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x1); + break; + case S16: + SetEncodingValue(0x2); + break; + case S32: + SetEncodingValue(0x4); + break; + case U8: + SetEncodingValue(0x9); + break; + case U16: + SetEncodingValue(0xa); + break; + case U32: + SetEncodingValue(0xc); + break; + default: + break; + } +} + +class Dt_U_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_U_opc1_opc2_1::Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case S8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case S16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 
| (lane.GetLane() << 1)); + break; + case U8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x18 | lane.GetLane()); + break; + case U16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x11 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_opc1_opc2_1::Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_imm4_1 : public EncodingValue { + public: + explicit Dt_imm4_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_imm4_1::Dt_imm4_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x2 | (lane.GetLane() << 2)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x4 | (lane.GetLane() << 3)); + break; + default: + break; + } +} + +class Dt_B_E_1 : public EncodingValue { + public: + explicit Dt_B_E_1(DataType dt); +}; + +Dt_B_E_1::Dt_B_E_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x2); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_op_1 : public EncodingValue { + public: + Dt_op_1(DataType dt1, DataType dt2); +}; + +Dt_op_1::Dt_op_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() == U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_op_2 : public EncodingValue { + public: + explicit Dt_op_2(DataType dt); +}; + +Dt_op_2::Dt_op_2(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_op_3 : public EncodingValue { + public: + explicit Dt_op_3(DataType dt); +}; + +Dt_op_3::Dt_op_3(DataType dt) { + switch (dt.GetValue()) { + case S32: + SetEncodingValue(0x0); + break; + case U32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_U_sx_1 : public EncodingValue { + public: + 
explicit Dt_U_sx_1(DataType dt); +}; + +Dt_U_sx_1::Dt_U_sx_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case U16: + SetEncodingValue(0x2); + break; + case U32: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_op_U_1 : public EncodingValue { + public: + Dt_op_U_1(DataType dt1, DataType dt2); +}; + +Dt_op_U_1::Dt_op_U_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() == U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_sz_1 : public EncodingValue { + public: + explicit Dt_sz_1(DataType dt); +}; + +Dt_sz_1::Dt_sz_1(DataType dt) { + switch (dt.GetValue()) { + case F32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_F_size_1 : public EncodingValue { + public: + explicit Dt_F_size_1(DataType dt); +}; + +Dt_F_size_1::Dt_F_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_2 : public EncodingValue { + public: + explicit Dt_F_size_2(DataType dt); +}; + +Dt_F_size_2::Dt_F_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_3 : public EncodingValue { + public: + explicit Dt_F_size_3(DataType dt); +}; + +Dt_F_size_3::Dt_F_size_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_4 : public EncodingValue { + public: + explicit Dt_F_size_4(DataType dt); +}; + +Dt_F_size_4::Dt_F_size_4(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_1 : public EncodingValue { + public: + explicit Dt_U_size_1(DataType dt); +}; + +Dt_U_size_1::Dt_U_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_2 : public EncodingValue { + public: + explicit Dt_U_size_2(DataType dt); +}; + +Dt_U_size_2::Dt_U_size_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_3 : public EncodingValue { + public: + explicit Dt_U_size_3(DataType dt); +}; + +Dt_U_size_3::Dt_U_size_3(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + 
SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case S64: + SetEncodingValue(0x3); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case U64: + SetEncodingValue(0x7); + break; + default: + break; + } +} + +class Dt_size_1 : public EncodingValue { + public: + explicit Dt_size_1(DataType dt); +}; + +Dt_size_1::Dt_size_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_size_2 : public EncodingValue { + public: + explicit Dt_size_2(DataType dt); +}; + +Dt_size_2::Dt_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_3 : public EncodingValue { + public: + explicit Dt_size_3(DataType dt); +}; + +Dt_size_3::Dt_size_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x0); + break; + case I32: + SetEncodingValue(0x1); + break; + case I64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_4 : public EncodingValue { + public: + explicit Dt_size_4(DataType dt); +}; + +Dt_size_4::Dt_size_4(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_5 : public EncodingValue { + public: + explicit Dt_size_5(DataType dt); +}; + +Dt_size_5::Dt_size_5(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_6 : public EncodingValue { + public: + explicit Dt_size_6(DataType dt); +}; + +Dt_size_6::Dt_size_6(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + case Untyped64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_7 : public EncodingValue { + public: + explicit Dt_size_7(DataType dt); +}; + +Dt_size_7::Dt_size_7(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_8 : public EncodingValue { + public: + Dt_size_8(DataType dt, Alignment align); +}; + +Dt_size_8::Dt_size_8(DataType dt, Alignment align) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + if (align.Is(k64BitAlign) || align.Is(kNoAlignment)) { + SetEncodingValue(0x2); + } else if (align.Is(k128BitAlign)) { + SetEncodingValue(0x3); + } + break; + default: + break; + } +} + +class Dt_size_9 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_9(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_9::Dt_size_9(DataType dt) { + switch (dt.GetValue()) { + case I16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case I32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case F32: + type_ = 0x1; + SetEncodingValue(0x2); + break; 
+ default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_10 : public EncodingValue { + public: + explicit Dt_size_10(DataType dt); +}; + +Dt_size_10::Dt_size_10(DataType dt) { + switch (dt.GetValue()) { + case S8: + case U8: + case I8: + SetEncodingValue(0x0); + break; + case S16: + case U16: + case I16: + SetEncodingValue(0x1); + break; + case S32: + case U32: + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_11 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_11(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_11::Dt_size_11(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_12 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_12(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_12::Dt_size_12(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x0); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x0); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_13 : public EncodingValue { + public: + explicit Dt_size_13(DataType dt); +}; + +Dt_size_13::Dt_size_13(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_14 : public EncodingValue { + public: + explicit Dt_size_14(DataType dt); +}; + +Dt_size_14::Dt_size_14(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_15 : public EncodingValue { + public: + explicit Dt_size_15(DataType dt); +}; + +Dt_size_15::Dt_size_15(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_size_16 : public EncodingValue { + public: + explicit Dt_size_16(DataType dt); +}; + +Dt_size_16::Dt_size_16(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Index_1 : public EncodingValue { + public: + Index_1(const NeonRegisterList& nreglist, DataType dt); +}; + +Index_1::Index_1(const NeonRegisterList& nreglist, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value 
|= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_1 : public EncodingValue { + public: + Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_1::Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 3; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_2 : public EncodingValue { + public: + Align_index_align_2(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_2::Align_index_align_2(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_3 : public EncodingValue { + public: + Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_3::Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == 
kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == k128BitAlign) { + value = 2; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_a_1 : public EncodingValue { + public: + Align_a_1(Alignment align, DataType dt); +}; + +Align_a_1::Align_a_1(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_2 : public EncodingValue { + public: + Align_a_2(Alignment align, DataType dt); +}; + +Align_a_2::Align_a_2(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_3 : public EncodingValue { + public: + Align_a_3(Alignment align, DataType dt); +}; + +Align_a_3::Align_a_3(Alignment align, DataType dt) { + switch (align.GetType()) { + case k32BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped16)) + SetEncodingValue(0x1); + else if (dt.Is(Untyped32)) + SetEncodingValue(0x1); + break; + case k128BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_1 : public EncodingValue { + public: + Align_align_1(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_1::Align_align_1(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_2 : public EncodingValue { + public: + Align_align_2(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_2::Align_align_2(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + 
+      SetEncodingValue(0x2);
+      break;
+    case k256BitAlign:
+      if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3);
+      break;
+    case kNoAlignment:
+      SetEncodingValue(0x0);
+      break;
+    default:
+      break;
+  }
+}
+
+class Align_align_3 : public EncodingValue {
+ public:
+  explicit Align_align_3(Alignment align);
+};
+
+Align_align_3::Align_align_3(Alignment align) {
+  switch (align.GetType()) {
+    case k64BitAlign:
+      SetEncodingValue(0x1);
+      break;
+    case kNoAlignment:
+      SetEncodingValue(0x0);
+      break;
+    default:
+      break;
+  }
+}
+
+class Align_align_4 : public EncodingValue {
+ public:
+  explicit Align_align_4(Alignment align);
+};
+
+Align_align_4::Align_align_4(Alignment align) {
+  switch (align.GetType()) {
+    case k64BitAlign:
+      SetEncodingValue(0x1);
+      break;
+    case k128BitAlign:
+      SetEncodingValue(0x2);
+      break;
+    case k256BitAlign:
+      SetEncodingValue(0x3);
+      break;
+    case kNoAlignment:
+      SetEncodingValue(0x0);
+      break;
+    default:
+      break;
+  }
+}
+
+class Align_align_5 : public EncodingValue {
+ public:
+  Align_align_5(Alignment align, const NeonRegisterList& nreglist);
+};
+
+Align_align_5::Align_align_5(Alignment align,
+                             const NeonRegisterList& nreglist) {
+  switch (align.GetType()) {
+    case k64BitAlign:
+      SetEncodingValue(0x1);
+      break;
+    case k128BitAlign:
+      if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4))
+        SetEncodingValue(0x2);
+      break;
+    case k256BitAlign:
+      if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3);
+      break;
+    case kNoAlignment:
+      SetEncodingValue(0x0);
+      break;
+    default:
+      break;
+  }
+}
+
+
+// CBNZ{<q>} <Rn>, <label>
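
// The Dt_* and Align_* helpers defined above all follow one pattern: the
// constructor checks whether a DataType (plus, where relevant, a lane or
// alignment) has a valid field encoding and, only if it does, records the
// field bits with SetEncodingValue(); otherwise the value is left invalid so
// the caller can reject the operands. The standalone sketch below illustrates
// that pattern. It is not part of the vixl patch: the minimal EncodingValue
// re-declaration, DataTypeValue enum, Dt_size_like class, EmitWithSizeField,
// kIllustrativeOpcode and the chosen bit position are all illustrative
// assumptions, not the library's API.

#include <cstdint>

namespace encoding_sketch {

// Minimal stand-in for the EncodingValue base used above (assumed interface).
class EncodingValue {
 public:
  EncodingValue() : valid_(false), value_(0) {}
  bool IsValid() const { return valid_; }
  uint32_t GetEncodingValue() const { return value_; }

 protected:
  // Marks the value valid and stores the field bits, as the Dt_*/Align_*
  // constructors above do.
  void SetEncodingValue(uint32_t value) {
    valid_ = true;
    value_ = value;
  }

 private:
  bool valid_;
  uint32_t value_;
};

// Illustrative data-type tags (the real DataType carries more values).
enum DataTypeValue { Untyped8, Untyped16, Untyped32, Untyped64 };

// Mirrors the Dt_size_7-style mapping: 8/16/32-bit element sizes encode as
// 0, 1 and 2; anything else leaves the value invalid.
class Dt_size_like : public EncodingValue {
 public:
  explicit Dt_size_like(DataTypeValue dt) {
    switch (dt) {
      case Untyped8:
        SetEncodingValue(0x0);
        break;
      case Untyped16:
        SetEncodingValue(0x1);
        break;
      case Untyped32:
        SetEncodingValue(0x2);
        break;
      default:
        break;
    }
  }
};

// Hypothetical caller: refuse to emit when the data type has no encoding,
// otherwise OR the size bits into a made-up opcode template.
inline bool EmitWithSizeField(DataTypeValue dt, uint32_t* instr) {
  const uint32_t kIllustrativeOpcode = 0xf4a00c0fu;  // not a real encoding
  Dt_size_like encoded_dt(dt);
  if (!encoded_dt.IsValid()) return false;
  *instr = kIllustrativeOpcode | (encoded_dt.GetEncodingValue() << 10);
  return true;
}

}  // namespace encoding_sketch

// Leaving the value invalid instead of asserting appears to be the intent of
// the early returns above: the caller decides whether an unencodable operand
// combination is an error or should be delegated to another encoding.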