commit 85ef03b022 by TheLastRar, 2025-01-17 04:37:18 +01:00 (committed by GitHub)
38 changed files with 4870 additions and 2625 deletions

View File

@ -159,8 +159,8 @@ template <>
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
return GenericOperand();
}
}
} // namespace vixl::aarch64
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_ABI_AARCH64_H_

View File

@ -33,6 +33,7 @@
#include "../globals-vixl.h"
#include "../invalset-vixl.h"
#include "../utils-vixl.h"
#include "operands-aarch64.h"
namespace vixl {
@ -403,6 +404,15 @@ enum LoadStoreScalingOption {
// Assembler.
class Assembler : public vixl::internal::AssemblerBase {
public:
explicit Assembler(
PositionIndependentCodeOption pic = PositionIndependentCode)
: pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {}
explicit Assembler(
size_t capacity,
PositionIndependentCodeOption pic = PositionIndependentCode)
: AssemblerBase(capacity),
pic_(pic),
cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {}
Assembler(byte* buffer,
size_t capacity,
PositionIndependentCodeOption pic = PositionIndependentCode)
@ -2148,6 +2158,9 @@ class Assembler : public vixl::internal::AssemblerBase {
// System instruction with pre-encoded op (op1:crn:crm:op2).
void sys(int op, const Register& xt = xzr);
// System instruction with result.
void sysl(int op, const Register& xt = xzr);
// System data cache operation.
void dc(DataCacheOp op, const Register& rt);
@ -3608,6 +3621,117 @@ class Assembler : public vixl::internal::AssemblerBase {
// Unsigned 8-bit integer matrix multiply-accumulate (vector).
void ummla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// Bit Clear and exclusive-OR.
void bcax(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va);
// Three-way Exclusive-OR.
void eor3(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va);
// Exclusive-OR and Rotate.
void xar(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int rotate);
// Rotate and Exclusive-OR
void rax1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA1 hash update (choose).
void sha1c(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA1 fixed rotate.
void sha1h(const VRegister& sd, const VRegister& sn);
// SHA1 hash update (majority).
void sha1m(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA1 hash update (parity).
void sha1p(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA1 schedule update 0.
void sha1su0(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA1 schedule update 1.
void sha1su1(const VRegister& vd, const VRegister& vn);
// SHA256 hash update (part 1).
void sha256h(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA256 hash update (part 2).
void sha256h2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA256 schedule update 0.
void sha256su0(const VRegister& vd, const VRegister& vn);
// SHA256 schedule update 1.
void sha256su1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA512 hash update part 1.
void sha512h(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA512 hash update part 2.
void sha512h2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SHA512 schedule Update 0.
void sha512su0(const VRegister& vd, const VRegister& vn);
// SHA512 schedule Update 1.
void sha512su1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// AES single round decryption.
void aesd(const VRegister& vd, const VRegister& vn);
// AES single round encryption.
void aese(const VRegister& vd, const VRegister& vn);
// AES inverse mix columns.
void aesimc(const VRegister& vd, const VRegister& vn);
// AES mix columns.
void aesmc(const VRegister& vd, const VRegister& vn);
// SM3PARTW1.
void sm3partw1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SM3PARTW2.
void sm3partw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// SM3SS1.
void sm3ss1(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va);
// SM3TT1A.
void sm3tt1a(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int index);
// SM3TT1B.
void sm3tt1b(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int index);
// SM3TT2A.
void sm3tt2a(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int index);
// SM3TT2B.
void sm3tt2b(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int index);
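A minimal emission sketch (illustrative, not part of this header): the new crypto mnemonics are used like any other NEON instruction, provided the corresponding CPUFeatures are enabled first. ExactAssemblyScope, kInstructionSize and the register choices below come from the wider VIXL API and are assumptions here.
MacroAssembler masm;
masm.GetCPUFeatures()->Combine(CPUFeatures::kNEON, CPUFeatures::kSHA2);
{
  // Raw assembler mnemonics must be emitted inside an exact-size scope.
  ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
  masm.sha256h(q0, q1, v2.V4S());      // SHA256H q0, q1, v2.4s
  masm.sha256su0(v3.V4S(), v4.V4S());  // SHA256SU0 v3.4s, v4.4s
}
masm.FinalizeCode();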
// Scalable Vector Extensions.
// Absolute value (predicated).
@ -7062,6 +7186,21 @@ class Assembler : public vixl::internal::AssemblerBase {
// Unsigned Minimum.
void umin(const Register& rd, const Register& rn, const Operand& op);
// Check feature status.
void chkfeat(const Register& rd);
// Guarded Control Stack Push.
void gcspushm(const Register& rt);
// Guarded Control Stack Pop.
void gcspopm(const Register& rt);
// Guarded Control Stack Switch Stack 1.
void gcsss1(const Register& rt);
// Guarded Control Stack Switch Stack 2.
void gcsss2(const Register& rt);
// Emit generic instructions.
// Emit raw instructions into the instruction stream.
@ -7530,6 +7669,8 @@ class Assembler : public vixl::internal::AssemblerBase {
static Instr VFormat(VRegister vd) {
if (vd.Is64Bits()) {
switch (vd.GetLanes()) {
case 1:
return NEON_1D;
case 2:
return NEON_2S;
case 4:

File diff suppressed because it is too large

View File

@ -32,6 +32,7 @@
#include <unordered_map>
#include "../cpu-features.h"
#include "decoder-aarch64.h"
#include "decoder-visitor-map-aarch64.h"
@ -112,6 +113,7 @@ class CPUFeaturesAuditor : public DecoderVisitor {
#define DECLARE(A) virtual void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
void VisitCryptoSM3(const Instruction* instr);
void LoadStoreHelper(const Instruction* instr);
void LoadStorePairHelper(const Instruction* instr);
@ -126,6 +128,7 @@ class CPUFeaturesAuditor : public DecoderVisitor {
uint32_t,
std::function<void(CPUFeaturesAuditor*, const Instruction*)>>;
static const FormToVisitorFnMap* GetFormToVisitorFnMap();
uint32_t form_hash_;
};
} // namespace aarch64

View File

@ -0,0 +1,276 @@
// Copyright 2023, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DEBUGGER_AARCH64_H_
#define VIXL_AARCH64_DEBUGGER_AARCH64_H_
#include <optional>
#include <unordered_set>
#include <vector>
#include "../cpu-features.h"
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "abi-aarch64.h"
#include "cpu-features-auditor-aarch64.h"
#include "disasm-aarch64.h"
#include "instructions-aarch64.h"
#include "simulator-aarch64.h"
#include "simulator-constants-aarch64.h"
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
namespace vixl {
namespace aarch64 {
class Simulator;
enum DebugReturn { DebugContinue, DebugExit };
// A debugger command that performs some action when used by the simulator
// debugger.
class DebuggerCmd {
public:
DebuggerCmd(Simulator* sim,
std::string cmd_word,
std::string cmd_alias,
std::string usage,
std::string description);
virtual ~DebuggerCmd() {}
// Perform some action based on the arguments passed in. Returns DebugExit
// if the debugger should exit after the action, DebugContinue otherwise.
virtual DebugReturn Action(const std::vector<std::string>& args) = 0;
// Return the command word.
std::string_view GetCommandWord() { return command_word_; }
// Return the alias for this command. Returns an empty string if this command
// has no alias.
std::string_view GetCommandAlias() { return command_alias_; }
// Return this command's usage.
std::string_view GetArgsString() { return args_str_; }
// Return this command's description.
std::string_view GetDescription() { return description_; }
protected:
// Simulator which this command will be performed on.
Simulator* sim_;
// Stream to output the result of the command to.
FILE* ostream_;
// Command word that, when given to the interactive debugger, calls Action.
std::string command_word_;
// Optional alias for the command_word.
std::string command_alias_;
// Optional string showing the arguments that can be passed to the command.
std::string args_str_;
// Optional description of the command.
std::string description_;
};
//
// Base debugger command handlers:
//
class HelpCmd : public DebuggerCmd {
public:
HelpCmd(Simulator* sim)
: DebuggerCmd(sim, "help", "h", "", "Display this help message.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class BreakCmd : public DebuggerCmd {
public:
BreakCmd(Simulator* sim)
: DebuggerCmd(sim,
"break",
"b",
"<address>",
"Set or remove a breakpoint.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class StepCmd : public DebuggerCmd {
public:
StepCmd(Simulator* sim)
: DebuggerCmd(sim,
"step",
"s",
"[<n>]",
"Step n instructions, default step 1 instruction.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class ContinueCmd : public DebuggerCmd {
public:
ContinueCmd(Simulator* sim)
: DebuggerCmd(sim,
"continue",
"c",
"",
"Exit the debugger and continue executing instructions.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class PrintCmd : public DebuggerCmd {
public:
PrintCmd(Simulator* sim)
: DebuggerCmd(sim,
"print",
"p",
"<register|all|system>",
"Print the contents of a register, all registers or all"
" system registers.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class TraceCmd : public DebuggerCmd {
public:
TraceCmd(Simulator* sim)
: DebuggerCmd(sim,
"trace",
"t",
"",
"Start/stop memory and register tracing.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
class GdbCmd : public DebuggerCmd {
public:
GdbCmd(Simulator* sim)
: DebuggerCmd(sim,
"gdb",
"g",
"",
"Enter an already running instance of gdb.") {}
DebugReturn Action(const std::vector<std::string>& args) override;
};
// A debugger for the Simulator which takes input from the user in order to
// control the running of the Simulator.
class Debugger {
public:
// A pair consisting of a register character (e.g. W, X, V) and a register
// code (e.g. 0, 1, ..., 31) which represents a single parsed register.
//
// Note: the register character is guaranteed to be upper case.
using RegisterParsedFormat = std::pair<char, unsigned>;
Debugger(Simulator* sim);
// Set the input stream, from which commands are read, to a custom stream.
void SetInputStream(std::istream* stream) { input_stream_ = stream; }
// Register a new command for the debugger.
template <class T>
void RegisterCmd();
// Set a breakpoint at the given address.
void RegisterBreakpoint(uint64_t addr) { breakpoints_.insert(addr); }
// Remove a breakpoint at the given address.
void RemoveBreakpoint(uint64_t addr) { breakpoints_.erase(addr); }
// Return true if the address is the location of a breakpoint.
bool IsBreakpoint(uint64_t addr) const {
return (breakpoints_.find(addr) != breakpoints_.end());
}
// Return true if the simulator pc is a breakpoint.
bool IsAtBreakpoint() const;
// Main loop for the debugger. Keep prompting for user-entered debugger
// commands and try to execute them until a command is given that exits the
// interactive debugger.
void Debug();
// Parse an unsigned integer value from a string. Base is used to determine
// the numeric base of the number to be read, i.e. 8 for octal, 10 for
// decimal, 16 for hexadecimal and 0 for auto-detect. Returns the value, or
// std::nullopt if no integer value was found.
static std::optional<uint64_t> ParseUint64String(std::string_view uint64_str,
int base = 0);
// Parse a register from a string. Returns the register character and code
// (e.g. W0, X29, V31) if a valid register was found, or std::nullopt
// otherwise.
static std::optional<RegisterParsedFormat> ParseRegString(
std::string_view reg_str);
// Print the usage of each debugger command.
void PrintUsage();
private:
// Split a string based on the separator given (a single space character by
// default) and return as a std::vector of strings.
static std::vector<std::string> Tokenize(std::string_view input_line,
char separator = ' ');
// Try to execute a single debugger command.
DebugReturn ExecDebugCommand(const std::vector<std::string>& tokenized_cmd);
// Return true if the string is zero, i.e. all characters in the string
// (other than prefixes) are zero.
static bool IsZeroUint64String(std::string_view uint64_str, int base);
// The simulator that this debugger acts on.
Simulator* sim_;
// A vector of all commands recognised by the debugger.
std::vector<std::unique_ptr<DebuggerCmd>> debugger_cmds_;
// Input stream from which commands are read. Default is std::cin.
std::istream* input_stream_;
// Output stream from the simulator.
FILE* ostream_;
// A list of all instruction addresses that, when executed by the
// simulator, will start the interactive debugger if it hasn't already.
std::unordered_set<uint64_t> breakpoints_;
};
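A usage sketch (names illustrative; the simulator constructs its own Debugger when debugging is enabled):
Decoder decoder;
Simulator sim(&decoder);
sim.SetDebuggerEnabled(true);                      // Allow the debugger to run.
sim.GetDebugger()->RegisterBreakpoint(0x400800);   // Enter it at this guest PC.
std::istringstream script("print all\ncontinue\n");
sim.GetDebugger()->SetInputStream(&script);        // Drive it from a script.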
} // namespace aarch64
} // namespace vixl
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // VIXL_AARCH64_DEBUGGER_AARCH64_H_

View File

@ -3764,7 +3764,7 @@ static const DecodeMapping kDecodeMapping[] = {
{"001110"_b, "autiaz_hi_hints"},
{"001111"_b, "autibz_hi_hints"},
{"0100xx"_b, "bti_hb_hints"},
{"010100"_b, "chkfeat_hi_hints"},
{"010100"_b, "chkfeat_hf_hints"},
{"0101x1"_b, "hint_hm_hints"},
{"01x110"_b, "hint_hm_hints"},
{"10xxxx"_b, "hint_hm_hints"},

View File

@ -2074,7 +2074,6 @@
{"scvtf_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
{"ucvtf_asimdmiscfp16_r"_h, &VISITORCLASS::VisitNEON2RegMiscFP16}, \
{"addhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
{"pmull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
{"raddhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
{"rsubhn_asimddiff_n"_h, &VISITORCLASS::VisitNEON3Different}, \
{"sabal_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
@ -2592,6 +2591,7 @@
{"dmb_bo_barriers"_h, &VISITORCLASS::VisitSystem}, \
{"dsb_bo_barriers"_h, &VISITORCLASS::VisitSystem}, \
{"hint_hm_hints"_h, &VISITORCLASS::VisitSystem}, \
{"chkfeat_hf_hints"_h, &VISITORCLASS::VisitSystem}, \
{"mrs_rs_systemmove"_h, &VISITORCLASS::VisitSystem}, \
{"msr_sr_systemmove"_h, &VISITORCLASS::VisitSystem}, \
{"psb_hc_hints"_h, &VISITORCLASS::VisitSystem}, \
@ -2638,7 +2638,6 @@
&VISITORCLASS::VisitUnconditionalBranchToRegister}, \
{"ret_64r_branch_reg"_h, \
&VISITORCLASS::VisitUnconditionalBranchToRegister}, \
{"bcax_vvv16_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfcvtn_asimdmisc_4s"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfdot_asimdelem_e"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfdot_asimdsame2_d"_h, &VISITORCLASS::VisitUnimplemented}, \
@ -2646,7 +2645,6 @@
{"bfmlal_asimdsame2_f"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfmmla_asimdsame2_e"_h, &VISITORCLASS::VisitUnimplemented}, \
{"dsb_bon_barriers"_h, &VISITORCLASS::VisitUnimplemented}, \
{"eor3_vvv16_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \
{"ld64b_64l_memop"_h, &VISITORCLASS::VisitUnimplemented}, \
{"ldgm_64bulk_ldsttags"_h, &VISITORCLASS::VisitUnimplemented}, \
{"ldtrb_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \
@ -2658,18 +2656,13 @@
{"ldtrsw_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \
{"ldtr_32_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \
{"ldtr_64_ldst_unpriv"_h, &VISITORCLASS::VisitUnimplemented}, \
{"rax1_vvv2_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sha512h2_qqv_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sha512h_qqv_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sha512su0_vv2_cryptosha512_2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sha512su1_vvv2_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3partw1_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3partw2_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3ss1_vvv4_crypto4"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3tt1a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3tt1b_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3tt2a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3tt2b_vvv_crypto3_imm2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm3partw1_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3partw2_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3ss1_vvv4_crypto4"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3tt1a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3tt1b_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3tt2a_vvv4_crypto3_imm2"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm3tt2b_vvv_crypto3_imm2"_h, &VISITORCLASS::VisitCryptoSM3}, \
{"sm4ekey_vvv4_cryptosha512_3"_h, &VISITORCLASS::VisitUnimplemented}, \
{"sm4e_vv4_cryptosha512_2"_h, &VISITORCLASS::VisitUnimplemented}, \
{"st64b_64l_memop"_h, &VISITORCLASS::VisitUnimplemented}, \
@ -2686,7 +2679,6 @@
{"ttest_br_systemresult"_h, &VISITORCLASS::VisitUnimplemented}, \
{"wfet_only_systeminstrswithreg"_h, &VISITORCLASS::VisitUnimplemented}, \
{"wfit_only_systeminstrswithreg"_h, &VISITORCLASS::VisitUnimplemented}, \
{"xar_vvv2_crypto3_imm6"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfcvt_z_p_z_s2bf"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfcvtnt_z_p_z_s2bf"_h, &VISITORCLASS::VisitUnimplemented}, \
{"bfdot_z_zzz"_h, &VISITORCLASS::VisitUnimplemented}, \
@ -2827,6 +2819,7 @@
{"fmlal_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \
{"fmlsl2_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \
{"fmlsl_asimdsame_f"_h, &VISITORCLASS::VisitNEON3Same}, \
{"pmull_asimddiff_l"_h, &VISITORCLASS::VisitNEON3Different}, \
{"ushll_asimdshf_l"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \
{"sshll_asimdshf_l"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \
{"shrn_asimdshf_n"_h, &VISITORCLASS::VisitNEONShiftImmediate}, \

View File

@ -228,6 +228,11 @@ class Disassembler : public DecoderVisitor {
void DisassembleNEONScalarShiftRightNarrowImm(const Instruction* instr);
void DisassembleNEONScalar2RegMiscOnlyD(const Instruction* instr);
void DisassembleNEONFPScalar2RegMisc(const Instruction* instr);
void DisassembleNEONPolynomialMul(const Instruction* instr);
void DisassembleNEON4Same(const Instruction* instr);
void DisassembleNEONXar(const Instruction* instr);
void DisassembleNEONRax1(const Instruction* instr);
void DisassembleSHA512(const Instruction* instr);
void DisassembleMTELoadTag(const Instruction* instr);
void DisassembleMTEStoreTag(const Instruction* instr);
@ -238,6 +243,8 @@ class Disassembler : public DecoderVisitor {
void Disassemble_Xd_XnSP_Xm(const Instruction* instr);
void Disassemble_Xd_XnSP_XmSP(const Instruction* instr);
void VisitCryptoSM3(const Instruction* instr);
void Format(const Instruction* instr,
const char* mnemonic,
const char* format0,

View File

@ -32,11 +32,6 @@
#include "constants-aarch64.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-enum-enum-conversion"
#endif
namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------
@ -152,19 +147,19 @@ const unsigned kMTETagWidth = 4;
// Make these moved float constants backwards compatible
// with explicit vixl::aarch64:: namespace references.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kDoubleMantissaBits;
using vixl::kFloat16ExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP16PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP64NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
@ -222,9 +217,10 @@ enum VectorFormat {
kFormatVnQ = kFormatSVEQ | kFormatSVE,
kFormatVnO = kFormatSVEO | kFormatSVE,
// An artificial value, used by simulator trace tests and a few oddball
// Artificial values, used by simulator trace tests and a few oddball
// instructions (such as FMLAL).
kFormat2H = 0xfffffffe
kFormat2H = 0xfffffffe,
kFormat1Q = 0xfffffffd
};
// Instructions. ---------------------------------------------------------------
@ -1141,8 +1137,4 @@ class NEONFormatDecoder {
} // namespace aarch64
} // namespace vixl
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_

View File

@ -664,6 +664,10 @@ enum FPMacroNaNPropagationOption {
class MacroAssembler : public Assembler, public MacroAssemblerInterface {
public:
explicit MacroAssembler(
PositionIndependentCodeOption pic = PositionIndependentCode);
MacroAssembler(size_t capacity,
PositionIndependentCodeOption pic = PositionIndependentCode);
MacroAssembler(byte* buffer,
size_t capacity,
PositionIndependentCodeOption pic = PositionIndependentCode);
@ -1750,7 +1754,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(casah, Casah) \
V(caslh, Caslh) \
V(casalh, Casalh)
// clang-format on
// clang-format on
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
@ -1768,7 +1772,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(caspa, Caspa) \
V(caspl, Caspl) \
V(caspal, Caspal)
// clang-format on
// clang-format on
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const Register& rs, \
@ -1813,7 +1817,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(MASM##alb, ASM##alb) \
V(MASM##ah, ASM##ah) \
V(MASM##alh, ASM##alh)
// clang-format on
// clang-format on
#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM) \
void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
@ -2713,6 +2717,27 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
subps(xd, xn, xm);
}
void Cmpp(const Register& xn, const Register& xm) { Subps(xzr, xn, xm); }
void Chkfeat(const Register& xdn);
void Gcspushm(const Register& rt) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
gcspushm(rt);
}
void Gcspopm(const Register& rt = xzr) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
gcspopm(rt);
}
void Gcsss1(const Register& rt) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
gcsss1(rt);
}
void Gcsss2(const Register& rt) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
gcsss2(rt);
}
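A usage sketch for the new wrappers (illustrative register choices; assumes the GCS CPU feature is enabled on the MacroAssembler):
masm.Gcspushm(x30);  // Push the link register onto the active GCS.
masm.Gcspopm(x0);    // Pop the most recent GCS entry into x0.
masm.Gcspopm();      // Pop and discard (rt defaults to xzr).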
// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
@ -2762,6 +2787,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(pmull2, Pmull2) \
V(raddhn, Raddhn) \
V(raddhn2, Raddhn2) \
V(rax1, Rax1) \
V(rsubhn, Rsubhn) \
V(rsubhn2, Rsubhn2) \
V(saba, Saba) \
@ -2774,8 +2800,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(saddl2, Saddl2) \
V(saddw, Saddw) \
V(saddw2, Saddw2) \
V(sha1c, Sha1c) \
V(sha1m, Sha1m) \
V(sha1p, Sha1p) \
V(sha1su0, Sha1su0) \
V(sha256h, Sha256h) \
V(sha256h2, Sha256h2) \
V(sha256su1, Sha256su1) \
V(sha512h, Sha512h) \
V(sha512h2, Sha512h2) \
V(sha512su1, Sha512su1) \
V(shadd, Shadd) \
V(shsub, Shsub) \
V(sm3partw1, Sm3partw1) \
V(sm3partw2, Sm3partw2) \
V(smax, Smax) \
V(smaxp, Smaxp) \
V(smin, Smin) \
@ -2870,6 +2908,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(abs, Abs) \
V(addp, Addp) \
V(addv, Addv) \
V(aesd, Aesd) \
V(aese, Aese) \
V(aesimc, Aesimc) \
V(aesmc, Aesmc) \
V(cls, Cls) \
V(clz, Clz) \
V(cnt, Cnt) \
@ -2918,6 +2960,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(sadalp, Sadalp) \
V(saddlp, Saddlp) \
V(saddlv, Saddlv) \
V(sha1h, Sha1h) \
V(sha1su1, Sha1su1) \
V(sha256su0, Sha256su0) \
V(sha512su0, Sha512su0) \
V(smaxv, Smaxv) \
V(sminv, Sminv) \
V(sqabs, Sqabs) \
@ -3008,7 +3054,11 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
V(umlsl, Umlsl) \
V(umlsl2, Umlsl2) \
V(sudot, Sudot) \
V(usdot, Usdot)
V(usdot, Usdot) \
V(sm3tt1a, Sm3tt1a) \
V(sm3tt1b, Sm3tt1b) \
V(sm3tt2a, Sm3tt2a) \
V(sm3tt2b, Sm3tt2b)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
@ -3127,6 +3177,14 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
SVE_3VREG_COMMUTATIVE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
void Bcax(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
bcax(vd, vn, vm, va);
}
void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
@ -3167,6 +3225,14 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
SingleEmissionCheckScope guard(this);
dup(vd, rn);
}
void Eor3(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
eor3(vd, vn, vm, va);
}
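For reference, the two SHA-3 bitwise operations combine three sources into one destination; a semantics sketch (per the Arm FEAT_SHA3 definitions, on .16B operands):
// Eor3(vd, vn, vm, va): vd = vn ^ vm ^ va (three-way exclusive-OR).
// Bcax(vd, vn, vm, va): vd = vn ^ (vm & ~va) (bit clear and exclusive-OR).
masm.Eor3(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B());
masm.Bcax(v0.V16B(), v0.V16B(), v4.V16B(), v5.V16B());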
void Ext(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
@ -3463,6 +3529,14 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
SingleEmissionCheckScope guard(this);
st4(vt, vt2, vt3, vt4, lane, dst);
}
void Sm3ss1(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
const VRegister& va) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
sm3ss1(vd, vn, vm, va);
}
void Smov(const Register& rd, const VRegister& vn, int vn_index) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
@ -3473,6 +3547,14 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
SingleEmissionCheckScope guard(this);
umov(rd, vn, vn_index);
}
void Xar(const VRegister& vd,
const VRegister& vn,
const VRegister& vm,
int rotate) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
xar(vd, vn, vm, rotate);
}
void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
VIXL_ASSERT(allow_macro_instructions_);
SingleEmissionCheckScope guard(this);
@ -8580,6 +8662,16 @@ class UseScratchRegisterScope {
return AcquireFrom(available, kGoverningPRegisterMask).P();
}
// TODO: extend to other scratch register lists.
bool TryAcquire(const Register& required_reg) {
CPURegList* list = masm_->GetScratchRegisterList();
if (list->IncludesAliasOf(required_reg)) {
list->Remove(required_reg);
return true;
}
return false;
}
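A sketch of the intended use (illustrative):
UseScratchRegisterScope temps(&masm);
if (temps.TryAcquire(x16)) {
  // x16 was available; it is reserved until the scope closes.
} else {
  Register scratch = temps.AcquireX();  // Fall back to any scratch register.
}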
Register AcquireRegisterOfSize(int size_in_bits);
Register AcquireSameSizeAs(const Register& reg) {
return AcquireRegisterOfSize(reg.GetSizeInBits());

View File

@ -735,7 +735,7 @@ class SVEMemOperand {
class IntegerOperand {
public:
#define VIXL_INT_TYPES(V) \
V(char) V(short) V(int) V(long) V(long long) // NOLINT(runtime/int)
V(char) V(short) V(int) V(long) V(long long) // NOLINT(google-runtime-int)
#define VIXL_DECL_INT_OVERLOADS(T) \
/* These are allowed to be implicit constructors because this is a */ \
/* wrapper class that doesn't normally perform any type conversion. */ \
@ -993,7 +993,7 @@ class GenericOperand {
// We only support sizes up to X/D register sizes.
size_t mem_op_size_;
};
}
} // namespace vixl::aarch64
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_

View File

@ -575,6 +575,7 @@ class VRegister : public CPURegister {
VRegister V4S() const;
VRegister V1D() const;
VRegister V2D() const;
VRegister V1Q() const;
VRegister S4B() const;
bool IsValid() const { return IsValidVRegister(); }
@ -895,7 +896,7 @@ bool AreSameLaneSize(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg);
}
} // namespace vixl::aarch64
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_REGISTERS_AARCH64_H_

View File

@ -28,15 +28,18 @@
#define VIXL_AARCH64_SIMULATOR_AARCH64_H_
#include <memory>
#include <mutex>
#include <random>
#include <unordered_map>
#include <vector>
#include "../cpu-features.h"
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "../cpu-features.h"
#include "abi-aarch64.h"
#include "cpu-features-auditor-aarch64.h"
#include "debugger-aarch64.h"
#include "disasm-aarch64.h"
#include "instructions-aarch64.h"
#include "simulator-constants-aarch64.h"
@ -67,6 +70,28 @@ namespace aarch64 {
class Simulator;
struct RuntimeCallStructHelper;
enum class MemoryAccessResult { Success = 0, Failure = 1 };
// Try to access a piece of memory at the given address. Accessing that memory
// might raise a signal which, if handled by a custom signal handler, should
// set up the native and simulated context in order to continue. Returns
// whether the memory access failed (i.e. raised a signal) or succeeded.
MemoryAccessResult TryMemoryAccess(uintptr_t address, uintptr_t access_size);
#ifdef VIXL_ENABLE_IMPLICIT_CHECKS
// Access a byte of memory from the address at the given offset. If the memory
// could be accessed then return MemoryAccessResult::Success. If the memory
// could not be accessed, and therefore raised a signal, set up the simulated
// context and return MemoryAccessResult::Failure.
//
// If a signal is raised then it is expected that the signal handler will place
// MemoryAccessResult::Failure in the native return register and the address of
// _vixl_internal_AccessMemory_continue into the native instruction pointer.
extern "C" MemoryAccessResult _vixl_internal_ReadMemory(uintptr_t address,
uintptr_t offset);
extern "C" uintptr_t _vixl_internal_AccessMemory_continue();
#endif // VIXL_ENABLE_IMPLICIT_CHECKS
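A sketch of a cooperating SIGSEGV handler (assumptions: x86-64 Linux, and a hypothetical global `simulator` pointer; IsSimulatedMemoryAccess, ReplaceFaultAddress and GetSignalReturnAddress are declared further down in this header):
void SigsegvHandler(int sig, siginfo_t* info, void* context) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  uintptr_t fault_pc = static_cast<uintptr_t>(uc->uc_mcontext.gregs[REG_RIP]);
  if (simulator->IsSimulatedMemoryAccess(fault_pc)) {
    simulator->ReplaceFaultAddress(info, context);
    // Report the failure in the native return register and resume at the
    // continuation label, as the protocol above requires.
    uc->uc_mcontext.gregs[REG_RAX] =
        static_cast<greg_t>(MemoryAccessResult::Failure);
    uc->uc_mcontext.gregs[REG_RIP] =
        static_cast<greg_t>(simulator->GetSignalReturnAddress());
    return;
  }
  signal(sig, SIG_DFL);  // Not a simulated access: fall back to the default.
  raise(sig);
}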
class SimStack {
public:
SimStack() {}
@ -135,7 +160,7 @@ class SimStack {
// Allocate the stack, locking the parameters.
Allocated Allocate() {
size_t align_to = 1 << align_log2_;
size_t align_to = uint64_t{1} << align_log2_;
size_t l = AlignUp(limit_guard_size_, align_to);
size_t u = AlignUp(usable_size_, align_to);
size_t b = AlignUp(base_guard_size_, align_to);
@ -365,7 +390,7 @@ class Memory {
}
template <typename T, typename A>
T Read(A address, Instruction const* pc = nullptr) const {
std::optional<T> Read(A address, Instruction const* pc = nullptr) const {
T value;
VIXL_STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8) ||
@ -377,12 +402,16 @@ class Memory {
if (!IsMTETagsMatched(address, pc)) {
VIXL_ABORT_WITH_MSG("Tag mismatch.");
}
if (TryMemoryAccess(reinterpret_cast<uintptr_t>(base), sizeof(value)) ==
MemoryAccessResult::Failure) {
return std::nullopt;
}
memcpy(&value, base, sizeof(value));
return value;
}
template <typename T, typename A>
void Write(A address, T value, Instruction const* pc = nullptr) const {
bool Write(A address, T value, Instruction const* pc = nullptr) const {
VIXL_STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8) ||
(sizeof(value) == 16));
@ -393,11 +422,16 @@ class Memory {
if (!IsMTETagsMatched(address, pc)) {
VIXL_ABORT_WITH_MSG("Tag mismatch.");
}
if (TryMemoryAccess(reinterpret_cast<uintptr_t>(base), sizeof(value)) ==
MemoryAccessResult::Failure) {
return false;
}
memcpy(base, &value, sizeof(value));
return true;
}
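A caller-side sketch of the new contract (hypothetical helper): reads yield std::nullopt and writes yield false when the underlying access faults.
template <typename T, typename A>
bool CopyLane(const Memory& mem, A src, A dst) {
  std::optional<T> value = mem.Read<T>(src);
  if (!value.has_value()) return false;  // The read faulted; propagate it.
  return mem.Write<T>(dst, *value);
}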
template <typename A>
uint64_t ReadUint(int size_in_bytes, A address) const {
std::optional<uint64_t> ReadUint(int size_in_bytes, A address) const {
switch (size_in_bytes) {
case 1:
return Read<uint8_t>(address);
@ -413,7 +447,7 @@ class Memory {
}
template <typename A>
int64_t ReadInt(int size_in_bytes, A address) const {
std::optional<int64_t> ReadInt(int size_in_bytes, A address) const {
switch (size_in_bytes) {
case 1:
return Read<int8_t>(address);
@ -429,7 +463,7 @@ class Memory {
}
template <typename A>
void Write(int size_in_bytes, A address, uint64_t value) const {
bool Write(int size_in_bytes, A address, uint64_t value) const {
switch (size_in_bytes) {
case 1:
return Write(address, static_cast<uint8_t>(value));
@ -441,6 +475,7 @@ class Memory {
return Write(address, value);
}
VIXL_UNREACHABLE();
return false;
}
void AppendMetaData(MetaDataDepot* metadata_depot) {
@ -649,7 +684,7 @@ class LogicPRegister {
void SetAllBits() {
int chunk_size = sizeof(ChunkType) * kBitsPerByte;
ChunkType bits = GetUintMask(chunk_size);
ChunkType bits = static_cast<ChunkType>(GetUintMask(chunk_size));
for (int lane = 0;
lane < (static_cast<int>(register_.GetSizeInBits() / chunk_size));
lane++) {
@ -702,6 +737,8 @@ class LogicPRegister {
SimPRegister& register_;
};
using vixl_uint128_t = std::pair<uint64_t, uint64_t>;
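// By the conventions in this file, `first` holds the most-significant and
// `second` the least-significant 64 bits (see SetUint below).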
// Representation of a vector register, with typed getters and setters for lanes
// and additional information to represent lane state.
class LogicVRegister {
@ -830,6 +867,17 @@ class LogicVRegister {
}
}
void SetUint(VectorFormat vform, int index, vixl_uint128_t value) const {
if (LaneSizeInBitsFromFormat(vform) <= 64) {
SetUint(vform, index, value.second);
return;
}
// TODO: Extend this to SVE.
VIXL_ASSERT((vform == kFormat1Q) && (index == 0));
SetUint(kFormat2D, 0, value.second);
SetUint(kFormat2D, 1, value.first);
}
void SetUintArray(VectorFormat vform, const uint64_t* src) const {
ClearForWrite(vform);
for (int i = 0; i < LaneCountFromFormat(vform); i++) {
@ -1233,6 +1281,11 @@ class SimExclusiveGlobalMonitor {
uint32_t seed_;
};
class Debugger;
template <uint32_t mode>
uint64_t CryptoOp(uint64_t x, uint64_t y, uint64_t z);
class Simulator : public DecoderVisitor {
public:
explicit Simulator(Decoder* decoder,
@ -1248,7 +1301,7 @@ class Simulator : public DecoderVisitor {
#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \
(defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
(defined(_MSC_VER) || defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
// Templated `RunFrom` version taking care of passing arguments and returning
// the result value.
// This allows code like:
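//    uint64_t res = simulator.RunFrom<uint64_t, uint64_t>(entry_point, 0x123);
// (An illustrative sketch; `entry_point` stands for a hypothetical
// Instruction* produced by the assembler.)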
@ -1307,6 +1360,8 @@ class Simulator : public DecoderVisitor {
static const Instruction* kEndOfSimAddress;
// Simulation helpers.
bool IsSimulationFinished() const { return pc_ == kEndOfSimAddress; }
const Instruction* ReadPc() const { return pc_; }
VIXL_DEPRECATED("ReadPc", const Instruction* pc() const) { return ReadPc(); }
@ -1456,6 +1511,7 @@ class Simulator : public DecoderVisitor {
void SimulateNEONFPMulByElementLong(const Instruction* instr);
void SimulateNEONComplexMulByElement(const Instruction* instr);
void SimulateNEONDotProdByElement(const Instruction* instr);
void SimulateNEONSHA3(const Instruction* instr);
void SimulateMTEAddSubTag(const Instruction* instr);
void SimulateMTETagMaskInsert(const Instruction* instr);
void SimulateMTESubPointer(const Instruction* instr);
@ -1475,7 +1531,9 @@ class Simulator : public DecoderVisitor {
void SimulateSetGM(const Instruction* instr);
void SimulateSignedMinMax(const Instruction* instr);
void SimulateUnsignedMinMax(const Instruction* instr);
void SimulateSHA512(const Instruction* instr);
void VisitCryptoSM3(const Instruction* instr);
// Integer register accessors.
@ -2006,62 +2064,66 @@ class Simulator : public DecoderVisitor {
}
template <typename T, typename A>
T MemRead(A address) const {
std::optional<T> MemRead(A address) const {
Instruction const* pc = ReadPc();
return memory_.Read<T>(address, pc);
}
template <typename T, typename A>
void MemWrite(A address, T value) const {
bool MemWrite(A address, T value) const {
Instruction const* pc = ReadPc();
return memory_.Write(address, value, pc);
}
template <typename A>
uint64_t MemReadUint(int size_in_bytes, A address) const {
std::optional<uint64_t> MemReadUint(int size_in_bytes, A address) const {
return memory_.ReadUint(size_in_bytes, address);
}
template <typename A>
int64_t MemReadInt(int size_in_bytes, A address) const {
std::optional<int64_t> MemReadInt(int size_in_bytes, A address) const {
return memory_.ReadInt(size_in_bytes, address);
}
template <typename A>
void MemWrite(int size_in_bytes, A address, uint64_t value) const {
bool MemWrite(int size_in_bytes, A address, uint64_t value) const {
return memory_.Write(size_in_bytes, address, value);
}
void LoadLane(LogicVRegister dst,
bool LoadLane(LogicVRegister dst,
VectorFormat vform,
int index,
uint64_t addr) const {
unsigned msize_in_bytes = LaneSizeInBytesFromFormat(vform);
LoadUintToLane(dst, vform, msize_in_bytes, index, addr);
return LoadUintToLane(dst, vform, msize_in_bytes, index, addr);
}
void LoadUintToLane(LogicVRegister dst,
bool LoadUintToLane(LogicVRegister dst,
VectorFormat vform,
unsigned msize_in_bytes,
int index,
uint64_t addr) const {
dst.SetUint(vform, index, MemReadUint(msize_in_bytes, addr));
VIXL_DEFINE_OR_RETURN_FALSE(value, MemReadUint(msize_in_bytes, addr));
dst.SetUint(vform, index, value);
return true;
}
void LoadIntToLane(LogicVRegister dst,
bool LoadIntToLane(LogicVRegister dst,
VectorFormat vform,
unsigned msize_in_bytes,
int index,
uint64_t addr) const {
dst.SetInt(vform, index, MemReadInt(msize_in_bytes, addr));
VIXL_DEFINE_OR_RETURN_FALSE(value, MemReadInt(msize_in_bytes, addr));
dst.SetInt(vform, index, value);
return true;
}
void StoreLane(const LogicVRegister& src,
bool StoreLane(const LogicVRegister& src,
VectorFormat vform,
int index,
uint64_t addr) const {
unsigned msize_in_bytes = LaneSizeInBytesFromFormat(vform);
MemWrite(msize_in_bytes, addr, src.Uint(vform, index));
return MemWrite(msize_in_bytes, addr, src.Uint(vform, index));
}
uint64_t ComputeMemOperandAddress(const MemOperand& mem_op) const;
@ -2072,12 +2134,14 @@ class Simulator : public DecoderVisitor {
return ReadCPURegister<T>(operand.GetCPURegister());
} else {
VIXL_ASSERT(operand.IsMemOperand());
return MemRead<T>(ComputeMemOperandAddress(operand.GetMemOperand()));
auto res = MemRead<T>(ComputeMemOperandAddress(operand.GetMemOperand()));
VIXL_ASSERT(res);
return *res;
}
}
template <typename T>
void WriteGenericOperand(GenericOperand operand,
bool WriteGenericOperand(GenericOperand operand,
T value,
RegLogMode log_mode = LogRegWrites) {
if (operand.IsCPURegister()) {
@ -2093,8 +2157,9 @@ class Simulator : public DecoderVisitor {
WriteCPURegister(operand.GetCPURegister(), raw, log_mode);
} else {
VIXL_ASSERT(operand.IsMemOperand());
MemWrite(ComputeMemOperandAddress(operand.GetMemOperand()), value);
return MemWrite(ComputeMemOperandAddress(operand.GetMemOperand()), value);
}
return true;
}
bool ReadN() const { return nzcv_.GetN() != 0; }
@ -2470,12 +2535,16 @@ class Simulator : public DecoderVisitor {
// Other state updates, including system registers.
void PrintSystemRegister(SystemRegister id);
void PrintTakenBranch(const Instruction* target);
void PrintGCS(bool is_push, uint64_t addr, size_t entry);
void LogSystemRegister(SystemRegister id) {
if (ShouldTraceSysRegs()) PrintSystemRegister(id);
}
void LogTakenBranch(const Instruction* target) {
if (ShouldTraceBranches()) PrintTakenBranch(target);
}
void LogGCS(bool is_push, uint64_t addr, size_t entry) {
if (ShouldTraceSysRegs()) PrintGCS(is_push, addr, entry);
}
// Trace memory accesses.
@ -2837,7 +2906,7 @@ class Simulator : public DecoderVisitor {
}
if (offset == 0) {
while ((exclude & (1 << tag)) != 0) {
while ((exclude & (uint64_t{1} << tag)) != 0) {
tag = (tag + 1) % 16;
}
}
@ -2845,7 +2914,7 @@ class Simulator : public DecoderVisitor {
while (offset > 0) {
offset--;
tag = (tag + 1) % 16;
while ((exclude & (1 << tag)) != 0) {
while ((exclude & (uint64_t{1} << tag)) != 0) {
tag = (tag + 1) % 16;
}
}
@ -2857,12 +2926,15 @@ class Simulator : public DecoderVisitor {
return (addr & ~(UINT64_C(0xf) << 56)) | (tag << 56);
}
#if __linux__
#define VIXL_HAS_SIMULATED_MMAP
// Create or remove a mapping with memory protection. Memory attributes such
// as MTE and BTI are represented by metadata in Simulator.
void* Mmap(
void* address, size_t length, int prot, int flags, int fd, off_t offset);
int Munmap(void* address, size_t length, int prot);
#endif
// The common CPUFeatures interface with the set of available features.
@ -2885,7 +2957,7 @@ class Simulator : public DecoderVisitor {
// Also, the initialisation of the tuples in RuntimeCall(Non)Void is incorrect
// in GCC before 4.9.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51253
#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \
(defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
(defined(_MSC_VER) || defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1))
#define VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
@ -2943,7 +3015,10 @@ class Simulator : public DecoderVisitor {
R return_value = DoRuntimeCall(function,
argument_operands,
__local_index_sequence_for<P...>{});
WriteGenericOperand(abi.GetReturnGenericOperand<R>(), return_value);
bool succeeded =
WriteGenericOperand(abi.GetReturnGenericOperand<R>(), return_value);
USE(succeeded);
VIXL_ASSERT(succeeded);
}
template <typename R, typename... P>
@ -3076,8 +3151,9 @@ class Simulator : public DecoderVisitor {
// either MTE protected or not.
if (count != expected) {
std::stringstream sstream;
sstream << std::hex << "MTE WARNING : the memory region being unmapped "
"starting at address 0x"
sstream << std::hex
<< "MTE WARNING : the memory region being unmapped "
"starting at address 0x"
<< reinterpret_cast<uint64_t>(address)
<< "is not fully MTE protected.\n";
VIXL_WARNING(sstream.str().c_str());
@ -3115,6 +3191,52 @@ class Simulator : public DecoderVisitor {
meta_data_.RegisterBranchInterception(*function, callback);
}
// Return the current output stream in use by the simulator.
FILE* GetOutputStream() const { return stream_; }
bool IsDebuggerEnabled() const { return debugger_enabled_; }
void SetDebuggerEnabled(bool enabled) { debugger_enabled_ = enabled; }
Debugger* GetDebugger() const { return debugger_.get(); }
#ifdef VIXL_ENABLE_IMPLICIT_CHECKS
// Returns true if the faulting instruction address (usually the program
// counter or instruction pointer) comes from an internal VIXL memory access.
// This can be used by signal handlers to check if a signal was raised from
// the simulator (via TryMemoryAccess) before the actual
// access occurs.
bool IsSimulatedMemoryAccess(uintptr_t fault_pc) const {
return (fault_pc ==
reinterpret_cast<uintptr_t>(&_vixl_internal_ReadMemory));
}
// Get the instruction address of the internal VIXL memory access continuation
// label. Signal handlers can resume execution at this address to return to
// TryMemoryAccess which will continue simulation.
uintptr_t GetSignalReturnAddress() const {
return reinterpret_cast<uintptr_t>(&_vixl_internal_AccessMemory_continue);
}
// Replace the fault address reported by the kernel with the actual faulting
// address.
//
// This is required because TryMemoryAccess reads a section of memory one
// byte at a time, meaning the fault address reported may not be the base
// address of the memory being accessed.
void ReplaceFaultAddress(siginfo_t* siginfo, void* context) {
#ifdef __x86_64__
// The base address being accessed is passed in as the first argument to
// _vixl_internal_ReadMemory.
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
siginfo->si_addr = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RDI]);
#else
USE(siginfo);
USE(context);
#endif // __x86_64__
}
#endif // VIXL_ENABLE_IMPLICIT_CHECKS
protected:
const char* clr_normal;
const char* clr_flag_name;
@ -3195,8 +3317,9 @@ class Simulator : public DecoderVisitor {
uint64_t left,
uint64_t right,
int carry_in);
using vixl_uint128_t = std::pair<uint64_t, uint64_t>;
vixl_uint128_t Add128(vixl_uint128_t x, vixl_uint128_t y);
vixl_uint128_t Lsl128(vixl_uint128_t x, unsigned shift) const;
vixl_uint128_t Eor128(vixl_uint128_t x, vixl_uint128_t y) const;
vixl_uint128_t Mul64(uint64_t x, uint64_t y);
vixl_uint128_t Neg128(vixl_uint128_t x);
void LogicalHelper(const Instruction* instr, int64_t op2);
@ -3278,92 +3401,95 @@ class Simulator : public DecoderVisitor {
uint64_t PolynomialMult(uint64_t op1,
uint64_t op2,
int lane_size_in_bits) const;
vixl_uint128_t PolynomialMult128(uint64_t op1,
uint64_t op2,
int lane_size_in_bits) const;
void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr);
void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr);
void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr);
void ld1r(VectorFormat vform,
bool ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr);
bool ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr);
bool ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr);
bool ld1r(VectorFormat vform,
VectorFormat unpack_vform,
LogicVRegister dst,
uint64_t addr,
bool is_signed = false);
void ld2(VectorFormat vform,
bool ld2(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
uint64_t addr);
void ld2(VectorFormat vform,
bool ld2(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
int index,
uint64_t addr);
void ld2r(VectorFormat vform,
bool ld2r(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
uint64_t addr);
void ld3(VectorFormat vform,
bool ld3(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
uint64_t addr);
void ld3(VectorFormat vform,
bool ld3(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
int index,
uint64_t addr);
void ld3r(VectorFormat vform,
bool ld3r(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
uint64_t addr);
void ld4(VectorFormat vform,
bool ld4(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
LogicVRegister dst4,
uint64_t addr);
void ld4(VectorFormat vform,
bool ld4(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
LogicVRegister dst4,
int index,
uint64_t addr);
void ld4r(VectorFormat vform,
bool ld4r(VectorFormat vform,
LogicVRegister dst1,
LogicVRegister dst2,
LogicVRegister dst3,
LogicVRegister dst4,
uint64_t addr);
void st1(VectorFormat vform, LogicVRegister src, uint64_t addr);
void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr);
void st2(VectorFormat vform,
bool st1(VectorFormat vform, LogicVRegister src, uint64_t addr);
bool st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr);
bool st2(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
uint64_t addr);
void st2(VectorFormat vform,
bool st2(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
int index,
uint64_t addr);
void st3(VectorFormat vform,
bool st3(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
LogicVRegister src3,
uint64_t addr);
void st3(VectorFormat vform,
bool st3(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
LogicVRegister src3,
int index,
uint64_t addr);
void st4(VectorFormat vform,
bool st4(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
LogicVRegister src3,
LogicVRegister src4,
uint64_t addr);
void st4(VectorFormat vform,
bool st4(VectorFormat vform,
LogicVRegister src,
LogicVRegister src2,
LogicVRegister src3,
@ -3649,6 +3775,10 @@ class Simulator : public DecoderVisitor {
LogicVRegister dst,
const LogicVRegister& src,
int rotation);
LogicVRegister rol(VectorFormat vform,
LogicVRegister dst,
const LogicVRegister& src,
int rotation);
LogicVRegister ext(VectorFormat vform,
LogicVRegister dst,
const LogicVRegister& src1,
@ -4373,6 +4503,90 @@ class Simulator : public DecoderVisitor {
LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2);
template <unsigned N>
static void SHARotateEltsLeftOne(uint64_t (&x)[N]) {
VIXL_STATIC_ASSERT(N == 4);
uint64_t temp = x[3];
x[3] = x[2];
x[2] = x[1];
x[1] = x[0];
x[0] = temp;
}
template <uint32_t mode>
LogicVRegister sha1(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2) {
uint64_t y = src1.Uint(kFormat4S, 0);
uint64_t sd[4] = {};
srcdst.UintArray(kFormat4S, sd);
for (unsigned i = 0; i < ArrayLength(sd); i++) {
uint64_t t = CryptoOp<mode>(sd[1], sd[2], sd[3]);
y += RotateLeft(sd[0], 5, kSRegSize) + t;
y += src2.Uint(kFormat4S, i);
sd[1] = RotateLeft(sd[1], 30, kSRegSize);
// y:sd = ROL(y:sd, 32)
SHARotateEltsLeftOne(sd);
std::swap(sd[0], y);
}
srcdst.SetUintArray(kFormat4S, sd);
return srcdst;
}
LogicVRegister sha2h(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2,
bool part1);
LogicVRegister sha2su0(LogicVRegister srcdst, const LogicVRegister& src1);
LogicVRegister sha2su1(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister sha512h(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister sha512h2(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister sha512su0(LogicVRegister srcdst, const LogicVRegister& src1);
LogicVRegister sha512su1(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister aes(LogicVRegister srcdst,
const LogicVRegister& src1,
bool decrypt);
LogicVRegister aesmix(LogicVRegister srcdst,
const LogicVRegister& src1,
bool inverse);
LogicVRegister sm3partw1(LogicVRegister dst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister sm3partw2(LogicVRegister dst,
const LogicVRegister& src1,
const LogicVRegister& src2);
LogicVRegister sm3ss1(LogicVRegister dst,
const LogicVRegister& src1,
const LogicVRegister& src2,
const LogicVRegister& src3);
LogicVRegister sm3tt1(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2,
int index,
bool is_a);
LogicVRegister sm3tt2(LogicVRegister srcdst,
const LogicVRegister& src1,
const LogicVRegister& src2,
int index,
bool is_a);
#define NEON_3VREG_LOGIC_LIST(V) \
V(addhn) \
V(addhn2) \
@ -4940,7 +5154,8 @@ class Simulator : public DecoderVisitor {
unsigned zt_code,
const LogicSVEAddressVector& addr);
// Load each active zt<i>[lane] from `addr.GetElementAddress(lane, ...)`.
void SVEStructuredLoadHelper(VectorFormat vform,
// Returns false if a load failed.
bool SVEStructuredLoadHelper(VectorFormat vform,
const LogicPRegister& pg,
unsigned zt_code,
const LogicSVEAddressVector& addr,
@ -5138,10 +5353,12 @@ class Simulator : public DecoderVisitor {
bool CanReadMemory(uintptr_t address, size_t size);
#ifndef _WIN32
// CanReadMemory needs placeholder file descriptors, so we use a pipe. We can
// save some system call overhead by opening them on construction, rather than
// on every call to CanReadMemory.
int placeholder_pipe_fd_[2];
#endif
template <typename T>
static T FPDefaultNaN();
@ -5220,11 +5437,15 @@ class Simulator : public DecoderVisitor {
CPUFeaturesAuditor cpu_features_auditor_;
std::vector<CPUFeatures> saved_cpu_features_;
// State for *rand48 functions, used to simulate randomness with repeatable
// linear_congruential_engine, used to simulate randomness with repeatable
// behaviour (so that tests are deterministic). This is used to simulate RNDR
// and RNDRRS, as well as to simulate a source of entropy for architecturally
// undefined behaviour.
uint16_t rand_state_[3];
std::linear_congruential_engine<uint64_t,
0x5DEECE66D,
0xB,
static_cast<uint64_t>(1) << 48>
rand_gen_;
// A configurable size of SVE vector registers.
unsigned vector_length_;
@ -5232,6 +5453,167 @@ class Simulator : public DecoderVisitor {
// Representation of memory attributes such as MTE tagging and BTI page
// protection in addition to branch interceptions.
MetaDataDepot meta_data_;
// True if the debugger is enabled and might get entered.
bool debugger_enabled_;
// Debugger for the simulator.
std::unique_ptr<Debugger> debugger_;
// The Guarded Control Stack is represented using a vector, where the more
// recently stored addresses are at higher-numbered indices.
using GuardedControlStack = std::vector<uint64_t>;
// The GCSManager handles the synchronisation of GCS across multiple
// Simulator instances. Each Simulator has its own stack, but all share
// a GCSManager instance. This allows exchanging stacks between Simulators
// in a threaded application.
class GCSManager {
public:
// Allocate a new Guarded Control Stack and add it to the vector of stacks.
uint64_t AllocateStack() {
const std::lock_guard<std::mutex> lock(stacks_mtx_);
GuardedControlStack* new_stack = new GuardedControlStack;
uint64_t result;
// Put the new stack into the first available slot.
for (result = 0; result < stacks_.size(); result++) {
if (stacks_[result] == nullptr) {
stacks_[result] = new_stack;
break;
}
}
// If there were no slots, create a new one.
if (result == stacks_.size()) {
stacks_.push_back(new_stack);
}
// Shift the index to look like a stack pointer aligned to a page.
result <<= kPageSizeLog2;
// Push the tagged index onto the new stack as a seal.
new_stack->push_back(result + 1);
return result;
}
// Free a Guarded Control Stack and set the stacks_ slot to null.
void FreeStack(uint64_t gcs) {
const std::lock_guard<std::mutex> lock(stacks_mtx_);
uint64_t gcs_index = GetGCSIndex(gcs);
GuardedControlStack* gcsptr = stacks_[gcs_index];
if (gcsptr == nullptr) {
VIXL_ABORT_WITH_MSG("Tried to free unallocated GCS ");
} else {
delete gcsptr;
stacks_[gcs_index] = nullptr;
}
}
// Get a pointer to the GCS vector using a GCS id.
GuardedControlStack* GetGCSPtr(uint64_t gcs) const {
return stacks_[GetGCSIndex(gcs)];
}
private:
uint64_t GetGCSIndex(uint64_t gcs) const { return gcs >> 12; }
std::vector<GuardedControlStack*> stacks_;
std::mutex stacks_mtx_;
};
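A worked sketch of the id encoding (kPageSizeLog2 is 12 here, matching the GetGCSIndex shift):
GCSManager& m = GetGCSManager();
uint64_t gcs0 = m.AllocateStack();  // Slot 0 -> id 0x0000, sealed with 0x0001.
uint64_t gcs1 = m.AllocateStack();  // Slot 1 -> id 0x1000 (1 << 12).
// GetGCSIndex(0x1000) == 1, so GetGCSPtr(gcs1) recovers the second stack.
m.FreeStack(gcs1);                  // Slot 1 is now reusable.
m.FreeStack(gcs0);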
// A GCS id indicating no GCS has been allocated.
static const uint64_t kGCSNoStack = kPageSize - 1;
uint64_t gcs_;
bool gcs_enabled_;
public:
GCSManager& GetGCSManager() {
static GCSManager manager;
return manager;
}
void EnableGCSCheck() { gcs_enabled_ = true; }
void DisableGCSCheck() { gcs_enabled_ = false; }
bool IsGCSCheckEnabled() const { return gcs_enabled_; }
private:
bool IsAllocatedGCS(uint64_t gcs) const { return gcs != kGCSNoStack; }
void ResetGCSState() {
GCSManager& m = GetGCSManager();
if (IsAllocatedGCS(gcs_)) {
m.FreeStack(gcs_);
}
ActivateGCS(m.AllocateStack());
GCSPop(); // Remove seal.
}
GuardedControlStack* GetGCSPtr(uint64_t gcs) {
GCSManager& m = GetGCSManager();
GuardedControlStack* result = m.GetGCSPtr(gcs);
return result;
}
GuardedControlStack* GetActiveGCSPtr() { return GetGCSPtr(gcs_); }
uint64_t ActivateGCS(uint64_t gcs) {
uint64_t outgoing_gcs = gcs_;
gcs_ = gcs;
return outgoing_gcs;
}
void GCSPush(uint64_t addr) {
GetActiveGCSPtr()->push_back(addr);
size_t entry = GetActiveGCSPtr()->size() - 1;
LogGCS(/* is_push = */ true, addr, entry);
}
uint64_t GCSPop() {
GuardedControlStack* gcs = GetActiveGCSPtr();
if (gcs->empty()) {
return 0;
}
uint64_t return_addr = gcs->back();
size_t entry = gcs->size() - 1;
gcs->pop_back();
LogGCS(/* is_push = */ false, return_addr, entry);
return return_addr;
}
uint64_t GCSPeek() {
GuardedControlStack* gcs = GetActiveGCSPtr();
if (gcs->empty()) {
return 0;
}
uint64_t return_addr = gcs->back();
return return_addr;
}
void ReportGCSFailure(const char* msg) {
if (IsGCSCheckEnabled()) {
GuardedControlStack* gcs = GetActiveGCSPtr();
printf("%s", msg);
if (gcs == nullptr) {
printf("GCS pointer is null\n");
} else {
printf("GCS records, most recent first:\n");
int most_recent_index = static_cast<int>(gcs->size()) - 1;
for (int i = 0; i < 8; i++) {
if (!gcs->empty()) {
uint64_t entry = gcs->back();
gcs->pop_back();
int index = most_recent_index - i;
printf(" gcs%" PRIu64 "[%d]: 0x%016" PRIx64 "\n",
gcs_,
index,
entry);
}
}
printf("End of GCS records.\n");
}
VIXL_ABORT_WITH_MSG("GCS failed ");
}
}
};
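To show how the helpers above fit together, here is a hedged sketch of the check a simulated procedure return could perform, written as if it were another member of the class; SimulateReturn and its lr parameter are invented for illustration, while GCSPeek, GCSPop, IsGCSCheckEnabled and ReportGCSFailure are the members defined above:

void SimulateReturn(uint64_t lr) {
  uint64_t expected = GCSPeek();  // 0 if the active stack is empty
  if (IsGCSCheckEnabled() && (expected != 0) && (expected != lr)) {
    ReportGCSFailure("GCS mismatch on return:\n");
  }
  GCSPop();  // consume the record on a successful return
}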
#if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) && __cplusplus < 201402L

View File

@ -43,6 +43,9 @@ namespace internal {
class AssemblerBase {
public:
AssemblerBase() : allow_assembler_(false) {}
explicit AssemblerBase(size_t capacity)
: buffer_(capacity), allow_assembler_(false) {}
AssemblerBase(byte* buffer, size_t capacity)
: buffer_(buffer, capacity), allow_assembler_(false) {}

View File

@ -36,11 +36,21 @@ namespace vixl {
class CodeBuffer {
public:
static const size_t kDefaultCapacity = 4 * KBytes;
explicit CodeBuffer(size_t capacity = kDefaultCapacity);
CodeBuffer(byte* buffer, size_t capacity);
~CodeBuffer() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION;
void Reset();
// Make the buffer executable or writable. These states are mutually
// exclusive.
// Note that these require page-aligned memory blocks, which we can only
// guarantee with VIXL_CODE_BUFFER_MMAP.
void SetExecutable();
void SetWritable();
ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const {
ptrdiff_t cursor_offset = cursor_ - buffer_;
VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset));
@ -136,6 +146,10 @@ class CodeBuffer {
return GetCapacity();
}
bool IsManaged() const { return managed_; }
void Grow(size_t new_capacity);
bool IsDirty() const { return dirty_; }
void SetClean() { dirty_ = false; }
@ -144,9 +158,24 @@ class CodeBuffer {
return GetRemainingBytes() >= amount;
}
void EnsureSpaceFor(size_t amount, bool* has_grown) {
bool is_full = !HasSpaceFor(amount);
if (is_full) Grow(capacity_ * 2 + amount);
VIXL_ASSERT(has_grown != NULL);
*has_grown = is_full;
}
void EnsureSpaceFor(size_t amount) {
bool placeholder;
EnsureSpaceFor(amount, &placeholder);
}
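A short sketch of the growth policy above: when the buffer is full, the new capacity is capacity_ * 2 + amount, so a single call always creates enough room.

CodeBuffer buffer(16);  // small managed buffer
bool has_grown;
buffer.EnsureSpaceFor(64, &has_grown);
// has_grown == true: capacity grew to 16 * 2 + 64 = 96 bytes.
buffer.EnsureSpaceFor(8, &has_grown);
// has_grown == false: the existing capacity already sufficed.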
private:
// Backing store of the buffer.
byte* buffer_;
// If true, the backing store is allocated and deallocated by the buffer and
// can grow on demand. If false, the backing store is provided by the user
// and cannot be resized internally.
bool managed_;
// Pointer to the next location to be written.
byte* cursor_;
// True if there has been any write since the buffer was created or cleaned.

View File

@ -68,14 +68,19 @@ class CodeBufferCheckScope {
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize)
: assembler_(NULL), initialised_(false) {
: CodeBufferCheckScope() {
Open(assembler, size, check_policy, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
CodeBufferCheckScope() : assembler_(NULL), initialised_(false) {
CodeBufferCheckScope()
: assembler_(NULL),
assert_policy_(kMaximumSize),
limit_(0),
previous_allow_assembler_(false),
initialised_(false) {
// Nothing to do.
}
@ -90,7 +95,7 @@ class CodeBufferCheckScope {
VIXL_ASSERT(assembler != NULL);
assembler_ = assembler;
if (check_policy == kReserveBufferSpace) {
VIXL_ASSERT(assembler->GetBuffer()->HasSpaceFor(size));
assembler->GetBuffer()->EnsureSpaceFor(size);
}
#ifdef VIXL_DEBUG
limit_ = assembler_->GetSizeOfCodeGenerated() + size;
@ -152,14 +157,15 @@ class EmissionCheckScope : public CodeBufferCheckScope {
// constructed.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
SizePolicy size_policy = kMaximumSize)
: EmissionCheckScope() {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
EmissionCheckScope() {}
EmissionCheckScope() : masm_(nullptr), pool_policy_(kBlockPools) {}
virtual ~EmissionCheckScope() { Close(); }
@ -250,14 +256,15 @@ class ExactAssemblyScope : public EmissionCheckScope {
// constructed.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
SizePolicy size_policy = kExactSize)
: ExactAssemblyScope() {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
ExactAssemblyScope() {}
ExactAssemblyScope() : previous_allow_macro_assembler_(false) {}
virtual ~ExactAssemblyScope() { Close(); }

View File

@ -29,6 +29,7 @@
#define VIXL_COMPILER_INTRINSICS_H
#include <limits.h>
#include "globals-vixl.h"
namespace vixl {
@ -112,7 +113,8 @@ inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
#if COMPILER_HAS_BUILTIN_CLRSB
VIXL_ASSERT((LLONG_MIN <= value) && (value <= LLONG_MAX));
int ll_width = sizeof(long long) * kBitsPerByte; // NOLINT(runtime/int)
int ll_width =
sizeof(long long) * kBitsPerByte; // NOLINT(google-runtime-int)
int result = __builtin_clrsbll(value) - (ll_width - width);
// Check that the value fits in the specified width.
VIXL_ASSERT(result >= 0);

View File

@ -201,7 +201,8 @@ namespace vixl {
/* Extended BFloat16 instructions */ \
V(kEBF16, "EBF16", "ebf16") \
V(kSVE_EBF16, "EBF16 (SVE)", "sveebf16") \
V(kCSSC, "CSSC", "cssc")
V(kCSSC, "CSSC", "cssc") \
V(kGCS, "GCS", "gcs")
// clang-format on

View File

@ -27,8 +27,8 @@
#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H
#if __cplusplus < 201402L
#error VIXL requires C++14
#if __cplusplus < 201703L
#error VIXL requires C++17
#endif
// Get standard C99 macros for integer types.
@ -215,6 +215,18 @@ inline void USE(const T1&, const T2&, const T3&, const T4&) {}
} while (0)
#endif
// Evaluate 'init' to a std::optional and return if it is empty. If 'init'
// is not empty, define a variable 'name' holding the value inside the
// std::optional.
#define VIXL_DEFINE_OR_RETURN(name, init) \
auto opt##name = init; \
if (!opt##name) return; \
auto name = *opt##name;
#define VIXL_DEFINE_OR_RETURN_FALSE(name, init) \
auto opt##name = init; \
if (!opt##name) return false; \
auto name = *opt##name;
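A hypothetical usage sketch of these macros; TryGetScale is an invented helper returning std::optional<int>, present only to show the expansion.

std::optional<int> TryGetScale(unsigned size_in_bytes);  // invented example

bool EmitScaledAccess(unsigned size_in_bytes) {
  VIXL_DEFINE_OR_RETURN_FALSE(scale, TryGetScale(size_in_bytes));
  // 'scale' is the unwrapped int; control only reaches this point if the
  // optional held a value.
  return scale >= 0;
}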
#if __cplusplus >= 201103L
#define VIXL_NO_RETURN [[noreturn]]
#else

View File

@ -1,4 +1,3 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -27,9 +26,8 @@
#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_
#include <cstring>
#include <algorithm>
#include <cstring>
#include <vector>
#include "globals-vixl.h"
@ -92,6 +90,7 @@ class InvalSet {
public:
InvalSet();
~InvalSet() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION;
InvalSet(InvalSet&&); // movable
static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
static const KeyType kInvalidKey = INVALID_KEY;
@ -245,12 +244,11 @@ class InvalSet {
template <class S>
class InvalSetIterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = typename S::_ElementType;
using difference_type = std::ptrdiff_t;
using pointer = typename S::_ElementType*;
using reference = typename S::_ElementType&;
using pointer = S*;
using reference = S&;
private:
// Redefine types to mirror the associated set types.
@ -327,6 +325,27 @@ InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
#endif
}
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet(InvalSet&& other)
: valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) {
VIXL_ASSERT(other.monitor() == 0);
if (this != &other) {
sorted_ = other.sorted_;
size_ = other.size_;
#ifdef VIXL_DEBUG
monitor_ = 0;
#endif
if (other.IsUsingVector()) {
vector_ = other.vector_;
other.vector_ = NULL;
} else {
std::move(other.preallocated_,
other.preallocated_ + other.size_,
preallocated_);
}
other.clear();
}
}
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet()

View File

@ -27,10 +27,10 @@
#ifndef VIXL_POOL_MANAGER_IMPL_H_
#define VIXL_POOL_MANAGER_IMPL_H_
#include "pool-manager.h"
#include <algorithm>
#include "assembler-base-vixl.h"
#include "pool-manager.h"
namespace vixl {
@ -487,7 +487,7 @@ void PoolManager<T>::Release(T pc) {
}
template <typename T>
PoolManager<T>::~PoolManager<T>() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
PoolManager<T>::~PoolManager() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
#ifdef VIXL_DEBUG
// Check for unbound objects.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
@ -517,6 +517,6 @@ int PoolManager<T>::GetPoolSizeForTest() const {
}
return size;
}
}
} // namespace vixl
#endif // VIXL_POOL_MANAGER_IMPL_H_

View File

@ -27,11 +27,10 @@
#ifndef VIXL_POOL_MANAGER_H_
#define VIXL_POOL_MANAGER_H_
#include <stdint.h>
#include <cstddef>
#include <limits>
#include <map>
#include <stdint.h>
#include <vector>
#include "globals-vixl.h"

View File

@ -239,6 +239,11 @@ inline uint64_t RotateRight(uint64_t value,
return value & width_mask;
}
inline uint64_t RotateLeft(uint64_t value,
unsigned int rotate,
unsigned int width) {
return RotateRight(value, width - rotate, width);
}
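A worked example of the identity used above (rotating left by r within w bits equals rotating right by w - r):

uint64_t v = 0x81;                          // 0b1000'0001
uint64_t left = RotateLeft(v, 3, 8);        // 0b0000'1100 == 0x0c
VIXL_ASSERT(left == RotateRight(v, 5, 8));  // 5 == 8 - 3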
// Wrapper class for passing FP16 values through the assembler.
// This is purely to aid with type checking/casting.
@ -291,6 +296,12 @@ T UnsignedNegate(T value) {
return ~value + 1;
}
template <typename T>
bool CanBeNegated(T value) {
VIXL_STATIC_ASSERT(std::is_signed<T>::value);
return value != std::numeric_limits<T>::min();
}
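The guard exists because negating the minimum value of a signed type overflows. For example:

VIXL_ASSERT(!CanBeNegated(std::numeric_limits<int64_t>::min()));
VIXL_ASSERT(CanBeNegated(INT64_C(-42)));  // -(-42) is representable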
// An absolute operation for signed integers that is defined for results outside
// the representable range. Specifically, Abs(MIN_INT) is MIN_INT.
template <typename T>
@ -548,13 +559,14 @@ inline T SignExtend(T val, int size_in_bits) {
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
VIXL_ASSERT((uint64_t{1} << block_bytes_log2) <= sizeof(value));
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = UINT64_C(0xff00000000000000);
for (int i = 7; i >= 0; i--) {
bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
bytes[i] =
static_cast<uint8_t>((static_cast<uint64_t>(value) & mask) >> (i * 8));
mask >>= 8;
}
@ -611,6 +623,39 @@ bool IsWordAligned(T pointer) {
return IsAligned<4>(pointer);
}
template <unsigned BITS, typename T>
bool IsRepeatingPattern(T value) {
VIXL_STATIC_ASSERT(std::is_unsigned<T>::value);
VIXL_ASSERT(IsMultiple(sizeof(value) * kBitsPerByte, BITS));
VIXL_ASSERT(IsMultiple(BITS, 2));
VIXL_STATIC_ASSERT(BITS >= 2);
#if (defined(__x86_64__) || defined(__i386)) && \
__clang_major__ >= 17 && __clang_major__ <= 19
// Workaround for https://github.com/llvm/llvm-project/issues/108722
unsigned hbits = BITS / 2;
T midmask = (~static_cast<T>(0) >> BITS) << hbits;
// E.g. for bytes in a word (0xb3b2b1b0): .b3b2b1. == .b2b1b0.
return (((value >> hbits) & midmask) == ((value << hbits) & midmask));
#else
return value == RotateRight(value, BITS, sizeof(value) * kBitsPerByte);
#endif
}
template <typename T>
bool AllBytesMatch(T value) {
return IsRepeatingPattern<kBitsPerByte>(value);
}
template <typename T>
bool AllHalfwordsMatch(T value) {
return IsRepeatingPattern<kBitsPerByte * 2>(value);
}
template <typename T>
bool AllWordsMatch(T value) {
return IsRepeatingPattern<kBitsPerByte * 4>(value);
}
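Some illustrative values for the helpers above:

VIXL_ASSERT(AllBytesMatch(UINT64_C(0x5a5a5a5a5a5a5a5a)));
VIXL_ASSERT(AllHalfwordsMatch(UINT64_C(0x00ff00ff00ff00ff)));
VIXL_ASSERT(AllWordsMatch(UINT64_C(0x1234567812345678)));
VIXL_ASSERT(!AllWordsMatch(UINT64_C(0x0123456789abcdef)));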
// Increment a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>

View File

@ -25,9 +25,10 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "assembler-aarch64.h"
#include <cmath>
#include "assembler-aarch64.h"
#include "macro-assembler-aarch64.h"
namespace vixl {
@ -1176,8 +1177,7 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
VIXL_ASSERT(addr.IsImmediateOffset());
unsigned size =
CalcLSPairDataSize(static_cast<LoadStorePairOp>(
static_cast<uint32_t>(op) & static_cast<uint32_t>(LoadStorePairMask)));
CalcLSPairDataSize(static_cast<LoadStorePairOp>(op & LoadStorePairMask));
VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), size));
int offset = static_cast<int>(addr.GetOffset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
@ -1918,6 +1918,12 @@ void Assembler::sys(int op, const Register& xt) {
}
void Assembler::sysl(int op, const Register& xt) {
VIXL_ASSERT(xt.Is64Bits());
Emit(SYSL | SysOp(op) | Rt(xt));
}
void Assembler::dc(DataCacheOp op, const Register& rt) {
if (op == CVAP) VIXL_ASSERT(CPUHas(CPUFeatures::kDCPoP));
if (op == CVADP) VIXL_ASSERT(CPUHas(CPUFeatures::kDCCVADP));
@ -1930,6 +1936,35 @@ void Assembler::ic(InstructionCacheOp op, const Register& rt) {
sys(op, rt);
}
void Assembler::gcspushm(const Register& rt) {
VIXL_ASSERT(CPUHas(CPUFeatures::kGCS));
sys(GCSPUSHM, rt);
}
void Assembler::gcspopm(const Register& rt) {
VIXL_ASSERT(CPUHas(CPUFeatures::kGCS));
sysl(GCSPOPM, rt);
}
void Assembler::gcsss1(const Register& rt) {
VIXL_ASSERT(CPUHas(CPUFeatures::kGCS));
sys(GCSSS1, rt);
}
void Assembler::gcsss2(const Register& rt) {
VIXL_ASSERT(CPUHas(CPUFeatures::kGCS));
sysl(GCSSS2, rt);
}
void Assembler::chkfeat(const Register& rd) {
VIXL_ASSERT(rd.Is(x16));
USE(rd);
hint(CHKFEAT);
}
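A sketch of emitting the new GCS instructions, assuming masm is an Assembler whose enabled CPUFeatures include kGCS:

masm.gcspushm(x0);  // push x0 onto the current Guarded Control Stack
masm.gcspopm(x1);   // pop the top GCS entry into x1
masm.gcsss1(x2);    // first half of a stack switch, stack id in x2
masm.gcsss2(x3);    // second half of the switch (sysl form, result in x3)
masm.chkfeat(x16);  // feature check; the encoding requires x16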
void Assembler::hint(SystemHint code) { hint(static_cast<int>(code)); }
@ -2913,6 +2948,25 @@ void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
}
void Assembler::pmull(const VRegister& vd,
const VRegister& vn,
const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(AreSameFormat(vn, vm));
VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is1D() && vd.Is1Q()));
VIXL_ASSERT(CPUHas(CPUFeatures::kPmull1Q) || vd.Is8H());
Emit(VFormat(vn) | NEON_PMULL | Rm(vm) | Rn(vn) | Rd(vd));
}
void Assembler::pmull2(const VRegister& vd,
const VRegister& vn,
const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(AreSameFormat(vn, vm));
VIXL_ASSERT((vn.Is16B() && vd.Is8H()) || (vn.Is2D() && vd.Is1Q()));
VIXL_ASSERT(CPUHas(CPUFeatures::kPmull1Q) || vd.Is8H());
Emit(VFormat(vn) | NEON_PMULL2 | Rm(vm) | Rn(vn) | Rd(vd));
}
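The asserts above gate the 1Q forms behind kPmull1Q, while the 8H forms need only kNEON. A sketch of the four shapes:

masm.pmull(v0.V8H(), v1.V8B(), v2.V8B());     // kNEON is enough
masm.pmull(v0.V1Q(), v1.V1D(), v2.V1D());     // additionally needs kPmull1Q
masm.pmull2(v0.V8H(), v1.V16B(), v2.V16B());  // upper-half 8H form
masm.pmull2(v0.V1Q(), v1.V2D(), v2.V2D());    // upper-half 1Q form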
void Assembler::NEON3DifferentL(const VRegister& vd,
const VRegister& vn,
@ -2925,7 +2979,7 @@ void Assembler::NEON3DifferentL(const VRegister& vd,
(vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
Instr format, op = vop;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vn);
} else {
format = VFormat(vn);
@ -2960,8 +3014,6 @@ void Assembler::NEON3DifferentHN(const VRegister& vd,
// clang-format off
#define NEON_3DIFF_LONG_LIST(V) \
V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \
V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \
V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
@ -3650,7 +3702,7 @@ void Assembler::NEONFPConvertToInt(const VRegister& vd,
Instr op) {
if (vn.IsScalar()) {
VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
}
Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
}
@ -3662,9 +3714,9 @@ void Assembler::NEONFP16ConvertToInt(const VRegister& vd,
VIXL_ASSERT(AreSameFormat(vd, vn));
VIXL_ASSERT(vn.IsLaneSizeH());
if (vn.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
} else if (vn.Is8H()) {
op |= static_cast<Instr>(NEON_Q);
op |= NEON_Q;
}
Emit(op | Rn(vn) | Rd(vd));
}
@ -3838,7 +3890,7 @@ void Assembler::NEON3Same(const VRegister& vd,
Instr format, op = vop;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vd);
} else {
format = VFormat(vd);
@ -3890,18 +3942,15 @@ void Assembler::NEON3SameFP16(const VRegister& vd,
Instr op; \
if (vd.IsScalar()) { \
if (vd.Is1H()) { \
if ((static_cast<uint32_t>(SCA_OP_H) & \
static_cast<uint32_t>(NEONScalar2RegMiscFP16FMask)) == \
static_cast<uint32_t>(NEONScalar2RegMiscFP16Fixed)) { \
if ((SCA_OP_H & NEONScalar2RegMiscFP16FMask) == \
NEONScalar2RegMiscFP16Fixed) { \
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \
} else { \
VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \
} \
op = SCA_OP_H; \
} else { \
if ((static_cast<uint32_t>(SCA_OP) & \
static_cast<uint32_t>(NEONScalar2RegMiscFMask)) == \
static_cast<uint32_t>(NEONScalar2RegMiscFixed)) { \
if ((SCA_OP & NEONScalar2RegMiscFMask) == NEONScalar2RegMiscFixed) { \
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
} \
VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
@ -3915,7 +3964,7 @@ void Assembler::NEON3SameFP16(const VRegister& vd,
VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \
op = VEC_OP##_H; \
if (vd.Is8H()) { \
op |= static_cast<Instr>(NEON_Q); \
op |= NEON_Q; \
} \
} else { \
op = VEC_OP; \
@ -3981,7 +4030,7 @@ void Assembler::NEON2RegMisc(const VRegister& vd,
Instr format, op = vop;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vd);
} else {
format = VFormat(vd);
@ -4057,7 +4106,7 @@ void Assembler::NEONFP2RegMisc(const VRegister& vd,
Instr op = vop;
if (vd.IsScalar()) {
VIXL_ASSERT(vd.Is1S() || vd.Is1D());
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
} else {
VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
}
@ -4077,11 +4126,11 @@ void Assembler::NEONFP2RegMiscFP16(const VRegister& vd,
Instr op = vop;
if (vd.IsScalar()) {
VIXL_ASSERT(vd.Is1H());
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
} else {
VIXL_ASSERT(vd.Is4H() || vd.Is8H());
if (vd.Is8H()) {
op |= static_cast<Instr>(NEON_Q);
op |= NEON_Q;
}
}
@ -4273,9 +4322,7 @@ NEON_3SAME_LIST(VIXL_DEFINE_ASM_FUNC)
op = SCA_OP_H; \
} else { \
VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); \
if ((static_cast<uint32_t>(SCA_OP) & \
static_cast<uint32_t>(NEONScalar3SameFMask)) == \
static_cast<uint32_t>(NEONScalar3SameFixed)) { \
if ((SCA_OP & NEONScalar3SameFMask) == NEONScalar3SameFixed) { \
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \
} else if (vd.Is1H()) { \
@ -4341,11 +4388,11 @@ void Assembler::sqrdmlah(const VRegister& vd,
const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
VIXL_ASSERT(AreSameFormat(vd, vn, vm));
VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
VIXL_ASSERT(vd.IsLaneSizeH() || vd.IsLaneSizeS());
Instr format, op = NEON_SQRDMLAH;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vd);
} else {
format = VFormat(vd);
@ -4360,11 +4407,11 @@ void Assembler::sqrdmlsh(const VRegister& vd,
const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
VIXL_ASSERT(AreSameFormat(vd, vn, vm));
VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
VIXL_ASSERT(vd.IsLaneSizeH() || vd.IsLaneSizeS());
Instr format, op = NEON_SQRDMLSH;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vd);
} else {
format = VFormat(vd);
@ -4625,13 +4672,13 @@ void Assembler::NEONFPByElement(const VRegister& vd,
}
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
}
if (!vm.Is1H()) {
op |= FPFormat(vd);
} else if (vd.Is8H()) {
op |= static_cast<Instr>(NEON_Q);
op |= NEON_Q;
}
Emit(op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
@ -4653,7 +4700,7 @@ void Assembler::NEONByElement(const VRegister& vd,
Instr format, op = vop;
int index_num_bits = vm.Is1H() ? 3 : 2;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEONScalar) | static_cast<Instr>(NEON_Q);
op |= NEONScalar | NEON_Q;
format = SFormat(vn);
} else {
format = VFormat(vn);
@ -4681,7 +4728,7 @@ void Assembler::NEONByElementL(const VRegister& vd,
Instr format, op = vop;
int index_num_bits = vm.Is1H() ? 3 : 2;
if (vd.IsScalar()) {
op |= static_cast<Instr>(NEONScalar) | static_cast<Instr>(NEON_Q);
op |= NEONScalar | NEON_Q;
format = SFormat(vn);
} else {
format = VFormat(vn);
@ -4917,7 +4964,7 @@ void Assembler::NEONXtn(const VRegister& vd,
if (vd.IsScalar()) {
VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
(vd.Is1S() && vn.Is1D()));
op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
op |= NEON_Q | NEONScalar;
format = SFormat(vd);
} else {
VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
@ -5829,6 +5876,247 @@ void Assembler::ummla(const VRegister& vd, const VRegister& vn, const VRegister&
Emit(0x6e80a400 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::bcax(const VRegister& vd, const VRegister& vn, const VRegister& vm, const VRegister& va) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA3));
VIXL_ASSERT(vd.Is16B() && vn.Is16B() && vm.Is16B() && va.Is16B());
Emit(0xce200000 | Rd(vd) | Rn(vn) | Rm(vm) | Ra(va));
}
void Assembler::eor3(const VRegister& vd, const VRegister& vn, const VRegister& vm, const VRegister& va) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA3));
VIXL_ASSERT(vd.Is16B() && vn.Is16B() && vm.Is16B() && va.Is16B());
Emit(0xce000000 | Rd(vd) | Rn(vn) | Rm(vm) | Ra(va));
}
void Assembler::xar(const VRegister& vd, const VRegister& vn, const VRegister& vm, int rotate) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA3));
VIXL_ASSERT(vd.Is2D() && vn.Is2D() && vm.Is2D());
VIXL_ASSERT(IsUint6(rotate));
Emit(0xce800000 | Rd(vd) | Rn(vn) | Rm(vm) | rotate << 10);
}
void Assembler::rax1(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA3));
VIXL_ASSERT(vd.Is2D() && vn.Is2D() && vm.Is2D());
Emit(0xce608c00 | Rd(vd) | Rn(vn) | Rm(vm));
}
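Per-lane semantics of the four SHA-3 helpers above, as they are used in Keccak rounds (a sketch, with masm as before):

masm.eor3(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B());  // v0 = v1 ^ v2 ^ v3
masm.rax1(v0.V2D(), v1.V2D(), v2.V2D());                // v0 = v1 ^ rol64(v2, 1)
masm.xar(v0.V2D(), v1.V2D(), v2.V2D(), 39);             // v0 = ror64(v1 ^ v2, 39)
masm.bcax(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B());  // v0 = v1 ^ (v2 & ~v3)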
void Assembler::sha1c(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(vd.IsQ() && vn.IsS() && vm.Is4S());
Emit(0x5e000000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha1h(const VRegister& sd, const VRegister& sn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(sd.IsS() && sn.IsS());
Emit(0x5e280800 | Rd(sd) | Rn(sn));
}
void Assembler::sha1m(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(vd.IsQ() && vn.IsS() && vm.Is4S());
Emit(0x5e002000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha1p(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(vd.IsQ() && vn.IsS() && vm.Is4S());
Emit(0x5e001000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha1su0(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
Emit(0x5e003000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha1su1(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA1));
VIXL_ASSERT(vd.Is4S() && vn.Is4S());
Emit(0x5e281800 | Rd(vd) | Rn(vn));
}
void Assembler::sha256h(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA2));
VIXL_ASSERT(vd.IsQ() && vn.IsQ() && vm.Is4S());
Emit(0x5e004000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha256h2(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA2));
VIXL_ASSERT(vd.IsQ() && vn.IsQ() && vm.Is4S());
Emit(0x5e005000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha256su0(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA2));
VIXL_ASSERT(vd.Is4S() && vn.Is4S());
Emit(0x5e282800 | Rd(vd) | Rn(vn));
}
void Assembler::sha256su1(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA2));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
Emit(0x5e006000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha512h(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA512));
VIXL_ASSERT(vd.IsQ() && vn.IsQ() && vm.Is2D());
Emit(0xce608000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha512h2(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA512));
VIXL_ASSERT(vd.IsQ() && vn.IsQ() && vm.Is2D());
Emit(0xce608400 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sha512su0(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA512));
VIXL_ASSERT(vd.Is2D() && vn.Is2D());
Emit(0xcec08000 | Rd(vd) | Rn(vn));
}
void Assembler::sha512su1(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSHA512));
VIXL_ASSERT(vd.Is2D() && vn.Is2D() && vm.Is2D());
Emit(0xce608800 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::aesd(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kAES));
VIXL_ASSERT(vd.Is16B() && vn.Is16B());
Emit(0x4e285800 | Rd(vd) | Rn(vn));
}
void Assembler::aese(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kAES));
VIXL_ASSERT(vd.Is16B() && vn.Is16B());
Emit(0x4e284800 | Rd(vd) | Rn(vn));
}
void Assembler::aesimc(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kAES));
VIXL_ASSERT(vd.Is16B() && vn.Is16B());
Emit(0x4e287800 | Rd(vd) | Rn(vn));
}
void Assembler::aesmc(const VRegister& vd, const VRegister& vn) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kAES));
VIXL_ASSERT(vd.Is16B() && vn.Is16B());
Emit(0x4e286800 | Rd(vd) | Rn(vn));
}
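The AES helpers compose into rounds in the usual way; a sketch of one encryption round, with the state in v0 and the round key in v1:

masm.aese(v0.V16B(), v1.V16B());   // AddRoundKey, SubBytes, ShiftRows
masm.aesmc(v0.V16B(), v0.V16B());  // MixColumns completes the round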
void Assembler::sm3partw1(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
Emit(0xce60c000 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sm3partw2(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
Emit(0xce60c400 | Rd(vd) | Rn(vn) | Rm(vm));
}
void Assembler::sm3ss1(const VRegister& vd, const VRegister& vn, const VRegister& vm, const VRegister& va) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S() && va.Is4S());
Emit(0xce400000 | Rd(vd) | Rn(vn) | Rm(vm) | Ra(va));
}
void Assembler::sm3tt1a(const VRegister& vd, const VRegister& vn, const VRegister& vm, int index) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
VIXL_ASSERT(IsUint2(index));
Instr i = static_cast<uint32_t>(index) << 12;
Emit(0xce408000 | Rd(vd) | Rn(vn) | Rm(vm) | i);
}
void Assembler::sm3tt1b(const VRegister& vd, const VRegister& vn, const VRegister& vm, int index) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
VIXL_ASSERT(IsUint2(index));
Instr i = static_cast<uint32_t>(index) << 12;
Emit(0xce408400 | Rd(vd) | Rn(vn) | Rm(vm) | i);
}
void Assembler::sm3tt2a(const VRegister& vd, const VRegister& vn, const VRegister& vm, int index) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
VIXL_ASSERT(IsUint2(index));
Instr i = static_cast<uint32_t>(index) << 12;
Emit(0xce408800 | Rd(vd) | Rn(vn) | Rm(vm) | i);
}
void Assembler::sm3tt2b(const VRegister& vd, const VRegister& vn, const VRegister& vm, int index) {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
VIXL_ASSERT(CPUHas(CPUFeatures::kSM3));
VIXL_ASSERT(vd.Is4S() && vn.Is4S() && vm.Is4S());
VIXL_ASSERT(IsUint2(index));
Instr i = static_cast<uint32_t>(index) << 12;
Emit(0xce408c00 | Rd(vd) | Rn(vn) | Rm(vm) | i);
}
// Note:
// For all ToImm instructions below, a difference in case
// for the same letter indicates a negated bit.
@ -6005,15 +6293,13 @@ void Assembler::AddSub(const Register& rd,
rn,
operand.ToExtendedRegister(),
S,
static_cast<Instr>(AddSubExtendedFixed) | static_cast<Instr>(op));
AddSubExtendedFixed | op);
} else {
DataProcShiftedRegister(rd, rn, operand, S,
static_cast<Instr>(AddSubShiftedFixed) | static_cast<Instr>(op));
DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
}
} else {
VIXL_ASSERT(operand.IsExtendedRegister());
DataProcExtendedRegister(rd, rn, operand, S,
static_cast<Instr>(AddSubExtendedFixed) | static_cast<Instr>(op));
DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
}
}
@ -6079,7 +6365,7 @@ void Assembler::Logical(const Register& rd,
} else {
VIXL_ASSERT(operand.IsShiftedRegister());
VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
Instr dp_op = static_cast<Instr>(op) | static_cast<Instr>(LogicalShiftedFixed);
Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
}
}
@ -6108,14 +6394,11 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) {
int64_t immediate = operand.GetImmediate();
VIXL_ASSERT(IsImmConditionalCompare(immediate));
ccmpop = static_cast<Instr>(ConditionalCompareImmediateFixed) |
static_cast<Instr>(op) |
ccmpop = ConditionalCompareImmediateFixed | op |
ImmCondCmp(static_cast<unsigned>(immediate));
} else {
VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
ccmpop = static_cast<Instr>(ConditionalCompareRegisterFixed) |
static_cast<Instr>(op) |
Rm(operand.GetRegister());
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.GetRegister());
}
Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}

View File

@ -24,12 +24,13 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "cpu-features-auditor-aarch64.h"
#include "cpu-features.h"
#include "globals-vixl.h"
#include "utils-vixl.h"
#include "decoder-aarch64.h"
#include "cpu-features-auditor-aarch64.h"
#include "decoder-aarch64.h"
namespace vixl {
namespace aarch64 {
@ -246,16 +247,41 @@ void CPUFeaturesAuditor::VisitConditionalSelect(const Instruction* instr) {
void CPUFeaturesAuditor::VisitCrypto2RegSHA(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
if (form_hash_ == "sha256su0_vv_cryptosha2"_h) {
scope.Record(CPUFeatures::kNEON, CPUFeatures::kSHA2);
} else {
scope.Record(CPUFeatures::kNEON, CPUFeatures::kSHA1);
}
USE(instr);
}
void CPUFeaturesAuditor::VisitCrypto3RegSHA(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
switch (form_hash_) {
case "sha1c_qsv_cryptosha3"_h:
case "sha1m_qsv_cryptosha3"_h:
case "sha1p_qsv_cryptosha3"_h:
case "sha1su0_vvv_cryptosha3"_h:
scope.Record(CPUFeatures::kNEON, CPUFeatures::kSHA1);
break;
case "sha256h_qqv_cryptosha3"_h:
case "sha256h2_qqv_cryptosha3"_h:
case "sha256su1_vvv_cryptosha3"_h:
scope.Record(CPUFeatures::kNEON, CPUFeatures::kSHA2);
break;
}
USE(instr);
}
void CPUFeaturesAuditor::VisitCryptoAES(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
scope.Record(CPUFeatures::kNEON, CPUFeatures::kAES);
USE(instr);
}
void CPUFeaturesAuditor::VisitCryptoSM3(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
scope.Record(CPUFeatures::kNEON, CPUFeatures::kSM3);
USE(instr);
}
@ -735,6 +761,12 @@ void CPUFeaturesAuditor::VisitNEON3Different(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
// All of these instructions require NEON.
scope.Record(CPUFeatures::kNEON);
if (form_hash_ == "pmull_asimddiff_l"_h) {
if (instr->GetNEONSize() == 3) {
// Source is 1D or 2D, destination is 1Q.
scope.Record(CPUFeatures::kPmull1Q);
}
}
USE(instr);
}
@ -1269,91 +1301,93 @@ VIXL_SIMPLE_SVE_VISITOR_LIST(VIXL_DEFINE_SIMPLE_SVE_VISITOR)
void CPUFeaturesAuditor::VisitSystem(const Instruction* instr) {
RecordInstructionFeaturesScope scope(this);
if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
CPUFeatures required;
switch (instr->GetInstructionBits()) {
case PACIA1716:
case PACIB1716:
case AUTIA1716:
case AUTIB1716:
case PACIAZ:
case PACIASP:
case PACIBZ:
case PACIBSP:
case AUTIAZ:
case AUTIASP:
case AUTIBZ:
case AUTIBSP:
case XPACLRI:
required.Combine(CPUFeatures::kPAuth);
break;
default:
switch (instr->GetImmHint()) {
case ESB:
required.Combine(CPUFeatures::kRAS);
break;
case BTI:
case BTI_j:
case BTI_c:
case BTI_jc:
required.Combine(CPUFeatures::kBTI);
break;
default:
break;
}
break;
}
// These are all HINT instructions, and behave as NOPs if the corresponding
// features are not implemented, so we record the corresponding features
// only if they are available.
if (available_.Has(required)) scope.Record(required);
} else if (instr->Mask(SystemSysMask) == SYS) {
switch (instr->GetSysOp()) {
// DC instruction variants.
case CGVAC:
case CGDVAC:
case CGVAP:
case CGDVAP:
case CIGVAC:
case CIGDVAC:
case GVA:
case GZVA:
scope.Record(CPUFeatures::kMTE);
break;
case CVAP:
scope.Record(CPUFeatures::kDCPoP);
break;
case CVADP:
scope.Record(CPUFeatures::kDCCVADP);
break;
case IVAU:
case CVAC:
case CVAU:
case CIVAC:
case ZVA:
// No special CPU features.
break;
}
} else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) {
switch (instr->Mask(SystemPStateMask)) {
case CFINV:
scope.Record(CPUFeatures::kFlagM);
break;
case AXFLAG:
case XAFLAG:
scope.Record(CPUFeatures::kAXFlag);
break;
}
} else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
if (instr->Mask(SystemSysRegMask) == MRS) {
CPUFeatures required;
switch (form_hash_) {
case "pacib1716_hi_hints"_h:
case "pacia1716_hi_hints"_h:
case "pacibsp_hi_hints"_h:
case "paciasp_hi_hints"_h:
case "pacibz_hi_hints"_h:
case "paciaz_hi_hints"_h:
case "autib1716_hi_hints"_h:
case "autia1716_hi_hints"_h:
case "autibsp_hi_hints"_h:
case "autiasp_hi_hints"_h:
case "autibz_hi_hints"_h:
case "autiaz_hi_hints"_h:
case "xpaclri_hi_hints"_h:
required.Combine(CPUFeatures::kPAuth);
break;
case "esb_hi_hints"_h:
required.Combine(CPUFeatures::kRAS);
break;
case "bti_hb_hints"_h:
required.Combine(CPUFeatures::kBTI);
break;
}
// The instructions above are all HINTs and behave as NOPs if the
// corresponding features are not implemented, so we record the features
// only if they are available.
if (available_.Has(required)) scope.Record(required);
switch (form_hash_) {
case "cfinv_m_pstate"_h:
scope.Record(CPUFeatures::kFlagM);
break;
case "axflag_m_pstate"_h:
case "xaflag_m_pstate"_h:
scope.Record(CPUFeatures::kAXFlag);
break;
case "mrs_rs_systemmove"_h:
switch (instr->GetImmSystemRegister()) {
case RNDR:
case RNDRRS:
scope.Record(CPUFeatures::kRNG);
break;
}
}
break;
case "sys_cr_systeminstrs"_h:
switch (instr->GetSysOp()) {
// DC instruction variants.
case CGVAC:
case CGDVAC:
case CGVAP:
case CGDVAP:
case CIGVAC:
case CIGDVAC:
case GVA:
case GZVA:
scope.Record(CPUFeatures::kMTE);
break;
case CVAP:
scope.Record(CPUFeatures::kDCPoP);
break;
case CVADP:
scope.Record(CPUFeatures::kDCCVADP);
break;
case IVAU:
case CVAC:
case CVAU:
case CIVAC:
case ZVA:
// No special CPU features.
break;
case GCSPUSHM:
case GCSSS1:
scope.Record(CPUFeatures::kGCS);
break;
}
break;
case "sysl_rc_systeminstrs"_h:
switch (instr->GetSysOp()) {
case GCSPOPM:
case GCSSS2:
scope.Record(CPUFeatures::kGCS);
break;
}
break;
}
}
@ -1407,9 +1441,9 @@ void CPUFeaturesAuditor::VisitUnimplemented(const Instruction* instr) {
void CPUFeaturesAuditor::Visit(Metadata* metadata, const Instruction* instr) {
VIXL_ASSERT(metadata->count("form") > 0);
const std::string& form = (*metadata)["form"];
uint32_t form_hash = Hash(form.c_str());
form_hash_ = Hash(form.c_str());
const FormToVisitorFnMap* fv = CPUFeaturesAuditor::GetFormToVisitorFnMap();
FormToVisitorFnMap::const_iterator it = fv->find(form_hash);
FormToVisitorFnMap::const_iterator it = fv->find(form_hash_);
if (it == fv->end()) {
RecordInstructionFeaturesScope scope(this);
std::map<uint32_t, const CPUFeatures> features = {
@ -1826,10 +1860,26 @@ void CPUFeaturesAuditor::Visit(Metadata* metadata, const Instruction* instr) {
{"umax_64u_minmax_imm"_h, CPUFeatures::kCSSC},
{"umin_32u_minmax_imm"_h, CPUFeatures::kCSSC},
{"umin_64u_minmax_imm"_h, CPUFeatures::kCSSC},
{"bcax_vvv16_crypto4"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA3)},
{"eor3_vvv16_crypto4"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA3)},
{"rax1_vvv2_cryptosha512_3"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA3)},
{"xar_vvv2_crypto3_imm6"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA3)},
{"sha512h_qqv_cryptosha512_3"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA512)},
{"sha512h2_qqv_cryptosha512_3"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA512)},
{"sha512su0_vv2_cryptosha512_2"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA512)},
{"sha512su1_vvv2_cryptosha512_3"_h,
CPUFeatures(CPUFeatures::kNEON, CPUFeatures::kSHA512)},
};
if (features.count(form_hash) > 0) {
scope.Record(features[form_hash]);
if (features.count(form_hash_) > 0) {
scope.Record(features[form_hash_]);
}
} else {
(it->second)(this, instr);

View File

@ -24,12 +24,13 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "decoder-aarch64.h"
#include <string>
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "decoder-aarch64.h"
#include "decoder-constants-aarch64.h"
namespace vixl {

View File

@ -24,12 +24,12 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "disasm-aarch64.h"
#include <bitset>
#include <cstdlib>
#include <sstream>
#include "disasm-aarch64.h"
namespace vixl {
namespace aarch64 {
@ -330,6 +330,7 @@ const Disassembler::FormToVisitorFnMap *Disassembler::GetFormToVisitorFnMap() {
{"frsqrte_asisdmisc_r"_h, &Disassembler::DisassembleNEONFPScalar2RegMisc},
{"scvtf_asisdmisc_r"_h, &Disassembler::DisassembleNEONFPScalar2RegMisc},
{"ucvtf_asisdmisc_r"_h, &Disassembler::DisassembleNEONFPScalar2RegMisc},
{"pmull_asimddiff_l"_h, &Disassembler::DisassembleNEONPolynomialMul},
{"adclb_z_zzz"_h, &Disassembler::DisassembleSVEAddSubCarry},
{"adclt_z_zzz"_h, &Disassembler::DisassembleSVEAddSubCarry},
{"addhnb_z_zz"_h, &Disassembler::DisassembleSVEAddSubHigh},
@ -752,6 +753,14 @@ const Disassembler::FormToVisitorFnMap *Disassembler::GetFormToVisitorFnMap() {
{"umax_64u_minmax_imm"_h, &Disassembler::DisassembleMinMaxImm},
{"umin_32u_minmax_imm"_h, &Disassembler::DisassembleMinMaxImm},
{"umin_64u_minmax_imm"_h, &Disassembler::DisassembleMinMaxImm},
{"bcax_vvv16_crypto4"_h, &Disassembler::DisassembleNEON4Same},
{"eor3_vvv16_crypto4"_h, &Disassembler::DisassembleNEON4Same},
{"xar_vvv2_crypto3_imm6"_h, &Disassembler::DisassembleNEONXar},
{"rax1_vvv2_cryptosha512_3"_h, &Disassembler::DisassembleNEONRax1},
{"sha512h2_qqv_cryptosha512_3"_h, &Disassembler::DisassembleSHA512},
{"sha512h_qqv_cryptosha512_3"_h, &Disassembler::DisassembleSHA512},
{"sha512su0_vv2_cryptosha512_2"_h, &Disassembler::DisassembleSHA512},
{"sha512su1_vvv2_cryptosha512_3"_h, &Disassembler::DisassembleSHA512},
};
return &form_to_visitor;
} // NOLINT(readability/fn_size)
@ -2017,7 +2026,7 @@ void Disassembler::DisassembleNoArgs(const Instruction *instr) {
void Disassembler::VisitSystem(const Instruction *instr) {
const char *mnemonic = mnemonic_.c_str();
const char *form = "(System)";
const char *form = "";
const char *suffix = NULL;
switch (form_hash_) {
@ -2046,6 +2055,10 @@ void Disassembler::VisitSystem(const Instruction *instr) {
break;
}
break;
case "chkfeat_hf_hints"_h:
mnemonic = "chkfeat";
form = "x16";
break;
case "hint_hm_hints"_h:
form = "'IH";
break;
@ -2066,9 +2079,6 @@ void Disassembler::VisitSystem(const Instruction *instr) {
break;
}
case Hash("sys_cr_systeminstrs"): {
mnemonic = "dc";
suffix = ", 'Xt";
const std::map<uint32_t, const char *> dcop = {
{IVAU, "ivau"},
{CVAC, "cvac"},
@ -2091,17 +2101,36 @@ void Disassembler::VisitSystem(const Instruction *instr) {
if (dcop.count(sysop)) {
if (sysop == IVAU) {
mnemonic = "ic";
} else {
mnemonic = "dc";
}
form = dcop.at(sysop);
suffix = ", 'Xt";
} else if (sysop == GCSSS1) {
mnemonic = "gcsss1";
form = "'Xt";
} else if (sysop == GCSPUSHM) {
mnemonic = "gcspushm";
form = "'Xt";
} else {
mnemonic = "sys";
form = "'G1, 'Kn, 'Km, 'G2";
if (instr->GetRt() == 31) {
suffix = NULL;
if (instr->GetRt() < 31) {
suffix = ", 'Xt";
}
break;
}
break;
}
case "sysl_rc_systeminstrs"_h:
uint32_t sysop = instr->GetSysOp();
if (sysop == GCSPOPM) {
mnemonic = "gcspopm";
form = (instr->GetRt() == 31) ? "" : "'Xt";
} else if (sysop == GCSSS2) {
mnemonic = "gcsss2";
form = "'Xt";
}
break;
}
}
Format(instr, mnemonic, form, suffix);
}
@ -2147,17 +2176,64 @@ void Disassembler::VisitException(const Instruction *instr) {
void Disassembler::VisitCrypto2RegSHA(const Instruction *instr) {
VisitUnimplemented(instr);
const char *form = "'Vd.4s, 'Vn.4s";
if (form_hash_ == "sha1h_ss_cryptosha2"_h) {
form = "'Sd, 'Sn";
}
FormatWithDecodedMnemonic(instr, form);
}
void Disassembler::VisitCrypto3RegSHA(const Instruction *instr) {
VisitUnimplemented(instr);
const char *form = "'Qd, 'Sn, 'Vm.4s";
switch (form_hash_) {
case "sha1su0_vvv_cryptosha3"_h:
case "sha256su1_vvv_cryptosha3"_h:
form = "'Vd.4s, 'Vn.4s, 'Vm.4s";
break;
case "sha256h_qqv_cryptosha3"_h:
case "sha256h2_qqv_cryptosha3"_h:
form = "'Qd, 'Qn, 'Vm.4s";
break;
}
FormatWithDecodedMnemonic(instr, form);
}
void Disassembler::VisitCryptoAES(const Instruction *instr) {
VisitUnimplemented(instr);
FormatWithDecodedMnemonic(instr, "'Vd.16b, 'Vn.16b");
}
void Disassembler::VisitCryptoSM3(const Instruction *instr) {
const char *form = "'Vd.4s, 'Vn.4s, 'Vm.";
const char *suffix = "4s";
switch (form_hash_) {
case "sm3ss1_vvv4_crypto4"_h:
suffix = "4s, 'Va.4s";
break;
case "sm3tt1a_vvv4_crypto3_imm2"_h:
case "sm3tt1b_vvv4_crypto3_imm2"_h:
case "sm3tt2a_vvv4_crypto3_imm2"_h:
case "sm3tt2b_vvv_crypto3_imm2"_h:
suffix = "s['u1312]";
break;
}
FormatWithDecodedMnemonic(instr, form, suffix);
}
void Disassembler::DisassembleSHA512(const Instruction *instr) {
const char *form = "'Qd, 'Qn, 'Vm.2d";
const char *suffix = NULL;
switch (form_hash_) {
case "sha512su1_vvv2_cryptosha512_3"_h:
suffix = ", 'Vm.2d";
VIXL_FALLTHROUGH();
case "sha512su0_vv2_cryptosha512_2"_h:
form = "'Vd.2d, 'Vn.2d";
}
FormatWithDecodedMnemonic(instr, form, suffix);
}
void Disassembler::DisassembleNEON2RegAddlp(const Instruction *instr) {
@ -2373,13 +2449,19 @@ void Disassembler::VisitNEON3SameFP16(const Instruction *instr) {
}
void Disassembler::VisitNEON3SameExtra(const Instruction *instr) {
static const NEONFormatMap map_usdot = {{30}, {NF_8B, NF_16B}};
static const NEONFormatMap map_dot =
{{23, 22, 30}, {NF_UNDEF, NF_UNDEF, NF_UNDEF, NF_UNDEF, NF_2S, NF_4S}};
static const NEONFormatMap map_fc =
{{23, 22, 30},
{NF_UNDEF, NF_UNDEF, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
static const NEONFormatMap map_rdm =
{{23, 22, 30}, {NF_UNDEF, NF_UNDEF, NF_4H, NF_8H, NF_2S, NF_4S}};
const char *mnemonic = mnemonic_.c_str();
const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
const char *suffix = NULL;
NEONFormatDecoder nfd(instr);
NEONFormatDecoder nfd(instr, &map_fc);
switch (form_hash_) {
case "fcmla_asimdsame2_c"_h:
@ -2392,17 +2474,28 @@ void Disassembler::VisitNEON3SameExtra(const Instruction *instr) {
case "sdot_asimdsame2_d"_h:
case "udot_asimdsame2_d"_h:
case "usdot_asimdsame2_d"_h:
nfd.SetFormatMap(1, &map_usdot);
nfd.SetFormatMap(2, &map_usdot);
nfd.SetFormatMaps(nfd.LogicalFormatMap());
nfd.SetFormatMap(0, &map_dot);
break;
default:
// sqrdml[as]h - nothing to do.
nfd.SetFormatMaps(&map_rdm);
break;
}
Format(instr, mnemonic, nfd.Substitute(form), suffix);
}
void Disassembler::DisassembleNEON4Same(const Instruction *instr) {
FormatWithDecodedMnemonic(instr, "'Vd.16b, 'Vn.16b, 'Vm.16b, 'Va.16b");
}
void Disassembler::DisassembleNEONXar(const Instruction *instr) {
FormatWithDecodedMnemonic(instr, "'Vd.2d, 'Vn.2d, 'Vm.2d, #'u1510");
}
void Disassembler::DisassembleNEONRax1(const Instruction *instr) {
FormatWithDecodedMnemonic(instr, "'Vd.2d, 'Vn.2d, 'Vm.2d");
}
void Disassembler::VisitNEON3Different(const Instruction *instr) {
const char *mnemonic = mnemonic_.c_str();
@ -2425,11 +2518,6 @@ void Disassembler::VisitNEON3Different(const Instruction *instr) {
nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
nfd.SetFormatMap(0, nfd.IntegerFormatMap());
break;
case "pmull_asimddiff_l"_h:
if (nfd.GetVectorFormat(0) != kFormat8H) {
mnemonic = NULL;
}
break;
case "sqdmlal_asimddiff_l"_h:
case "sqdmlsl_asimddiff_l"_h:
case "sqdmull_asimddiff_l"_h:
@ -2441,6 +2529,22 @@ void Disassembler::VisitNEON3Different(const Instruction *instr) {
Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
}
void Disassembler::DisassembleNEONPolynomialMul(const Instruction *instr) {
const char *mnemonic = instr->ExtractBit(30) ? "pmull2" : "pmull";
const char *form = NULL;
int size = instr->ExtractBits(23, 22);
if (size == 0) {
// Bits 30:27 of the instruction are x001, where x is the Q bit. Map
// this to "8" and "16" by adding 7.
form = "'Vd.8h, 'Vn.'u3127+7b, 'Vm.'u3127+7b";
} else if (size == 3) {
form = "'Vd.1q, 'Vn.'?30:21d, 'Vm.'?30:21d";
} else {
mnemonic = NULL;
}
Format(instr, mnemonic, form);
}
void Disassembler::DisassembleNEONFPAcrossLanes(const Instruction *instr) {
const char *mnemonic = mnemonic_.c_str();
const char *form = "'Sd, 'Vn.4s";
@ -3298,6 +3402,8 @@ void Disassembler::VisitNEONScalar3Same(const Instruction *instr) {
break;
case "sqdmulh_asisdsame_only"_h:
case "sqrdmulh_asisdsame_only"_h:
case "sqrdmlah_asisdsame2_only"_h:
case "sqrdmlsh_asisdsame2_only"_h:
if ((vform == kFormatB) || (vform == kFormatD)) {
mnemonic = NULL;
}
@ -3916,8 +4022,7 @@ static bool SVEMoveMaskPreferred(uint64_t value, int lane_bytes_log2) {
}
// Check 0x0000pq00_0000pq00 or 0xffffpq00_ffffpq00.
uint64_t rotvalue = RotateRight(value, 32, 64);
if (value == rotvalue) {
if (AllWordsMatch(value)) {
generic_value &= 0xffffffff;
if ((generic_value == 0xffff) || (generic_value == UINT32_MAX)) {
return false;
@ -3925,8 +4030,7 @@ static bool SVEMoveMaskPreferred(uint64_t value, int lane_bytes_log2) {
}
// Check 0xpq00pq00_pq00pq00.
rotvalue = RotateRight(value, 16, 64);
if (value == rotvalue) {
if (AllHalfwordsMatch(value)) {
return false;
}
} else {
@ -3940,8 +4044,7 @@ static bool SVEMoveMaskPreferred(uint64_t value, int lane_bytes_log2) {
}
// Check 0x000000pq_000000pq or 0xffffffpq_ffffffpq.
uint64_t rotvalue = RotateRight(value, 32, 64);
if (value == rotvalue) {
if (AllWordsMatch(value)) {
generic_value &= 0xffffffff;
if ((generic_value == 0xff) || (generic_value == UINT32_MAX)) {
return false;
@ -3949,8 +4052,7 @@ static bool SVEMoveMaskPreferred(uint64_t value, int lane_bytes_log2) {
}
// Check 0x00pq00pq_00pq00pq or 0xffpqffpq_ffpqffpq.
rotvalue = RotateRight(value, 16, 64);
if (value == rotvalue) {
if (AllHalfwordsMatch(value)) {
generic_value &= 0xffff;
if ((generic_value == 0xff) || (generic_value == UINT16_MAX)) {
return false;
@ -3958,8 +4060,7 @@ static bool SVEMoveMaskPreferred(uint64_t value, int lane_bytes_log2) {
}
// Check 0xpqpqpqpq_pqpqpqpq.
rotvalue = RotateRight(value, 8, 64);
if (value == rotvalue) {
if (AllBytesMatch(value)) {
return false;
}
}

View File

@ -25,6 +25,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "instructions-aarch64.h"
#include "assembler-aarch64.h"
namespace vixl {
@ -1010,6 +1011,8 @@ VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
return kFormat4H;
case kFormat2D:
return kFormat2S;
case kFormat1Q:
return kFormat1D;
case kFormatH:
return kFormatB;
case kFormatS:
@ -1094,6 +1097,8 @@ VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
return kFormat2S;
case kFormat2D:
return kFormat4S;
case kFormat1Q:
return kFormat2D;
case kFormatVnH:
return kFormatVnB;
case kFormatVnS:
@ -1245,6 +1250,7 @@ unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
case kFormat8H:
case kFormat4S:
case kFormat2D:
case kFormat1Q:
return kQRegSize;
default:
VIXL_UNREACHABLE();
@ -1282,6 +1288,7 @@ unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
case kFormat2D:
case kFormatVnD:
return 64;
case kFormat1Q:
case kFormatVnQ:
return 128;
case kFormatVnO:
@ -1347,6 +1354,7 @@ int LaneCountFromFormat(VectorFormat vform) {
case kFormat2D:
return 2;
case kFormat1D:
case kFormat1Q:
case kFormatB:
case kFormatH:
case kFormatS:

File diff suppressed because it is too large

View File

@ -24,10 +24,10 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cctype>
#include "macro-assembler-aarch64.h"
#include <cctype>
namespace vixl {
namespace aarch64 {
@ -194,9 +194,8 @@ void VeneerPool::Reset() {
void VeneerPool::Release() {
if (--monitor_ == 0) {
VIXL_ASSERT(IsEmpty() ||
masm_->GetCursorOffset() <
unresolved_branches_.GetFirstLimit());
VIXL_ASSERT(IsEmpty() || masm_->GetCursorOffset() <
unresolved_branches_.GetFirstLimit());
}
}
@ -313,6 +312,48 @@ void VeneerPool::Emit(EmitOption option, size_t amount) {
}
MacroAssembler::MacroAssembler(PositionIndependentCodeOption pic)
: Assembler(pic),
#ifdef VIXL_DEBUG
allow_macro_instructions_(true),
#endif
generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE),
sp_(sp),
tmp_list_(ip0, ip1),
v_tmp_list_(d31),
p_tmp_list_(CPURegList::Empty(CPURegister::kPRegister)),
current_scratch_scope_(NULL),
literal_pool_(this),
veneer_pool_(this),
recommended_checkpoint_(Pool::kNoCheckpointRequired),
fp_nan_propagation_(NoFPMacroNaNPropagationSelected) {
checkpoint_ = GetNextCheckPoint();
#ifndef VIXL_DEBUG
USE(allow_macro_instructions_);
#endif
}
MacroAssembler::MacroAssembler(size_t capacity,
PositionIndependentCodeOption pic)
: Assembler(capacity, pic),
#ifdef VIXL_DEBUG
allow_macro_instructions_(true),
#endif
generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE),
sp_(sp),
tmp_list_(ip0, ip1),
v_tmp_list_(d31),
p_tmp_list_(CPURegList::Empty(CPURegister::kPRegister)),
current_scratch_scope_(NULL),
literal_pool_(this),
veneer_pool_(this),
recommended_checkpoint_(Pool::kNoCheckpointRequired),
fp_nan_propagation_(NoFPMacroNaNPropagationSelected) {
checkpoint_ = GetNextCheckPoint();
}
MacroAssembler::MacroAssembler(byte* buffer,
size_t capacity,
PositionIndependentCodeOption pic)
@ -363,7 +404,7 @@ void MacroAssembler::FinalizeCode(FinalizeOption option) {
void MacroAssembler::CheckEmitFor(size_t amount) {
CheckEmitPoolsFor(amount);
VIXL_ASSERT(GetBuffer()->HasSpaceFor(amount));
GetBuffer()->EnsureSpaceFor(amount);
}
@ -1108,11 +1149,14 @@ void MacroAssembler::Ccmp(const Register& rn,
StatusFlags nzcv,
Condition cond) {
VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.GetImmediate() < 0)) {
ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMN);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
if (operand.IsImmediate()) {
int64_t imm = operand.GetImmediate();
if ((imm < 0) && CanBeNegated(imm)) {
ConditionalCompareMacro(rn, -imm, nzcv, cond, CCMN);
return;
}
}
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
}
@ -1121,11 +1165,14 @@ void MacroAssembler::Ccmn(const Register& rn,
StatusFlags nzcv,
Condition cond) {
VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.GetImmediate() < 0)) {
ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMP);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
if (operand.IsImmediate()) {
int64_t imm = operand.GetImmediate();
if ((imm < 0) && CanBeNegated(imm)) {
ConditionalCompareMacro(rn, -imm, nzcv, cond, CCMP);
return;
}
}
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
}
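The effect of the rewrite above: a negative immediate swaps the emitted instruction, while INT64_MIN (which cannot be negated) now falls through to the generic path instead of overflowing. For example:

masm.Ccmp(x0, -8, NoFlag, eq);  // emitted as ccmn x0, #8, #0, eq
masm.Ccmn(x1, -8, NoFlag, eq);  // emitted as ccmp x1, #8, #0, eq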
@ -1359,8 +1406,7 @@ void MacroAssembler::Add(const Register& rd,
VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
int64_t imm = operand.GetImmediate();
if ((imm < 0) && (imm != std::numeric_limits<int64_t>::min()) &&
IsImmAddSub(-imm)) {
if ((imm < 0) && CanBeNegated(imm) && IsImmAddSub(-imm)) {
AddSubMacro(rd, rn, -imm, S, SUB);
return;
}
@ -1447,8 +1493,7 @@ void MacroAssembler::Sub(const Register& rd,
VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
int64_t imm = operand.GetImmediate();
if ((imm < 0) && (imm != std::numeric_limits<int64_t>::min()) &&
IsImmAddSub(-imm)) {
if ((imm < 0) && CanBeNegated(imm) && IsImmAddSub(-imm)) {
AddSubMacro(rd, rn, -imm, S, ADD);
return;
}
@ -1609,7 +1654,7 @@ void MacroAssembler::Fmov(VRegister vd, Float16 imm) {
void MacroAssembler::Neg(const Register& rd, const Operand& operand) {
VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
if (operand.IsImmediate() && CanBeNegated(operand.GetImmediate())) {
Mov(rd, -operand.GetImmediate());
} else {
Sub(rd, AppropriateZeroRegFor(rd), operand);
@ -1925,6 +1970,22 @@ void MacroAssembler::Setf16(const Register& wn) {
setf16(wn);
}
void MacroAssembler::Chkfeat(const Register& xdn) {
VIXL_ASSERT(allow_macro_instructions_);
MacroEmissionCheckScope guard(this);
if (xdn.Is(x16)) {
chkfeat(xdn);
} else {
UseScratchRegisterScope temps(this);
if (temps.TryAcquire(x16)) {
Mov(x16, xdn);
chkfeat(x16);
Mov(xdn, x16);
} else {
VIXL_ABORT();
}
}
}
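A sketch of the macro in use. CHKFEAT clears each input bit whose feature is implemented and enabled; treating bit 0 as GCS is an assumption to verify against the Arm ARM:

masm.Mov(x0, 1);   // ask about feature bit 0 (assumed: GCS)
masm.Chkfeat(x0);  // x0 is shuffled through x16 around the real chkfeat
// On return, bit 0 of x0 is clear if the feature is available.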
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \

View File

@ -465,5 +465,5 @@ bool GenericOperand::Equals(const GenericOperand& other) const {
}
return false;
}
}
} // namespace vixl::aarch64
} // namespace aarch64
} // namespace vixl

View File

@ -26,10 +26,10 @@
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#include "simulator-aarch64.h"
#include "utils-vixl.h"
#include "simulator-aarch64.h"
namespace vixl {
namespace aarch64 {
@ -151,7 +151,7 @@ uint64_t Simulator::AuthPAC(uint64_t ptr,
uint64_t pac = ComputePAC(original_ptr, context, key);
uint64_t error_code = 1 << key.number;
uint64_t error_code = uint64_t{1} << key.number;
if ((pac & pac_mask) == (ptr & pac_mask)) {
return original_ptr;
} else {

View File

@ -24,11 +24,11 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "registers-aarch64.h"
#include <sstream>
#include <string>
#include "registers-aarch64.h"
namespace vixl {
namespace aarch64 {
@ -153,7 +153,8 @@ VIXL_CPUREG_COERCION_LIST(VIXL_DEFINE_CPUREG_COERCION)
V(2, S) \
V(4, S) \
V(1, D) \
V(2, D)
V(2, D) \
V(1, Q)
#define VIXL_DEFINE_CPUREG_NEON_COERCION(LANES, LANE_TYPE) \
VRegister VRegister::V##LANES##LANE_TYPE() const { \
VIXL_ASSERT(IsVRegister()); \
@ -317,5 +318,5 @@ bool AreSameLaneSize(const CPURegister& reg1,
!reg4.IsValid() || (reg4.GetLaneSizeInBits() == reg1.GetLaneSizeInBits());
return match;
}
}
} // namespace vixl::aarch64
} // namespace aarch64
} // namespace vixl

View File

@ -24,14 +24,51 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef VIXL_CODE_BUFFER_MMAP
extern "C" {
#include <sys/mman.h>
}
#endif
#include "code-buffer-vixl.h"
#include "utils-vixl.h"
namespace vixl {
CodeBuffer::CodeBuffer(size_t capacity)
: buffer_(NULL),
managed_(true),
cursor_(NULL),
dirty_(false),
capacity_(capacity) {
if (capacity_ == 0) {
return;
}
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = reinterpret_cast<byte*>(malloc(capacity_));
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = reinterpret_cast<byte*>(mmap(NULL,
capacity,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1,
0));
#else
#error Unknown code buffer allocator.
#endif
VIXL_CHECK(buffer_ != NULL);
// AArch64 instructions must be word-aligned; we assert that the default
// allocator always returns word-aligned memory.
VIXL_ASSERT(IsWordAligned(buffer_));
cursor_ = buffer_;
}
CodeBuffer::CodeBuffer(byte* buffer, size_t capacity)
: buffer_(reinterpret_cast<byte*>(buffer)),
managed_(false),
cursor_(reinterpret_cast<byte*>(buffer)),
dirty_(false),
capacity_(capacity) {
@ -41,6 +78,39 @@ CodeBuffer::CodeBuffer(byte* buffer, size_t capacity)
CodeBuffer::~CodeBuffer() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
VIXL_ASSERT(!IsDirty());
if (managed_) {
#ifdef VIXL_CODE_BUFFER_MALLOC
free(buffer_);
#elif defined(VIXL_CODE_BUFFER_MMAP)
munmap(buffer_, capacity_);
#else
#error Unknown code buffer allocator.
#endif
}
}
void CodeBuffer::SetExecutable() {
#ifdef VIXL_CODE_BUFFER_MMAP
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC);
VIXL_CHECK(ret == 0);
#else
// This requires page-aligned memory blocks, which we can only guarantee with
// mmap.
VIXL_UNIMPLEMENTED();
#endif
}
void CodeBuffer::SetWritable() {
#ifdef VIXL_CODE_BUFFER_MMAP
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE);
VIXL_CHECK(ret == 0);
#else
// This requires page-aligned memory blocks, which we can only guarantee with
// mmap.
VIXL_UNIMPLEMENTED();
#endif
}
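A sketch of the intended W^X discipline around these calls (mmap-backed buffers only, as the comments note):

CodeBuffer buffer(4096);  // page-sized; mmap-backed under VIXL_CODE_BUFFER_MMAP
// ... emit code while the buffer is readable and writable ...
buffer.SetExecutable();   // flip to R+X before running the generated code
// ... call into the generated code ...
buffer.SetWritable();     // flip back to R+W before patching further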
@ -78,16 +148,42 @@ void CodeBuffer::Align() {
}
void CodeBuffer::EmitZeroedBytes(int n) {
VIXL_ASSERT(HasSpaceFor(n));
EnsureSpaceFor(n);
dirty_ = true;
memset(cursor_, 0, n);
cursor_ += n;
}
void CodeBuffer::Reset() {
#ifdef VIXL_DEBUG
if (managed_) {
// Fill with zeros (there is no useful value common to A32 and T32).
memset(buffer_, 0, capacity_);
}
#endif
cursor_ = buffer_;
SetClean();
}
void CodeBuffer::Grow(size_t new_capacity) {
VIXL_ASSERT(managed_);
VIXL_ASSERT(new_capacity > capacity_);
ptrdiff_t cursor_offset = GetCursorOffset();
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = static_cast<byte*>(realloc(buffer_, new_capacity));
VIXL_CHECK(buffer_ != NULL);
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = static_cast<byte*>(
mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE));
VIXL_CHECK(buffer_ != MAP_FAILED);
#else
#error Unknown code buffer allocator.
#endif
cursor_ = buffer_ + cursor_offset;
capacity_ = new_capacity;
}
} // namespace vixl

View File

@ -25,6 +25,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "compiler-intrinsics-vixl.h"
#include "utils-vixl.h"
namespace vixl {

View File

@ -24,9 +24,10 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "cpu-features.h"
#include <ostream>
#include "cpu-features.h"
#include "globals-vixl.h"
#include "utils-vixl.h"

View File

@ -48,6 +48,7 @@
<ClInclude Include="include\vixl\aarch64\constants-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\cpu-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\cpu-features-auditor-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\debugger-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\decoder-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\decoder-constants-aarch64.h" />
<ClInclude Include="include\vixl\aarch64\decoder-visitor-map-aarch64.h" />

View File

@ -45,6 +45,9 @@
<ClInclude Include="include\vixl\aarch64\decoder-aarch64.h">
<Filter>aarch64</Filter>
</ClInclude>
<ClInclude Include="include\vixl\aarch64\debugger-aarch64.h">
<Filter>aarch64</Filter>
</ClInclude>
<ClInclude Include="include\vixl\assembler-base-vixl.h" />
<ClInclude Include="include\vixl\code-buffer-vixl.h" />
<ClInclude Include="include\vixl\code-generation-scopes-vixl.h" />