RISC-V PR for 9.1

* Support the zimop, zcmop, zama16b and zabha extensions
 * Validate the mode when setting vstvec CSR
 * Add decode support for Zawrs extension
 * Update the KVM regs to Linux 6.10-rc5
 * Add smcntrpmf extension support
 * Raise an exception when CSRRS/CSRRC writes a read-only CSR
 * Re-insert and deprecate 'riscv,delegate' in virt machine device tree
 * roms/opensbi: Update to v1.5
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmaYeUcACgkQr3yVEwxT
 gBMtdw//U2NbmnmECa0uXuE7fdFul0tUkl2oHb9Cr8g5Se5g/HVFqexAKOFZ8Lcm
 DvTl94zJ2dms4RntcmJHwTIusa+oU6qqOekediotjgpeH4BHZNCOHe0E9hIAHn9F
 uoJ1P186L7VeVr7OFAAgSCE7F6egCk7iC0h8L8/vuL4xcuyfbZ2r7ybiTl1+45N2
 YBBv5/00wsYnyMeqRYYtyqgX9QR017JRqNSfTJSbKxhQM/L1GA1xxisUvIGeyDqc
 Pn8E3dMN6sscR6bPs4RP+SBi0JIlRCgth/jteSUkbYf42osw3/5sl4oK/e6Xiogo
 SjELOF7QJNxE8H6EUIScDaCVB5ZhvELZcuOL2NRdUuVDkjhWXM633HwfEcXkZdFK
 W/H9wOvNxPAJIOGXOpv10+MLmhdyIOZwE0uk6evHvdcTn3FP9DurdUCc1se0zKOA
 Qg/H6usTbLGNQ7KKTNQ6GpQ6u89iE1CIyZqYVvB1YuF5t7vtAmxvNk3SVZ6aq3VL
 lPJW2Zd1eO09Q+kRnBVDV7MV4OJrRNsU+ryd91NrSVo9aLADtyiNC28dCSkjU3Gn
 6YQZt65zHuhH5IBB/PGIPo7dLRT8KNWOiYVoy3c6p6DC6oXsKIibh0ue1nrVnnVQ
 NRqyxPYaj6P8zzqwTk+iJj36UXZZVtqPIhtRu9MrO6Opl2AbsXI=
 =pM6B
 -----END PGP SIGNATURE-----

Merge tag 'pull-riscv-to-apply-20240718-1' of https://github.com/alistair23/qemu into staging

RISC-V PR for 9.1

* Support the zimop, zcmop, zama16b and zabha extensions
* Validate the mode when setting vstvec CSR
* Add decode support for Zawrs extension
* Update the KVM regs to Linux 6.10-rc5
* Add smcntrpmf extension support
* Raise an exception when CSRRS/CSRRC writes a read-only CSR
* Re-insert and deprecate 'riscv,delegate' in virt machine device tree
* roms/opensbi: Update to v1.5

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmaYeUcACgkQr3yVEwxT
# gBMtdw//U2NbmnmECa0uXuE7fdFul0tUkl2oHb9Cr8g5Se5g/HVFqexAKOFZ8Lcm
# DvTl94zJ2dms4RntcmJHwTIusa+oU6qqOekediotjgpeH4BHZNCOHe0E9hIAHn9F
# uoJ1P186L7VeVr7OFAAgSCE7F6egCk7iC0h8L8/vuL4xcuyfbZ2r7ybiTl1+45N2
# YBBv5/00wsYnyMeqRYYtyqgX9QR017JRqNSfTJSbKxhQM/L1GA1xxisUvIGeyDqc
# Pn8E3dMN6sscR6bPs4RP+SBi0JIlRCgth/jteSUkbYf42osw3/5sl4oK/e6Xiogo
# SjELOF7QJNxE8H6EUIScDaCVB5ZhvELZcuOL2NRdUuVDkjhWXM633HwfEcXkZdFK
# W/H9wOvNxPAJIOGXOpv10+MLmhdyIOZwE0uk6evHvdcTn3FP9DurdUCc1se0zKOA
# Qg/H6usTbLGNQ7KKTNQ6GpQ6u89iE1CIyZqYVvB1YuF5t7vtAmxvNk3SVZ6aq3VL
# lPJW2Zd1eO09Q+kRnBVDV7MV4OJrRNsU+ryd91NrSVo9aLADtyiNC28dCSkjU3Gn
# 6YQZt65zHuhH5IBB/PGIPo7dLRT8KNWOiYVoy3c6p6DC6oXsKIibh0ue1nrVnnVQ
# NRqyxPYaj6P8zzqwTk+iJj36UXZZVtqPIhtRu9MrO6Opl2AbsXI=
# =pM6B
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 18 Jul 2024 12:09:11 PM AEST
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20240718-1' of https://github.com/alistair23/qemu: (30 commits)
  roms/opensbi: Update to v1.5
  hw/riscv/virt.c: re-insert and deprecate 'riscv,delegate'
  target/riscv: raise an exception when CSRRS/CSRRC writes a read-only CSR
  target/riscv: Expose the Smcntrpmf config
  target/riscv: Do not setup pmu timer if OF is disabled
  target/riscv: More accurately model priv mode filtering.
  target/riscv: Start counters from both mhpmcounter and mcountinhibit
  target/riscv: Enforce WARL behavior for scounteren/hcounteren
  target/riscv: Save counter values during countinhibit update
  target/riscv: Implement privilege mode filtering for cycle/instret
  target/riscv: Only set INH fields if priv mode is available
  target/riscv: Add cycle & instret privilege mode filtering support
  target/riscv: Add cycle & instret privilege mode filtering definitions
  target/riscv: Add cycle & instret privilege mode filtering properties
  target/riscv: Fix the predicate functions for mhpmeventhX CSRs
  target/riscv: Combine set_mode and set_virt functions.
  target/riscv/kvm: update KVM regs to Linux 6.10-rc5
  disas/riscv: Add decode for Zawrs extension
  target/riscv: Validate the mode in write_vstvec
  disas/riscv: Support zabha disassemble
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2024-07-18 21:23:24 +10:00
commit 0d9f1016d4
29 changed files with 1246 additions and 211 deletions

View File

@ -906,6 +906,76 @@ typedef enum {
rv_op_amocas_w = 875,
rv_op_amocas_d = 876,
rv_op_amocas_q = 877,
rv_mop_r_0 = 878,
rv_mop_r_1 = 879,
rv_mop_r_2 = 880,
rv_mop_r_3 = 881,
rv_mop_r_4 = 882,
rv_mop_r_5 = 883,
rv_mop_r_6 = 884,
rv_mop_r_7 = 885,
rv_mop_r_8 = 886,
rv_mop_r_9 = 887,
rv_mop_r_10 = 888,
rv_mop_r_11 = 889,
rv_mop_r_12 = 890,
rv_mop_r_13 = 891,
rv_mop_r_14 = 892,
rv_mop_r_15 = 893,
rv_mop_r_16 = 894,
rv_mop_r_17 = 895,
rv_mop_r_18 = 896,
rv_mop_r_19 = 897,
rv_mop_r_20 = 898,
rv_mop_r_21 = 899,
rv_mop_r_22 = 900,
rv_mop_r_23 = 901,
rv_mop_r_24 = 902,
rv_mop_r_25 = 903,
rv_mop_r_26 = 904,
rv_mop_r_27 = 905,
rv_mop_r_28 = 906,
rv_mop_r_29 = 907,
rv_mop_r_30 = 908,
rv_mop_r_31 = 909,
rv_mop_rr_0 = 910,
rv_mop_rr_1 = 911,
rv_mop_rr_2 = 912,
rv_mop_rr_3 = 913,
rv_mop_rr_4 = 914,
rv_mop_rr_5 = 915,
rv_mop_rr_6 = 916,
rv_mop_rr_7 = 917,
rv_c_mop_1 = 918,
rv_c_mop_3 = 919,
rv_c_mop_5 = 920,
rv_c_mop_7 = 921,
rv_c_mop_9 = 922,
rv_c_mop_11 = 923,
rv_c_mop_13 = 924,
rv_c_mop_15 = 925,
rv_op_amoswap_b = 926,
rv_op_amoadd_b = 927,
rv_op_amoxor_b = 928,
rv_op_amoor_b = 929,
rv_op_amoand_b = 930,
rv_op_amomin_b = 931,
rv_op_amomax_b = 932,
rv_op_amominu_b = 933,
rv_op_amomaxu_b = 934,
rv_op_amoswap_h = 935,
rv_op_amoadd_h = 936,
rv_op_amoxor_h = 937,
rv_op_amoor_h = 938,
rv_op_amoand_h = 939,
rv_op_amomin_h = 940,
rv_op_amomax_h = 941,
rv_op_amominu_h = 942,
rv_op_amomaxu_h = 943,
rv_op_amocas_b = 944,
rv_op_amocas_h = 945,
rv_op_wrs_sto = 946,
rv_op_wrs_nto = 947,
} rv_op;
/* register names */
@ -2096,6 +2166,76 @@ const rv_opcode_data rvi_opcode_data[] = {
{ "amocas.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "mop.r.0", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.1", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.2", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.3", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.4", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.5", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.6", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.7", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.8", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.9", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.10", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.11", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.12", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.13", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.14", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.15", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.16", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.17", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.18", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.19", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.20", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.21", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.22", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.23", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.24", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.25", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.26", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.27", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.28", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.29", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.30", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.r.31", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
{ "mop.rr.0", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.1", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.3", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.4", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "amoswap.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoadd.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoxor.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoor.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoand.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomin.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomax.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amominu.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomaxu.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoswap.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoadd.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoxor.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoor.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amoand.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomin.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomax.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amominu.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amomaxu.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.b", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "amocas.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
{ "wrs.sto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
{ "wrs.nto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 },
};
/* CSR names */
@ -2452,6 +2592,13 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
break;
case 2: op = rv_op_c_li; break;
case 3:
if (dec->cfg->ext_zcmop) {
if ((((inst >> 2) & 0b111111) == 0b100000) &&
(((inst >> 11) & 0b11) == 0b0)) {
op = rv_c_mop_1 + ((inst >> 8) & 0b111);
break;
}
}
switch ((inst >> 7) & 0b11111) {
case 2: op = rv_op_c_addi16sp; break;
default: op = rv_op_c_lui; break;
@ -2883,9 +3030,13 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 11:
switch (((inst >> 24) & 0b11111000) |
((inst >> 12) & 0b00000111)) {
case 0: op = rv_op_amoadd_b; break;
case 1: op = rv_op_amoadd_h; break;
case 2: op = rv_op_amoadd_w; break;
case 3: op = rv_op_amoadd_d; break;
case 4: op = rv_op_amoadd_q; break;
case 8: op = rv_op_amoswap_b; break;
case 9: op = rv_op_amoswap_h; break;
case 10: op = rv_op_amoswap_w; break;
case 11: op = rv_op_amoswap_d; break;
case 12: op = rv_op_amoswap_q; break;
@ -2907,27 +3058,43 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 26: op = rv_op_sc_w; break;
case 27: op = rv_op_sc_d; break;
case 28: op = rv_op_sc_q; break;
case 32: op = rv_op_amoxor_b; break;
case 33: op = rv_op_amoxor_h; break;
case 34: op = rv_op_amoxor_w; break;
case 35: op = rv_op_amoxor_d; break;
case 36: op = rv_op_amoxor_q; break;
case 40: op = rv_op_amocas_b; break;
case 41: op = rv_op_amocas_h; break;
case 42: op = rv_op_amocas_w; break;
case 43: op = rv_op_amocas_d; break;
case 44: op = rv_op_amocas_q; break;
case 64: op = rv_op_amoor_b; break;
case 65: op = rv_op_amoor_h; break;
case 66: op = rv_op_amoor_w; break;
case 67: op = rv_op_amoor_d; break;
case 68: op = rv_op_amoor_q; break;
case 96: op = rv_op_amoand_b; break;
case 97: op = rv_op_amoand_h; break;
case 98: op = rv_op_amoand_w; break;
case 99: op = rv_op_amoand_d; break;
case 100: op = rv_op_amoand_q; break;
case 128: op = rv_op_amomin_b; break;
case 129: op = rv_op_amomin_h; break;
case 130: op = rv_op_amomin_w; break;
case 131: op = rv_op_amomin_d; break;
case 132: op = rv_op_amomin_q; break;
case 160: op = rv_op_amomax_b; break;
case 161: op = rv_op_amomax_h; break;
case 162: op = rv_op_amomax_w; break;
case 163: op = rv_op_amomax_d; break;
case 164: op = rv_op_amomax_q; break;
case 192: op = rv_op_amominu_b; break;
case 193: op = rv_op_amominu_h; break;
case 194: op = rv_op_amominu_w; break;
case 195: op = rv_op_amominu_d; break;
case 196: op = rv_op_amominu_q; break;
case 224: op = rv_op_amomaxu_b; break;
case 225: op = rv_op_amomaxu_h; break;
case 226: op = rv_op_amomaxu_w; break;
case 227: op = rv_op_amomaxu_d; break;
case 228: op = rv_op_amomaxu_q; break;
@ -3817,6 +3984,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 0: op = rv_op_ecall; break;
case 32: op = rv_op_ebreak; break;
case 64: op = rv_op_uret; break;
case 416: op = rv_op_wrs_nto; break;
case 928: op = rv_op_wrs_sto; break;
}
break;
case 256:
@ -3855,6 +4024,24 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
case 1: op = rv_op_csrrw; break;
case 2: op = rv_op_csrrs; break;
case 3: op = rv_op_csrrc; break;
case 4:
if (dec->cfg->ext_zimop) {
int imm_mop5, imm_mop3;
if ((extract32(inst, 22, 10) & 0b1011001111)
== 0b1000000111) {
imm_mop5 = deposit32(deposit32(extract32(inst, 20, 2),
2, 2,
extract32(inst, 26, 2)),
4, 1, extract32(inst, 30, 1));
op = rv_mop_r_0 + imm_mop5;
} else if ((extract32(inst, 25, 7) & 0b1011001)
== 0b1000001) {
imm_mop3 = deposit32(extract32(inst, 26, 2),
2, 1, extract32(inst, 30, 1));
op = rv_mop_rr_0 + imm_mop3;
}
}
break;
case 5: op = rv_op_csrrwi; break;
case 6: op = rv_op_csrrsi; break;
case 7: op = rv_op_csrrci; break;

View File

@ -479,6 +479,17 @@ versions, aliases will point to newer CPU model versions
depending on the machine type, so management software must
resolve CPU model aliases before starting a virtual machine.
RISC-V "virt" board "riscv,delegate" DT property (since 9.1)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
The "riscv,delegate" DT property was added in QEMU 7.0 as part of
the AIA APLIC support. The property changed name during the review
process in Linux and the correct name ended up being
"riscv,delegation". Changing the DT property name will break all
available firmware that uses the current (wrong) name. The
property is kept as is in 9.1, together with "riscv,delegation", to
give more time for firmware developers to change their code.
Migration
---------

View File

@ -651,6 +651,15 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegation",
aplic_child_phandle, 0x1,
VIRT_IRQCHIP_NUM_SOURCES);
/*
* DEPRECATED_9.1: Compat property kept temporarily
* to allow old firmwares to work with AIA. Do *not*
* use 'riscv,delegate' in new code: use
* 'riscv,delegation' instead.
*/
qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
aplic_child_phandle, 0x1,
VIRT_IRQCHIP_NUM_SOURCES);
}
riscv_socket_fdt_write_id(ms, aplic_name, socket);

@ -1 +1 @@
Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877
Subproject commit 455de672dd7c2aa1992df54dfb08dc11abbc1b1a

View File

@ -113,10 +113,13 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
@ -130,6 +133,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
@ -178,6 +182,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
@ -1467,11 +1472,16 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),

View File

@ -176,11 +176,19 @@ typedef struct PMUCTRState {
target_ulong mhpmcounter_prev;
/* Snapshot value of a counter in RV32 */
target_ulong mhpmcounterh_prev;
bool started;
/* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
target_ulong irq_overflow_left;
} PMUCTRState;
typedef struct PMUFixedCtrState {
/* Track cycle and icount for each privilege mode */
uint64_t counter[4];
uint64_t counter_prev[4];
/* Track cycle and icount for each privilege mode when V = 1 */
uint64_t counter_virt[2];
uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
struct CPUArchState {
target_ulong gpr[32];
target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
@ -362,6 +370,12 @@ struct CPUArchState {
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
target_ulong mcyclecfg;
target_ulong mcyclecfgh;
target_ulong minstretcfg;
target_ulong minstretcfgh;
/* PMU counter state */
PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
@ -371,6 +385,8 @@ struct CPUArchState {
/* PMU event selector configured values for RV32 */
target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
PMUFixedCtrState pmu_fixed_ctrs[2];
target_ulong sscratch;
target_ulong mscratch;
@ -567,7 +583,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
@ -735,6 +751,8 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);
@ -767,6 +785,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong new_value,
target_ulong write_mask);
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value, Int128 write_mask);

View File

@ -397,6 +397,10 @@
/* Machine counter-inhibit register */
#define CSR_MCOUNTINHIBIT 0x320
/* Machine counter configuration registers */
#define CSR_MCYCLECFG 0x321
#define CSR_MINSTRETCFG 0x322
#define CSR_MHPMEVENT3 0x323
#define CSR_MHPMEVENT4 0x324
#define CSR_MHPMEVENT5 0x325
@ -427,6 +431,9 @@
#define CSR_MHPMEVENT30 0x33e
#define CSR_MHPMEVENT31 0x33f
#define CSR_MCYCLECFGH 0x721
#define CSR_MINSTRETCFGH 0x722
#define CSR_MHPMEVENT3H 0x723
#define CSR_MHPMEVENT4H 0x724
#define CSR_MHPMEVENT5H 0x725
@ -884,6 +891,28 @@ typedef enum RISCVException {
/* PMU related bits */
#define MIE_LCOFIE (1 << IRQ_PMU_OVF)
#define MCYCLECFG_BIT_MINH BIT_ULL(62)
#define MCYCLECFGH_BIT_MINH BIT(30)
#define MCYCLECFG_BIT_SINH BIT_ULL(61)
#define MCYCLECFGH_BIT_SINH BIT(29)
#define MCYCLECFG_BIT_UINH BIT_ULL(60)
#define MCYCLECFGH_BIT_UINH BIT(28)
#define MCYCLECFG_BIT_VSINH BIT_ULL(59)
#define MCYCLECFGH_BIT_VSINH BIT(27)
#define MCYCLECFG_BIT_VUINH BIT_ULL(58)
#define MCYCLECFGH_BIT_VUINH BIT(26)
#define MINSTRETCFG_BIT_MINH BIT_ULL(62)
#define MINSTRETCFGH_BIT_MINH BIT(30)
#define MINSTRETCFG_BIT_SINH BIT_ULL(61)
#define MINSTRETCFGH_BIT_SINH BIT(29)
#define MINSTRETCFG_BIT_UINH BIT_ULL(60)
#define MINSTRETCFGH_BIT_UINH BIT(28)
#define MINSTRETCFG_BIT_VSINH BIT_ULL(59)
#define MINSTRETCFGH_BIT_VSINH BIT(27)
#define MINSTRETCFG_BIT_VUINH BIT_ULL(58)
#define MINSTRETCFGH_BIT_VUINH BIT(26)
#define MHPMEVENT_BIT_OF BIT_ULL(63)
#define MHPMEVENTH_BIT_OF BIT(31)
#define MHPMEVENT_BIT_MINH BIT_ULL(62)
@ -897,6 +926,18 @@ typedef enum RISCVException {
#define MHPMEVENT_BIT_VUINH BIT_ULL(58)
#define MHPMEVENTH_BIT_VUINH BIT(26)
#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \
MHPMEVENT_BIT_SINH | \
MHPMEVENT_BIT_UINH | \
MHPMEVENT_BIT_VSINH | \
MHPMEVENT_BIT_VUINH)
#define MHPMEVENTH_FILTER_MASK (MHPMEVENTH_BIT_MINH | \
MHPMEVENTH_BIT_SINH | \
MHPMEVENTH_BIT_UINH | \
MHPMEVENTH_BIT_VSINH | \
MHPMEVENTH_BIT_VUINH)
#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
#define MHPMEVENT_IDX_MASK 0xFFFFF
#define MHPMEVENT_SSCOF_RESVD 16

View File

@ -71,9 +71,12 @@ struct RISCVCPUConfig {
bool ext_zihintntl;
bool ext_zihintpause;
bool ext_zihpm;
bool ext_zimop;
bool ext_zcmop;
bool ext_ztso;
bool ext_smstateen;
bool ext_sstc;
bool ext_smcntrpmf;
bool ext_svadu;
bool ext_svinval;
bool ext_svnapot;
@ -81,6 +84,8 @@ struct RISCVCPUConfig {
bool ext_zdinx;
bool ext_zaamo;
bool ext_zacas;
bool ext_zama16b;
bool ext_zabha;
bool ext_zalrsc;
bool ext_zawrs;
bool ext_zfa;

View File

@ -619,30 +619,6 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
/* This function can only be called to set virt when RVH is enabled */
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
/* Flush the TLB on all virt mode changes. */
if (env->virt_enabled != enable) {
tlb_flush(env_cpu(env));
}
env->virt_enabled = enable;
if (enable) {
/*
* The guest external interrupts from an interrupt controller are
* delivered only when the Guest/VM is running (i.e. V=1). This means
* any guest external interrupt which is triggered while the Guest/VM
* is not running (i.e. V=0) will be missed on QEMU resulting in guest
* with sluggish response to serial console input and other I/O events.
*
* To solve this, we check and inject interrupt after setting V=1.
*/
riscv_cpu_update_mip(env, 0, 0);
}
}
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@ -715,13 +691,18 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
if (icount_enabled() && newpriv != env->priv) {
riscv_itrigger_update_priv(env);
if (newpriv != env->priv || env->virt_enabled != virt_en) {
if (icount_enabled()) {
riscv_itrigger_update_priv(env);
}
riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
}
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
@ -736,6 +717,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
* preemptive context switch. As a result, do both.
*/
env->load_res = -1;
if (riscv_has_ext(env, RVH)) {
/* Flush the TLB on all virt mode changes. */
if (env->virt_enabled != virt_en) {
tlb_flush(env_cpu(env));
}
env->virt_enabled = virt_en;
if (virt_en) {
/*
* The guest external interrupts from an interrupt controller are
* delivered only when the Guest/VM is running (i.e. V=1). This
* means any guest external interrupt which is triggered while the
* Guest/VM is not running (i.e. V=0) will be missed on QEMU
* resulting in guest with sluggish response to serial console
* input and other I/O events.
*
* To solve this, we check and inject interrupt after setting V=1.
*/
riscv_cpu_update_mip(env, 0, 0);
}
}
}
/*
@ -1648,6 +1651,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
bool virt = env->virt_enabled;
bool write_gva = false;
uint64_t s;
@ -1778,7 +1782,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
htval = env->guest_phys_fault_addr;
riscv_cpu_set_virt_enabled(env, 0);
virt = false;
} else {
/* Trap into HS mode */
env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
@ -1799,7 +1803,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->htinst = tinst;
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S);
riscv_cpu_set_mode(env, PRV_S, virt);
} else {
/* handle the trap in M-mode */
if (riscv_has_ext(env, RVH)) {
@ -1815,7 +1819,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
mtval2 = env->guest_phys_fault_addr;
/* Trapping to M mode, virt is disabled */
riscv_cpu_set_virt_enabled(env, 0);
virt = false;
}
s = env->mstatus;
@ -1830,7 +1834,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->mtinst = tinst;
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_M);
riscv_cpu_set_mode(env, PRV_M, virt);
}
/*

View File

@ -30,7 +30,6 @@
#include "qemu/guest-random.h"
#include "qapi/error.h"
/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
@ -227,6 +226,33 @@ static RISCVException sscofpmf(CPURISCVState *env, int csrno)
return RISCV_EXCP_NONE;
}
/*
 * Predicate for the RV32-only upper halves of the Sscofpmf event CSRs:
 * legal only when MXLEN is 32 and the base sscofpmf predicate passes.
 */
static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return sscofpmf(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/*
 * Predicate for the Smcntrpmf counter-configuration CSRs
 * (mcyclecfg/minstretcfg): illegal unless the extension is enabled.
 */
static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
{
    return riscv_cpu_cfg(env)->ext_smcntrpmf ? RISCV_EXCP_NONE
                                             : RISCV_EXCP_ILLEGAL_INST;
}
/*
 * Predicate for the RV32-only upper halves (mcyclecfgh/minstretcfgh):
 * requires MXLEN == 32 on top of the Smcntrpmf check.
 */
static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return smcntrpmf(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
static RISCVException any(CPURISCVState *env, int csrno)
{
return RISCV_EXCP_NONE;
@ -761,36 +787,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
#if defined(CONFIG_USER_ONLY)
/* User Timers and Counters */
static target_ulong get_ticks(bool shift, bool instructions)
static target_ulong get_ticks(bool shift)
{
int64_t val;
target_ulong result;
#if !defined(CONFIG_USER_ONLY)
if (icount_enabled()) {
if (instructions) {
val = icount_get_raw();
} else {
val = icount_get();
}
} else {
val = cpu_get_host_ticks();
}
#else
val = cpu_get_host_ticks();
#endif
if (shift) {
result = val >> 32;
} else {
result = val;
}
int64_t val = cpu_get_host_ticks();
target_ulong result = shift ? val >> 32 : val;
return result;
}
#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
target_ulong *val)
{
@ -808,19 +814,124 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = get_ticks(false, (csrno == CSR_INSTRET));
*val = get_ticks(false);
return RISCV_EXCP_NONE;
}
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = get_ticks(true, (csrno == CSR_INSTRETH));
*val = get_ticks(true);
return RISCV_EXCP_NONE;
}
#else /* CONFIG_USER_ONLY */
/* Read mcyclecfg, the cycle-counter privilege-mode filter CSR (Smcntrpmf). */
static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mcyclecfg;
    return RISCV_EXCP_NONE;
}
/*
 * Write mcyclecfg (Smcntrpmf).  On RV64, only the xINH filter bits whose
 * privilege mode actually exists on this CPU are writable; on RV32 the
 * filter bits live in mcyclecfgh, so the value is stored unmasked here.
 */
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mcyclecfg = val;
        return RISCV_EXCP_NONE;
    }

    /* Keep all non-filter bits plus MINH, then add per-mode INH bits. */
    uint64_t wr_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
    bool has_h = riscv_has_ext(env, RVH);

    if (riscv_has_ext(env, RVU)) {
        wr_mask |= MCYCLECFG_BIT_UINH;
        if (has_h) {
            wr_mask |= MCYCLECFG_BIT_VUINH;
        }
    }
    if (riscv_has_ext(env, RVS)) {
        wr_mask |= MCYCLECFG_BIT_SINH;
        if (has_h) {
            wr_mask |= MCYCLECFG_BIT_VSINH;
        }
    }

    env->mcyclecfg = val & wr_mask;
    return RISCV_EXCP_NONE;
}
/* Read mcyclecfgh, the RV32 upper half of the cycle-counter filter CSR. */
static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcyclecfgh;
    return RISCV_EXCP_NONE;
}
/*
 * Write mcyclecfgh (RV32 only).  Only the xINH filter bits for privilege
 * modes present on this CPU are writable; the remaining non-filter bits
 * of the upper half are stored as-is.
 */
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    target_ulong wr_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                          MCYCLECFGH_BIT_MINH);
    bool has_h = riscv_has_ext(env, RVH);

    if (riscv_has_ext(env, RVU)) {
        wr_mask |= MCYCLECFGH_BIT_UINH;
        if (has_h) {
            wr_mask |= MCYCLECFGH_BIT_VUINH;
        }
    }
    if (riscv_has_ext(env, RVS)) {
        wr_mask |= MCYCLECFGH_BIT_SINH;
        if (has_h) {
            wr_mask |= MCYCLECFGH_BIT_VSINH;
        }
    }

    env->mcyclecfgh = val & wr_mask;
    return RISCV_EXCP_NONE;
}
/* Read minstretcfg, the instret-counter privilege-mode filter CSR. */
static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = env->minstretcfg;
    return RISCV_EXCP_NONE;
}
/*
 * Write minstretcfg (Smcntrpmf).  On RV64, only the xINH filter bits
 * whose privilege mode exists on this CPU are writable; on RV32 the
 * filter bits live in minstretcfgh, so the value is stored unmasked.
 */
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->minstretcfg = val;
        return RISCV_EXCP_NONE;
    }

    /* Keep all non-filter bits plus MINH, then add per-mode INH bits. */
    uint64_t wr_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
    bool has_h = riscv_has_ext(env, RVH);

    if (riscv_has_ext(env, RVU)) {
        wr_mask |= MINSTRETCFG_BIT_UINH;
        if (has_h) {
            wr_mask |= MINSTRETCFG_BIT_VUINH;
        }
    }
    if (riscv_has_ext(env, RVS)) {
        wr_mask |= MINSTRETCFG_BIT_SINH;
        if (has_h) {
            wr_mask |= MINSTRETCFG_BIT_VSINH;
        }
    }

    env->minstretcfg = val & wr_mask;
    return RISCV_EXCP_NONE;
}
/* Read minstretcfgh, the RV32 upper half of the instret filter CSR. */
static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
                                        target_ulong *val)
{
    *val = env->minstretcfgh;
    return RISCV_EXCP_NONE;
}
/*
 * Write minstretcfgh (RV32 only).  Only the xINH filter bits for
 * privilege modes present on this CPU are writable; the remaining
 * non-filter bits of the upper half are stored as-is.
 */
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
                                         target_ulong val)
{
    target_ulong wr_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                          MINSTRETCFGH_BIT_MINH);
    bool has_h = riscv_has_ext(env, RVH);

    if (riscv_has_ext(env, RVU)) {
        wr_mask |= MINSTRETCFGH_BIT_UINH;
        if (has_h) {
            wr_mask |= MINSTRETCFGH_BIT_VUINH;
        }
    }
    if (riscv_has_ext(env, RVS)) {
        wr_mask |= MINSTRETCFGH_BIT_SINH;
        if (has_h) {
            wr_mask |= MINSTRETCFGH_BIT_VSINH;
        }
    }

    env->minstretcfgh = val & wr_mask;
    return RISCV_EXCP_NONE;
}
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
target_ulong *val)
{
@ -836,13 +947,24 @@ static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
env->mhpmevent_val[evt_index] = val;
uint64_t inh_avail_mask;
if (riscv_cpu_mxl(env) == MXL_RV32) {
env->mhpmevent_val[evt_index] = val;
mhpmevt_val = mhpmevt_val |
((uint64_t)env->mhpmeventh_val[evt_index] << 32);
} else {
inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
inh_avail_mask |= (riscv_has_ext(env, RVH) &&
riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
inh_avail_mask |= (riscv_has_ext(env, RVH) &&
riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
mhpmevt_val = val & inh_avail_mask;
env->mhpmevent_val[evt_index] = mhpmevt_val;
}
riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
return RISCV_EXCP_NONE;
@ -862,28 +984,107 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
target_ulong val)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val = val;
uint64_t mhpmevth_val;
uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MHPMEVENTH_BIT_MINH);
inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
inh_avail_mask |= (riscv_has_ext(env, RVH) &&
riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
inh_avail_mask |= (riscv_has_ext(env, RVH) &&
riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;
mhpmevth_val = val & inh_avail_mask;
mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
env->mhpmeventh_val[evt_index] = val;
env->mhpmeventh_val[evt_index] = mhpmevth_val;
riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
return RISCV_EXCP_NONE;
}
/*
 * Return the current value of a "fixed" PMU counter (cycle, instret, or an
 * hpmcounter), honouring any per-privilege-mode inhibit (xINH) bits in the
 * counter's configuration CSR.
 *
 * counter_idx selects the config source: 0 -> mcyclecfg(h),
 * 2 -> minstretcfg(h), anything else -> mhpmevent(h)_val[counter_idx].
 * upper_half selects the high 32 bits of the config on RV32.
 *
 * Returns the accumulated count; on RV32 the result is the half selected
 * by upper_half, otherwise the full 64-bit value truncated to target_ulong.
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
int counter_idx,
bool upper_half)
{
/* Nonzero when this counter monitors instructions rather than cycles. */
int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
target_ulong result = 0;
uint64_t curr_val = 0;
uint64_t cfg_val = 0;
if (counter_idx == 0) {
cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
env->mcyclecfg;
} else if (counter_idx == 2) {
cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
env->minstretcfg;
} else {
cfg_val = upper_half ?
((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
env->mhpmevent_val[counter_idx];
/* hpm events carry non-filter bits too; keep only the filter field. */
cfg_val &= MHPMEVENT_FILTER_MASK;
}
/*
 * No inhibit bits set: take the raw tick source directly instead of the
 * per-privilege accumulators.
 */
if (!cfg_val) {
if (icount_enabled()) {
curr_val = inst ? icount_get_raw() : icount_get();
} else {
curr_val = cpu_get_host_ticks();
}
goto done;
}
/* Update counter before reading. */
riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
/* Sum only the privilege modes whose inhibit bit is clear. */
if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
curr_val += counter_arr[PRV_M];
}
if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
curr_val += counter_arr[PRV_S];
}
if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
curr_val += counter_arr[PRV_U];
}
if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
curr_val += counter_arr_virt[PRV_S];
}
if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
curr_val += counter_arr_virt[PRV_U];
}
done:
/* On RV32 hand back the requested 32-bit half of the 64-bit count. */
if (riscv_cpu_mxl(env) == MXL_RV32) {
result = upper_half ? curr_val >> 32 : curr_val;
} else {
result = curr_val;
}
return result;
}
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
target_ulong val)
{
int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounter_val = val;
if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
counter->mhpmcounter_prev = get_ticks(false, instr);
if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
(riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
ctr_idx, false);
if (ctr_idx > 2) {
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmctr_val = mhpmctr_val |
@ -906,12 +1107,14 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounterh_val = val;
mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
counter->mhpmcounterh_prev = get_ticks(true, instr);
if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
(riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
ctr_idx, true);
if (ctr_idx > 2) {
riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
}
@ -922,7 +1125,7 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@ -930,29 +1133,24 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
counter->mhpmcounter_prev;
target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
counter->mhpmcounter_val;
bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/*
* Counter should not increment if inhibit bit is set. We can't really
* stop the icount counting. Just return the counter value written by
* the supervisor to indicate that counter was not incremented.
* Counter should not increment if inhibit bit is set. Just return the
* current counter value.
*/
if (!counter->started) {
*val = ctr_val;
return RISCV_EXCP_NONE;
} else {
/* Mark that the counter has been stopped */
counter->started = false;
}
*val = ctr_val;
return RISCV_EXCP_NONE;
}
/*
* The kernel computes the perf delta by subtracting the current value from
* the value it initialized previously (ctr_val).
*/
if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
*val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
*val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
ctr_prev + ctr_val;
} else {
*val = ctr_val;
}
@ -1977,16 +2175,61 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
int cidx;
PMUCTRState *counter;
RISCVCPU *cpu = env_archcpu(env);
uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
uint64_t mhpmctr_val, prev_count, curr_count;
/* WARL register - disable unavailable counters; TM bit is always 0 */
env->mcountinhibit =
val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
env->mcountinhibit = val & present_ctrs;
/* Check if any other counter is also monitoring cycles/instructions */
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
if (!(updated_ctrs & BIT(cidx)) ||
(!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
!riscv_pmu_ctr_monitor_instructions(env, cidx))) {
continue;
}
counter = &env->pmu_ctrs[cidx];
if (!get_field(env->mcountinhibit, BIT(cidx))) {
counter = &env->pmu_ctrs[cidx];
counter->started = true;
counter->mhpmcounter_prev =
riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
if (riscv_cpu_mxl(env) == MXL_RV32) {
counter->mhpmcounterh_prev =
riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
}
if (cidx > 2) {
mhpmctr_val = counter->mhpmcounter_val;
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmctr_val = mhpmctr_val |
((uint64_t)counter->mhpmcounterh_val << 32);
}
riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
}
} else {
curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
mhpmctr_val = counter->mhpmcounter_val;
prev_count = counter->mhpmcounter_prev;
if (riscv_cpu_mxl(env) == MXL_RV32) {
uint64_t tmp =
riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
curr_count = curr_count | (tmp << 32);
mhpmctr_val = mhpmctr_val |
((uint64_t)counter->mhpmcounterh_val << 32);
prev_count = prev_count |
((uint64_t)counter->mhpmcounterh_prev << 32);
}
/* Adjust the counter for later reads. */
mhpmctr_val = curr_count - prev_count + mhpmctr_val;
counter->mhpmcounter_val = mhpmctr_val;
if (riscv_cpu_mxl(env) == MXL_RV32) {
counter->mhpmcounterh_val = mhpmctr_val >> 32;
}
}
}
@ -2854,7 +3097,11 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
target_ulong val)
{
env->scounteren = val;
RISCVCPU *cpu = env_archcpu(env);
/* WARL register - disable unavailable counters */
env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
COUNTEREN_IR);
return RISCV_EXCP_NONE;
}
@ -3513,7 +3760,11 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
target_ulong val)
{
env->hcounteren = val;
RISCVCPU *cpu = env_archcpu(env);
/* WARL register - disable unavailable counters */
env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
COUNTEREN_IR);
return RISCV_EXCP_NONE;
}
@ -3791,7 +4042,12 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
target_ulong val)
{
env->vstvec = val;
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
env->vstvec = val;
} else {
qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
}
return RISCV_EXCP_NONE;
}
@ -4368,7 +4624,7 @@ static RISCVException rmw_seed(CPURISCVState *env, int csrno,
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
int csrno,
bool write_mask)
bool write)
{
/* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
bool read_only = get_field(csrno, 0xC00) == 3;
@ -4390,7 +4646,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
}
/* read / write check */
if (write_mask && read_only) {
if (write && read_only) {
return RISCV_EXCP_ILLEGAL_INST;
}
@ -4477,11 +4733,22 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
/*
 * Pure CSR read: run the access check with write == false so that
 * read-only CSRs are permitted, then perform a 64-bit read with a
 * zero write mask (no modification).
 */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, false);

    if (ret == RISCV_EXCP_NONE) {
        ret = riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
    }
    return ret;
}
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask)
{
RISCVException ret = riscv_csrrw_check(env, csrno, write_mask);
RISCVException ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@ -4529,13 +4796,45 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
/*
 * Pure 128-bit CSR read.  The access check runs with write == false so a
 * read of a read-only CSR does not fault.  Uses the CSR's read128 hook when
 * one is registered; otherwise falls back to the 64-bit accessor and
 * zero-extends the result into an Int128.
 */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value)
{
RISCVException ret;
ret = riscv_csrrw_check(env, csrno, false);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
/* Zero write mask: read-modify-write machinery degenerates to a read. */
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value,
int128_zero(), int128_zero());
}
/*
 * Fall back to 64-bit version for now, if the 128-bit alternative isn't
 * at all defined.
 * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
 * significant), for those, this fallback is correctly handling the
 * accesses
 */
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
(target_ulong)0,
(target_ulong)0);
/* Only publish the value on success and when the caller asked for it. */
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
return ret;
}
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value, Int128 write_mask)
{
RISCVException ret;
ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask));
ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@ -4574,7 +4873,11 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
#if !defined(CONFIG_USER_ONLY)
env->debugger = true;
#endif
ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
if (!write_mask) {
ret = riscv_csrr(env, csrno, ret_value);
} else {
ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
}
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
#endif
@ -5042,6 +5345,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_mcountinhibit,
.min_priv_ver = PRIV_VERSION_1_11_0 },
[CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
write_mcyclecfg,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
write_minstretcfg,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
write_mhpmevent },
[CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
@ -5101,91 +5411,98 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
write_mhpmevent },
[CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
[CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
write_mcyclecfgh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
write_minstretcfgh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
[CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },

View File

@ -140,6 +140,7 @@ sw 110 ... ... .. ... 00 @cs_w
addi 000 . ..... ..... 01 @ci
addi 010 . ..... ..... 01 @c_li
{
c_mop_n 011 0 0 n:3 1 00000 01
illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0
addi 011 . 00010 ..... 01 @c_addi16sp
lui 011 . ..... ..... 01 @c_lui

View File

@ -38,6 +38,8 @@
%imm_bs 30:2 !function=ex_shift_3
%imm_rnum 20:4
%imm_z6 26:1 15:5
%imm_mop5 30:1 26:2 20:2
%imm_mop3 30:1 26:2
# Argument sets:
&empty
@ -56,6 +58,8 @@
&r2nfvm vm rd rs1 nf
&rnfvm vm rd rs1 rs2 nf
&k_aes shamt rs2 rs1 rd
&mop5 imm rd rs1
&mop3 imm rd rs1 rs2
# Formats 32:
@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd
@ -98,6 +102,9 @@
@k_aes .. ..... ..... ..... ... ..... ....... &k_aes shamt=%imm_bs %rs2 %rs1 %rd
@i_aes .. ..... ..... ..... ... ..... ....... &i imm=%imm_rnum %rs1 %rd
@mop5 . . .. .. .... .. ..... ... ..... ....... &mop5 imm=%imm_mop5 %rd %rs1
@mop3 . . .. .. . ..... ..... ... ..... ....... &mop3 imm=%imm_mop3 %rd %rs1 %rs2
# Formats 64:
@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd
@ -1010,3 +1017,29 @@ amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st
amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
# *** RV64 Zacas Standard Extension ***
amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st
# *** Zimop may-be-operation extension ***
mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
# *** Zabhb Standard Extension ***
amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st
amoadd_b 00000 . . ..... ..... 000 ..... 0101111 @atom_st
amoxor_b 00100 . . ..... ..... 000 ..... 0101111 @atom_st
amoand_b 01100 . . ..... ..... 000 ..... 0101111 @atom_st
amoor_b 01000 . . ..... ..... 000 ..... 0101111 @atom_st
amomin_b 10000 . . ..... ..... 000 ..... 0101111 @atom_st
amomax_b 10100 . . ..... ..... 000 ..... 0101111 @atom_st
amominu_b 11000 . . ..... ..... 000 ..... 0101111 @atom_st
amomaxu_b 11100 . . ..... ..... 000 ..... 0101111 @atom_st
amoswap_h 00001 . . ..... ..... 001 ..... 0101111 @atom_st
amoadd_h 00000 . . ..... ..... 001 ..... 0101111 @atom_st
amoxor_h 00100 . . ..... ..... 001 ..... 0101111 @atom_st
amoand_h 01100 . . ..... ..... 001 ..... 0101111 @atom_st
amoor_h 01000 . . ..... ..... 001 ..... 0101111 @atom_st
amomin_h 10000 . . ..... ..... 001 ..... 0101111 @atom_st
amomax_h 10100 . . ..... ..... 001 ..... 0101111 @atom_st
amominu_h 11000 . . ..... ..... 001 ..... 0101111 @atom_st
amomaxu_h 11100 . . ..... ..... 001 ..... 0101111 @atom_st
amocas_b 00101 . . ..... ..... 000 ..... 0101111 @atom_st
amocas_h 00101 . . ..... ..... 001 ..... 0101111 @atom_st

View File

@ -96,21 +96,6 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
return true;
}
/*
 * Emit a generic AMO (atomic read-modify-write): atomically apply 'func'
 * at the address in rs1 with the operand from rs2, and write the prior
 * memory value to rd.
 */
static bool gen_amo(DisasContext *ctx, arg_atomic *a,
                    void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
                    MemOp mop)
{
    TCGv old = dest_gpr(ctx, a->rd);
    TCGv addr;
    TCGv operand = get_gpr(ctx, a->rs2, EXT_NONE);

    /* Record the opcode before touching memory, for exception reporting. */
    decode_save_opc(ctx);
    addr = get_address(ctx, a->rs1, 0);
    func(old, addr, operand, ctx->mem_idx, mop);

    gen_set_gpr(ctx, a->rd, old);
    return true;
}
static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
{
REQUIRE_A_OR_ZALRSC(ctx);
@ -126,55 +111,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL);
}
static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL);
}
static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL);
}
static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL);
}
static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL);
}
static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL);
}
static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL);
}
static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL);
}
static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL);
}
static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
@ -195,61 +180,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ);
}
static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ);
}
static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ);
}
static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ);
}
static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ);
}
static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ);
}
static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ);
}
static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ);
}
static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ);
}

View File

@ -42,13 +42,18 @@
static bool trans_fld(DisasContext *ctx, arg_fld *a)
{
TCGv addr;
MemOp memop = MO_TEUQ;
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
memop |= MO_ATOM_WITHIN16;
}
decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
mark_fs_dirty(ctx);
return true;
@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
/* FSD: store a 64-bit FP register to memory (requires FPU + RVD). */
static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
{
    TCGv addr;
    MemOp mop = MO_TEUQ;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    /*
     * Zama16b: non-compressed encodings get the within-16-byte atomicity
     * guarantee (MO_ATOM_WITHIN16); 2-byte (compressed) forms do not.
     */
    if (ctx->cfg_ptr->ext_zama16b && ctx->cur_insn_len != 2) {
        mop |= MO_ATOM_WITHIN16;
    }

    decode_save_opc(ctx);
    addr = get_address(ctx, a->rs1, a->imm);
    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, mop);

    return true;
}

View File

@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
{
TCGv_i64 dest;
TCGv addr;
MemOp memop = MO_TEUL;
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
memop |= MO_ATOM_WITHIN16;
}
decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
dest = cpu_fpr[a->rd];
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
gen_nanbox_s(dest, dest);
mark_fs_dirty(ctx);
@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
/* FSW: store the low 32 bits of an FP register to memory (FPU + RVF). */
static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
{
    TCGv addr;
    MemOp mop = MO_TEUL;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    /*
     * Zama16b: non-compressed encodings get the within-16-byte atomicity
     * guarantee (MO_ATOM_WITHIN16); 2-byte (compressed) forms do not.
     */
    if (ctx->cfg_ptr->ext_zama16b && ctx->cur_insn_len != 2) {
        mop |= MO_ATOM_WITHIN16;
    }

    decode_save_opc(ctx);
    addr = get_address(ctx, a->rs1, a->imm);
    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, mop);

    return true;
}

View File

@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
bool out;
if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
memop |= MO_ATOM_WITHIN16;
}
decode_save_opc(ctx);
if (get_xl(ctx) == MXL_RV128) {
out = gen_load_i128(ctx, a, memop);
@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
memop |= MO_ATOM_WITHIN16;
}
decode_save_opc(ctx);
if (get_xl(ctx) == MXL_RV128) {
return gen_store_i128(ctx, a, memop);

View File

@ -0,0 +1,145 @@
/*
* RISC-V translation routines for the Zabha Standard Extension.
*
* Copyright (c) 2024 Alibaba Group
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Reject the translation unless the Zabha extension is enabled. */
#define REQUIRE_ZABHA(ctx) do { \
if (!ctx->cfg_ptr->ext_zabha) { \
return false; \
} \
} while (0)
/*
 * Byte-sized AMOs (Zabha).  All use MO_SB: the loaded byte is
 * sign-extended into rd.  Byte accesses are inherently aligned, so no
 * MO_ALIGN is needed.
 */
/* AMOSWAP.B: atomic byte exchange. */
static bool trans_amoswap_b(DisasContext *ctx, arg_amoswap_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_SB);
}
/* AMOADD.B: atomic byte fetch-and-add. */
static bool trans_amoadd_b(DisasContext *ctx, arg_amoadd_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_SB);
}
/* AMOXOR.B: atomic byte fetch-and-xor. */
static bool trans_amoxor_b(DisasContext *ctx, arg_amoxor_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_SB);
}
/* AMOAND.B: atomic byte fetch-and-and. */
static bool trans_amoand_b(DisasContext *ctx, arg_amoand_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_SB);
}
/* AMOOR.B: atomic byte fetch-and-or. */
static bool trans_amoor_b(DisasContext *ctx, arg_amoor_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_SB);
}
/* AMOMIN.B: atomic byte signed minimum. */
static bool trans_amomin_b(DisasContext *ctx, arg_amomin_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_SB);
}
/* AMOMAX.B: atomic byte signed maximum. */
static bool trans_amomax_b(DisasContext *ctx, arg_amomax_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_SB);
}
/* AMOMINU.B: atomic byte unsigned minimum. */
static bool trans_amominu_b(DisasContext *ctx, arg_amominu_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_SB);
}
/* AMOMAXU.B: atomic byte unsigned maximum. */
static bool trans_amomaxu_b(DisasContext *ctx, arg_amomaxu_b *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_SB);
}
/*
 * Halfword-wide AMOs (Zabha). Identical structure to the byte AMOs
 * above, but with a target-endian sign-extending 16-bit memory op
 * (MO_TESW) passed to gen_amo().
 */
static bool trans_amoswap_h(DisasContext *ctx, arg_amoswap_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESW);
}
static bool trans_amoadd_h(DisasContext *ctx, arg_amoadd_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESW);
}
static bool trans_amoxor_h(DisasContext *ctx, arg_amoxor_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESW);
}
static bool trans_amoand_h(DisasContext *ctx, arg_amoand_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESW);
}
static bool trans_amoor_h(DisasContext *ctx, arg_amoor_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESW);
}
/* amomin.h / amomax.h: signed halfword min/max. */
static bool trans_amomin_h(DisasContext *ctx, arg_amomin_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESW);
}
static bool trans_amomax_h(DisasContext *ctx, arg_amomax_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESW);
}
/* amominu.h / amomaxu.h: unsigned halfword min/max. */
static bool trans_amominu_h(DisasContext *ctx, arg_amominu_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESW);
}
static bool trans_amomaxu_h(DisasContext *ctx, arg_amomaxu_h *a)
{
REQUIRE_ZABHA(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESW);
}
/*
 * amocas.b / amocas.h: sub-word compare-and-swap. These need BOTH
 * Zacas (the CAS instruction family) and Zabha (its byte/halfword
 * forms), so both guards are applied before emitting the cmpxchg.
 */
static bool trans_amocas_b(DisasContext *ctx, arg_amocas_b *a)
{
REQUIRE_ZACAS(ctx);
REQUIRE_ZABHA(ctx);
return gen_cmpxchg(ctx, a, MO_SB);
}
static bool trans_amocas_h(DisasContext *ctx, arg_amocas_h *a)
{
REQUIRE_ZACAS(ctx);
REQUIRE_ZABHA(ctx);
/* Halfword CAS additionally requires natural alignment (MO_ALIGN). */
return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESW);
}

View File

@@ -22,19 +22,6 @@
} \
} while (0)
static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv dest = get_gpr(ctx, a->rd, EXT_NONE);
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
decode_save_opc(ctx);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a)
{
REQUIRE_ZACAS(ctx);

View File

@@ -0,0 +1,29 @@
/*
* RISC-V translation routines for compressed May-Be-Operation(zcmop).
*
* Copyright (c) 2024 Alibaba Group.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * Guard for Zcmop (compressed may-be-operations): decode as illegal
 * unless the extension is enabled. Expands to a 'return' in the caller.
 */
#define REQUIRE_ZCMOP(ctx) do { \
if (!ctx->cfg_ptr->ext_zcmop) { \
return false; \
} \
} while (0)
/*
 * c.mop.n: a compressed may-be-operation. With no other extension
 * redefining it, it has no architectural effect, so translation is
 * simply "accept the insn and do nothing".
 */
static bool trans_c_mop_n(DisasContext *ctx, arg_c_mop_n *a)
{
REQUIRE_ZCMOP(ctx);
return true;
}

View File

@@ -0,0 +1,37 @@
/*
* RISC-V translation routines for May-Be-Operation(zimop).
*
* Copyright (c) 2024 Alibaba Group.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * Guard for Zimop (may-be-operations): decode as illegal unless the
 * extension is enabled. Expands to a 'return' in the caller.
 */
#define REQUIRE_ZIMOP(ctx) do { \
if (!ctx->cfg_ptr->ext_zimop) { \
return false; \
} \
} while (0)
/*
 * mop.r.n / mop.rr.n: may-be-operations. In the absence of another
 * extension redefining them they simply write zero to rd, which is
 * what gen_set_gpr(ctx, a->rd, ctx->zero) emits here.
 */
static bool trans_mop_r_n(DisasContext *ctx, arg_mop_r_n *a)
{
REQUIRE_ZIMOP(ctx);
gen_set_gpr(ctx, a->rd, ctx->zero);
return true;
}
static bool trans_mop_rr_n(DisasContext *ctx, arg_mop_rr_n *a)
{
REQUIRE_ZIMOP(ctx);
gen_set_gpr(ctx, a->rd, ctx->zero);
return true;
}

View File

@ -281,6 +281,7 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
@ -298,6 +299,7 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),

View File

@ -320,15 +320,14 @@ static bool pmu_needed(void *opaque)
static const VMStateDescription vmstate_pmu_ctr_state = {
.name = "cpu/pmu",
.version_id = 1,
.minimum_version_id = 1,
.version_id = 2,
.minimum_version_id = 2,
.needed = pmu_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
VMSTATE_BOOL(started, PMUCTRState),
VMSTATE_END_OF_LIST()
}
};

View File

@ -51,7 +51,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
}
target_ulong val = 0;
RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);
RISCVException ret = riscv_csrr(env, csr, &val);
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@ -84,9 +84,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
{
Int128 rv = int128_zero();
RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
int128_zero(),
int128_zero());
RISCVException ret = riscv_csrr_i128(env, csr, &rv);
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@ -264,7 +262,7 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address)
target_ulong helper_sret(CPURISCVState *env)
{
uint64_t mstatus;
target_ulong prev_priv, prev_virt;
target_ulong prev_priv, prev_virt = env->virt_enabled;
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
@ -307,11 +305,9 @@ target_ulong helper_sret(CPURISCVState *env)
if (prev_virt) {
riscv_cpu_swap_hypervisor_regs(env);
}
riscv_cpu_set_virt_enabled(env, prev_virt);
}
riscv_cpu_set_mode(env, prev_priv);
riscv_cpu_set_mode(env, prev_priv, prev_virt);
return retpc;
}
@ -347,16 +343,13 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
env->mstatus = mstatus;
riscv_cpu_set_mode(env, prev_priv);
if (riscv_has_ext(env, RVH)) {
if (prev_virt) {
riscv_cpu_swap_hypervisor_regs(env);
}
riscv_cpu_set_virt_enabled(env, prev_virt);
if (riscv_has_ext(env, RVH) && prev_virt) {
riscv_cpu_swap_hypervisor_regs(env);
}
riscv_cpu_set_mode(env, prev_priv, prev_virt);
return retpc;
}

View File

@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
return 0;
}
/*
* Information needed to update counters:
* new_priv, new_virt: To correctly save starting snapshot for the newly
* started mode. Look at array being indexed with newprv.
* old_priv, old_virt: To correctly select previous snapshot for old priv
* and compute delta. Also to select correct counter
* to inc. Look at arrays being indexed with env->priv.
*
* To avoid the complexity of calling this function, we assume that
* env->priv and env->virt_enabled contain old priv and old virt and
* new priv and new virt values are passed in as arguments.
*/
/*
 * Fold the instructions retired since the last privilege/virt switch
 * into the per-mode icount array (pmu_fixed_ctrs[1]), and snapshot the
 * current count for the mode being entered. See the block comment
 * above for the old-vs-new priv/virt indexing convention.
 */
static void riscv_pmu_icount_update_priv(CPURISCVState *env,
target_ulong newpriv, bool new_virt)
{
uint64_t *snapshot_prev, *snapshot_new;
uint64_t current_icount;
uint64_t *counter_arr;
uint64_t delta;
/* Time source: raw icount when available, host ticks otherwise. */
if (icount_enabled()) {
current_icount = icount_get_raw();
} else {
current_icount = cpu_get_host_ticks();
}
/* Old mode (env->virt_enabled) selects which arrays accumulate/compare. */
if (env->virt_enabled) {
counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
counter_arr = env->pmu_fixed_ctrs[1].counter;
snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
}
/* New mode (new_virt) selects where the fresh snapshot is stored. */
if (new_virt) {
snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
}
/*
 * new_priv can be same as env->priv. So we need to calculate
 * delta first before updating snapshot_new[new_priv].
 */
delta = current_icount - snapshot_prev[env->priv];
snapshot_new[newpriv] = current_icount;
counter_arr[env->priv] += delta;
}
/*
 * Cycle-counter twin of riscv_pmu_icount_update_priv(): accumulate the
 * elapsed ticks for the outgoing mode and snapshot for the incoming
 * mode, operating on pmu_fixed_ctrs[0]. Note it uses icount_get()
 * (adjusted) where the icount variant uses icount_get_raw().
 */
static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
target_ulong newpriv, bool new_virt)
{
uint64_t *snapshot_prev, *snapshot_new;
uint64_t current_ticks;
uint64_t *counter_arr;
uint64_t delta;
if (icount_enabled()) {
current_ticks = icount_get();
} else {
current_ticks = cpu_get_host_ticks();
}
/* Old mode (env->virt_enabled) selects accumulation/compare arrays. */
if (env->virt_enabled) {
counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
counter_arr = env->pmu_fixed_ctrs[0].counter;
snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
}
/* New mode (new_virt) selects where the fresh snapshot goes. */
if (new_virt) {
snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
}
/* Compute delta against the old snapshot before overwriting it. */
delta = current_ticks - snapshot_prev[env->priv];
snapshot_new[newpriv] = current_ticks;
counter_arr[env->priv] += delta;
}
/*
 * Update both fixed counters (cycle, then instret) for a privilege or
 * virtualization mode transition. Presumably invoked on every mode
 * switch before env->priv/virt_enabled change — callers not visible
 * here; confirm at the call sites.
 */
void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
bool new_virt)
{
riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
riscv_pmu_icount_update_priv(env, newpriv, new_virt);
}
int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
uint32_t ctr_idx;
@ -193,8 +285,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
}
ctr_idx = GPOINTER_TO_UINT(value);
if (!riscv_pmu_counter_enabled(cpu, ctr_idx) ||
get_field(env->mcountinhibit, BIT(ctr_idx))) {
if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
return -1;
}
@ -325,15 +416,52 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
return 0;
}
/*
 * Return whether the overflow (OF) bit is set for counter ctr_idx.
 * On RV32 the OF bit lives in mhpmeventh; on RV64 it is in the upper
 * part of mhpmevent itself.
 */
static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx)
{
target_ulong mhpmevent_val;
uint64_t of_bit_mask;
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmevent_val = env->mhpmeventh_val[ctr_idx];
of_bit_mask = MHPMEVENTH_BIT_OF;
} else {
mhpmevent_val = env->mhpmevent_val[ctr_idx];
of_bit_mask = MHPMEVENT_BIT_OF;
}
return get_field(mhpmevent_val, of_bit_mask);
}
/*
 * Set the overflow (OF) bit for counter ctr_idx if it is currently
 * clear. Returns true only on the clear->set transition, so a caller
 * can raise the overflow interrupt exactly once per overflow.
 */
static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
{
target_ulong *mhpmevent_val;
uint64_t of_bit_mask;
/* RV32 keeps the OF bit in mhpmeventh; RV64 in mhpmevent. */
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
of_bit_mask = MHPMEVENTH_BIT_OF;
} else {
mhpmevent_val = &env->mhpmevent_val[ctr_idx];
of_bit_mask = MHPMEVENT_BIT_OF;
}
if (!get_field(*mhpmevent_val, of_bit_mask)) {
*mhpmevent_val |= of_bit_mask;
return true;
}
return false;
}
static void pmu_timer_trigger_irq(RISCVCPU *cpu,
enum riscv_pmu_event_idx evt_idx)
{
uint32_t ctr_idx;
CPURISCVState *env = &cpu->env;
PMUCTRState *counter;
target_ulong *mhpmevent_val;
uint64_t of_bit_mask;
int64_t irq_trigger_at;
uint64_t curr_ctr_val, curr_ctrh_val;
uint64_t ctr_val;
if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
@ -346,12 +474,9 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
return;
}
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
of_bit_mask = MHPMEVENTH_BIT_OF;
} else {
mhpmevent_val = &env->mhpmevent_val[ctr_idx];
of_bit_mask = MHPMEVENT_BIT_OF;
/* Generate interrupt only if OF bit is clear */
if (pmu_hpmevent_is_of_set(env, ctr_idx)) {
return;
}
counter = &env->pmu_ctrs[ctr_idx];
@ -363,10 +488,28 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
return;
}
riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
ctr_val = counter->mhpmcounter_val;
if (riscv_cpu_mxl(env) == MXL_RV32) {
riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
ctr_val = ctr_val |
((uint64_t)counter->mhpmcounterh_val << 32);
}
/*
* We can not accommodate for inhibited modes when setting up timer. Check
* if the counter has actually overflowed or not by comparing current
* counter value (accommodated for inhibited modes) with software written
* counter value.
*/
if (curr_ctr_val >= ctr_val) {
riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
return;
}
if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
/* Generate interrupt only if OF bit is clear */
if (!(*mhpmevent_val & of_bit_mask)) {
*mhpmevent_val |= of_bit_mask;
if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) {
riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
}
}
@ -384,12 +527,14 @@ void riscv_pmu_timer_cb(void *priv)
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
uint64_t overflow_delta, overflow_at;
uint64_t overflow_delta, overflow_at, curr_ns;
int64_t overflow_ns, overflow_left = 0;
RISCVCPU *cpu = env_archcpu(env);
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
/* No need to setup a timer if LCOFI is disabled when OF is set */
if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
pmu_hpmevent_is_of_set(env, ctr_idx)) {
return -1;
}
@ -415,8 +560,10 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
} else {
return -1;
}
overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
overflow_ns;
curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
overflow_at = curr_ns + overflow_ns;
if (overflow_at <= curr_ns)
overflow_at = UINT64_MAX;
if (overflow_at > INT64_MAX) {
overflow_left += overflow_at - INT64_MAX;

View File

@ -34,5 +34,9 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
bool new_virt);
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx);
#endif /* RISCV_PMU_H */

View File

@ -549,6 +549,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
}
}
if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
error_setg(errp, "Zcmop extensions require Zca");
return;
}
if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;

View File

@ -1077,6 +1077,41 @@ static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
return gen_unary(ctx, a, ext, f_tl);
}
/*
 * Emit a generic atomic memory operation: dest = func([rs1], rs2).
 * 'func' is one of the tcg_gen_atomic_* helpers; 'mop' carries the
 * access size/sign before alignment flags are added here.
 */
static bool gen_amo(DisasContext *ctx, arg_atomic *a,
void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
MemOp mop)
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
MemOp size = mop & MO_SIZE;
/*
 * Zama16b permits misaligned AMOs of 32 bits and wider as long as the
 * access stays within a 16-byte region (MO_ATOM_WITHIN16); otherwise
 * enforce natural alignment with MO_ALIGN.
 */
if (ctx->cfg_ptr->ext_zama16b && size >= MO_32) {
mop |= MO_ATOM_WITHIN16;
} else {
mop |= MO_ALIGN;
}
/* Save the opcode before the address math, for exception reporting. */
decode_save_opc(ctx);
src1 = get_address(ctx, a->rs1, 0);
func(dest, src1, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
/*
 * Emit an atomic compare-and-swap for the amocas.* family:
 * if [rs1] == rd then [rs1] = rs2; rd receives the old memory value
 * either way (tcg_gen_atomic_cmpxchg_tl uses rd as the compare value
 * and writes the loaded value back into dest).
 */
static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv dest = get_gpr(ctx, a->rd, EXT_NONE);
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
decode_save_opc(ctx);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -1097,8 +1132,10 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzacas.c.inc"
#include "insn_trans/trans_rvzabha.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
#include "insn_trans/trans_rvzimop.c.inc"
#include "insn_trans/trans_rvzfa.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_rvk.c.inc"
@ -1113,6 +1150,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* Include the auto-generated decoder for 16 bit insn */
#include "decode-insn16.c.inc"
#include "insn_trans/trans_rvzce.c.inc"
#include "insn_trans/trans_rvzcmop.c.inc"
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"