From a7d4b1bf418acae58a3f0649c578cc0451136f46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Mon, 17 Dec 2018 23:34:39 +0100 Subject: [PATCH 01/40] target/ppc: fix the PPC_BIT definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change the PPC_BIT macro to use ULL instead of UL, and change PPC_BIT32 and PPC_BIT8 to use no suffix at all. This fixes a compile breakage on Windows. Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- target/ppc/cpu.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index ab68abe8a2..527181c0f0 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -70,9 +70,9 @@ #define PPC_ELF_MACHINE EM_PPC #endif -#define PPC_BIT(bit) (0x8000000000000000UL >> (bit)) -#define PPC_BIT32(bit) (0x80000000UL >> (bit)) -#define PPC_BIT8(bit) (0x80UL >> (bit)) +#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32(bit) (0x80000000 >> (bit)) +#define PPC_BIT8(bit) (0x80 >> (bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) #define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \ PPC_BIT32(bs)) From 6187ec05edc240204935fdf0d89cded563fdeab4 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 19 Dec 2018 12:26:20 +1100 Subject: [PATCH 02/40] target/ppc: Remove silly GETFIELD/SETFIELD/MASK_TO_LSH macros The (only) obvious use for these macros is constructing and parsing guest-visible register fields. But the way they're constructed, they're only valid when used on a *host* long, whose size shouldn't be visible to the guest at all. They also have no current users, so just get rid of them. Signed-off-by: David Gibson --- target/ppc/cpu.h | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 527181c0f0..d5f99f1fc7 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -78,18 +78,6 @@ PPC_BIT32(bs)) #define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs)) -#if HOST_LONG_BITS == 32 -# define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1) -#elif HOST_LONG_BITS == 64 -# define MASK_TO_LSH(m) (__builtin_ffsl(m) - 1) -#else -# error Unknown sizeof long -#endif - -#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m)) -#define SETFIELD(m, v, val) \ - (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m))) - /*****************************************************************************/ /* Exception vectors definitions */ enum { From 3908a24fcb83913079d315de0ca6d598e8616dbb Mon Sep 17 00:00:00 2001 From: Serhii Popovych Date: Thu, 22 Nov 2018 08:19:27 -0500 Subject: [PATCH 03/40] spapr: Fix ibm,max-associativity-domains property number of nodes Laurent Vivier reported an off-by-one: the maximum number of NUMA nodes provided by qemu-kvm is one less than required by the description of the "ibm,max-associativity-domains" property in LoPAPR. It appears that I misread the LoPAPR description of this property, assuming it provides the last valid domain (NUMA node here) instead of the maximum number of domains.
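To make the off-by-one concrete, here is a minimal sketch of the two encodings (the helper below is hypothetical and not part of this patch; the actual change is the one-liner in spapr_dt_rtas() in the diff further down):

    /* Hypothetical helper, for illustration only. LoPAPR wants a
     * count of associativity domains, not the last valid index. */
    static uint32_t max_assoc_domains(void)
    {
        /* before: nb_numa_nodes ? nb_numa_nodes - 1 : 0 -- an index,
         * so a guest configured with 3 nodes was told "2" */
        /* after: a count, folding the non-NUMA case into "1" */
        return nb_numa_nodes ? nb_numa_nodes : 1;
    }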
### Before hot-add (qemu) info numa 3 nodes node 0 cpus: 0 node 0 size: 0 MB node 0 plugged: 0 MB node 1 cpus: node 1 size: 1024 MB node 1 plugged: 0 MB node 2 cpus: node 2 size: 0 MB node 2 plugged: 0 MB $ numactl -H available: 2 nodes (0-1) node 0 cpus: 0 node 0 size: 0 MB node 0 free: 0 MB node 1 cpus: node 1 size: 999 MB node 1 free: 658 MB node distances: node 0 1 0: 10 40 1: 40 10 ### Hot-add (qemu) object_add memory-backend-ram,id=mem0,size=1G (qemu) device_add pc-dimm,id=dimm1,memdev=mem0,node=2 (qemu) [ 87.704898] pseries-hotplug-mem: Attempting to hot-add 4 ... [ 87.705128] lpar: Attempting to resize HPT to shift 21 ... ### After hot-add (qemu) info numa 3 nodes node 0 cpus: 0 node 0 size: 0 MB node 0 plugged: 0 MB node 1 cpus: node 1 size: 1024 MB node 1 plugged: 0 MB node 2 cpus: node 2 size: 1024 MB node 2 plugged: 1024 MB $ numactl -H available: 2 nodes (0-1) ^^^^^^^^^^^^^^^^^^^^^^^^ Still only two nodes (and memory hot-added to node 0 below) node 0 cpus: 0 node 0 size: 1024 MB node 0 free: 1021 MB node 1 cpus: node 1 size: 999 MB node 1 free: 658 MB node distances: node 0 1 0: 10 40 1: 40 10 After fix applied numactl(8) reports 3 nodes available and memory plugged into node 2 as expected. From David Gibson: ------------------ Qemu makes a distinction between "non NUMA" (nb_numa_nodes == 0) and "NUMA with one node" (nb_numa_nodes == 1). But from a PAPR guests's point of view these are equivalent. I don't want to present two different cases to the guest when we don't need to, so even though the guest can handle it, I'd prefer we put a '1' here for both the nb_numa_nodes == 0 and nb_numa_nodes == 1 case. This consolidates everything discussed previously on mailing list. Fixes: da9f80fbad21 ("spapr: Add ibm,max-associativity-domains property") Reported-by: Laurent Vivier Signed-off-by: Serhii Popovych Signed-off-by: David Gibson Reviewed-by: Greg Kurz Reviewed-by: Laurent Vivier --- hw/ppc/spapr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 55be0f56cb..b423db311e 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1033,7 +1033,7 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt) cpu_to_be32(0), cpu_to_be32(0), cpu_to_be32(0), - cpu_to_be32(nb_numa_nodes ? nb_numa_nodes - 1 : 0), + cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1), }; _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); From 4c5920af4e9e0bd4473a03da32371e1658d168c0 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Thu, 15 Nov 2018 14:22:59 +1100 Subject: [PATCH 04/40] target/ppc: tcg: Implement addex instruction Implement the addex instruction introduced in ISA V3.00 in qemu tcg. The add extended using alternate carry bit (addex) instruction performs the same operation as the add extended (adde) instruction, but using the overflow (ov) field in the fixed point exception register (xer) as the carry in and out instead of the carry (ca) field. The instruction has a Z23-form, not an XO form, as follows: ------------------------------------------------------------------ | 31 | RT | RA | RB | CY | 170 | 0 | ------------------------------------------------------------------ 0 6 11 16 21 23 31 32 However since the only valid form of the instruction defined so far is CY = 0, we can treat this like an XO form instruction. There is no dot form (addex.) of the instruction and the summary overflow (so) bit in the xer is not modified by this instruction. 
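As a rough sketch of the semantics in plain C (illustrative only, not the TCG ops generated below):

    /* addex RT,RA,RB with CY=0: same as adde, but the carry chain
     * goes through xer.ov instead of xer.ca, and xer.so is never
     * modified (there is no addex. form). */
    static uint64_t addex(uint64_t ra, uint64_t rb, uint64_t *xer_ov)
    {
        uint64_t rt = ra + rb + *xer_ov;

        /* carry out of the unsigned 64-bit addition */
        *xer_ov = (rt < ra) || (rt == ra && *xer_ov);
        return rt;
    }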
For simplicity we reuse the gen_op_arith_add function and add a function argument to specify where the carry in input should come from and the carry out output be stored (note must be the same location). Signed-off-by: Suraj Jitindar Singh Signed-off-by: David Gibson --- disas/ppc.c | 2 ++ target/ppc/translate.c | 60 +++++++++++++++++++++++------------------- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/disas/ppc.c b/disas/ppc.c index 5ab9c35a84..da1140ba2b 100644 --- a/disas/ppc.c +++ b/disas/ppc.c @@ -3734,6 +3734,8 @@ const struct powerpc_opcode powerpc_opcodes[] = { { "addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, { RT, RA } }, { "ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, { RT, RA } }, +{ "addex", XO(31,170,0,0), XO_MASK, POWER9, { RT, RA, RB } }, + { "mullw", XO(31,235,0,0), XO_MASK, PPCCOM, { RT, RA, RB } }, { "muls", XO(31,235,0,0), XO_MASK, PWRCOM, { RT, RA, RB } }, { "mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, { RT, RA, RB } }, diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 2b37910248..96894ab9a8 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -849,7 +849,7 @@ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, static inline void gen_op_arith_compute_ca32(DisasContext *ctx, TCGv res, TCGv arg0, TCGv arg1, - int sub) + TCGv ca32, int sub) { TCGv t0; @@ -864,13 +864,14 @@ static inline void gen_op_arith_compute_ca32(DisasContext *ctx, tcg_gen_xor_tl(t0, arg0, arg1); } tcg_gen_xor_tl(t0, t0, res); - tcg_gen_extract_tl(cpu_ca32, t0, 32, 1); + tcg_gen_extract_tl(ca32, t0, 32, 1); tcg_temp_free(t0); } /* Common add function */ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, - TCGv arg2, bool add_ca, bool compute_ca, + TCGv arg2, TCGv ca, TCGv ca32, + bool add_ca, bool compute_ca, bool compute_ov, bool compute_rc0) { TCGv t0 = ret; @@ -888,29 +889,29 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */ tcg_gen_add_tl(t0, arg1, arg2); if (add_ca) { - tcg_gen_add_tl(t0, t0, cpu_ca); + tcg_gen_add_tl(t0, t0, ca); } - tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */ + tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */ tcg_temp_free(t1); - tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1); + tcg_gen_extract_tl(ca, ca, 32, 1); if (is_isa300(ctx)) { - tcg_gen_mov_tl(cpu_ca32, cpu_ca); + tcg_gen_mov_tl(ca32, ca); } } else { TCGv zero = tcg_const_tl(0); if (add_ca) { - tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero); - tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero); + tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); + tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); } else { - tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero); + tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); } - gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 0); + gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); tcg_temp_free(zero); } } else { tcg_gen_add_tl(t0, arg1, arg2); if (add_ca) { - tcg_gen_add_tl(t0, t0, cpu_ca); + tcg_gen_add_tl(t0, t0, ca); } } @@ -927,40 +928,44 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, } } /* Add functions with two operands */ -#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ +#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + ca, glue(ca, 32), \ 
add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ } /* Add functions with one operand and one immediate */ -#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ +#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ add_ca, compute_ca, compute_ov) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ TCGv t0 = tcg_const_tl(const_val); \ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ + ca, glue(ca, 32), \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ tcg_temp_free(t0); \ } /* add add. addo addo. */ -GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) -GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) +GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) +GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) /* addc addc. addco addco. */ -GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) -GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) +GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) +GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) /* adde adde. addeo addeo. */ -GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) -GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) +GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) /* addme addme. addmeo addmeo. */ -GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) +GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) +/* addex */ +GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); /* addze addze. addzeo addzeo.*/ -GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) -GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) +GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) /* addi */ static void gen_addi(DisasContext *ctx) { @@ -979,7 +984,7 @@ static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) { TCGv c = tcg_const_tl(SIMM(ctx->opcode)); gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - c, 0, 1, 0, compute_rc0); + c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); tcg_temp_free(c); } @@ -1432,13 +1437,13 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, zero = tcg_const_tl(0); tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); - gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, 0); + gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); tcg_temp_free(zero); tcg_temp_free(inv1); } else { tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); tcg_gen_sub_tl(t0, arg2, arg1); - gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 1); + gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); } } else if (add_ca) { /* Since we're ignoring carry-out, we can simplify the @@ -7087,6 +7092,7 @@ GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) +GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300), GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) From 118abc71ed900e7a2cafa3c12eb39b0184f2ba22 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Thu, 22 Nov 2018 15:31:36 +0100 Subject: [PATCH 05/40] spapr: drop redundant statement in spapr_populate_drconf_memory() Signed-off-by: Greg Kurz Signed-off-by: David Gibson Reviewed-by: Laurent Vivier --- hw/ppc/spapr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 
b423db311e..051d080fe5 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -889,8 +889,6 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt) /* ibm,associativity-lookup-arrays */ buf_len = (nr_nodes * 4 + 2) * sizeof(uint32_t); cur_index = int_buf = g_malloc0(buf_len); - - cur_index = int_buf; int_buf[0] = cpu_to_be32(nr_nodes); int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */ cur_index += 2; From cc226c068f985e7788fcbf21e146efe54402eda1 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:05:06 +0100 Subject: [PATCH 06/40] target/ppc: use g_new(T, n) instead of g_malloc(sizeof(T) * n) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: David Gibson --- target/ppc/translate_init.inc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c index 168d0cec28..03f1d34a97 100644 --- a/target/ppc/translate_init.inc.c +++ b/target/ppc/translate_init.inc.c @@ -9081,13 +9081,13 @@ static void init_ppc_proc(PowerPCCPU *cpu) nb_tlb *= 2; switch (env->tlb_type) { case TLB_6XX: - env->tlb.tlb6 = g_malloc0(nb_tlb * sizeof(ppc6xx_tlb_t)); + env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb); break; case TLB_EMB: - env->tlb.tlbe = g_malloc0(nb_tlb * sizeof(ppcemb_tlb_t)); + env->tlb.tlbe = g_new0(ppcemb_tlb_t, nb_tlb); break; case TLB_MAS: - env->tlb.tlbm = g_malloc0(nb_tlb * sizeof(ppcmas_tlb_t)); + env->tlb.tlbm = g_new0(ppcmas_tlb_t, nb_tlb); break; } /* Pre-compute some useful values */ From dec4ec40a110a43bf6304b9c4ee25663d059029d Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:05:18 +0100 Subject: [PATCH 07/40] spapr: use g_new(T, n) instead of g_malloc(sizeof(T) * n) Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Signed-off-by: David Gibson --- hw/ppc/spapr_iommu.c | 2 +- hw/ppc/spapr_vio.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 1b0880ac9e..b56466f89a 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -93,7 +93,7 @@ static uint64_t *spapr_tce_alloc_table(uint32_t liobn, if (!table) { *fd = -1; - table = g_malloc0(nb_table * sizeof(uint64_t)); + table = g_new0(uint64_t, nb_table); } trace_spapr_iommu_new_table(liobn, table, *fd); diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index 840d4a3c45..7e8a9ad093 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -730,7 +730,7 @@ void spapr_dt_vdevice(VIOsPAPRBus *bus, void *fdt) } /* Copy out into an array of pointers */ - qdevs = g_malloc(sizeof(qdev) * num); + qdevs = g_new(DeviceState *, num); num = 0; QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { qdevs[num++] = kid->child; From 779db4c7ca4b2aa3902fcfaab0fcb19fc78638e2 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:05:29 +0100 Subject: [PATCH 08/40] ppc405_boards: use g_new(T, n) instead of g_malloc(sizeof(T) * n) Because it is a recommended coding practice (see HACKING). 
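For illustration, the pattern applied by this series of cleanups is (a sketch, not part of any one diff):

    /* g_new(T, n) is preferred over g_malloc(sizeof(T) * n): the
     * returned pointer is typed, and GLib checks the n * sizeof(T)
     * multiplication for overflow. */
    MemoryRegion *ram_memories;

    ram_memories = g_malloc(2 * sizeof(*ram_memories));  /* before */
    ram_memories = g_new(MemoryRegion, 2);               /* after  */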
Signed-off-by: Greg Kurz Signed-off-by: David Gibson --- hw/ppc/ppc405_boards.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c index 1b0a0a8ba3..f47b15f10e 100644 --- a/hw/ppc/ppc405_boards.c +++ b/hw/ppc/ppc405_boards.c @@ -149,7 +149,7 @@ static void ref405ep_init(MachineState *machine) MemoryRegion *bios; MemoryRegion *sram = g_new(MemoryRegion, 1); ram_addr_t bdloc; - MemoryRegion *ram_memories = g_malloc(2 * sizeof(*ram_memories)); + MemoryRegion *ram_memories = g_new(MemoryRegion, 2); hwaddr ram_bases[2], ram_sizes[2]; target_ulong sram_size; long bios_size; @@ -448,7 +448,7 @@ static void taihu_405ep_init(MachineState *machine) qemu_irq *pic; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *bios; - MemoryRegion *ram_memories = g_malloc(2 * sizeof(*ram_memories)); + MemoryRegion *ram_memories = g_new(MemoryRegion, 2); MemoryRegion *ram = g_malloc0(sizeof(*ram)); hwaddr ram_bases[2], ram_sizes[2]; long bios_size; From c4f46986fc856dc1d50d21e2ad617ae845e4ab16 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:05:38 +0100 Subject: [PATCH 09/40] ppc405_uc: use g_new(T, n) instead of g_malloc(sizeof(T) * n) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: David Gibson --- hw/ppc/ppc405_uc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c index 5c58415cf1..e1aadf126d 100644 --- a/hw/ppc/ppc405_uc.c +++ b/hw/ppc/ppc405_uc.c @@ -1519,7 +1519,7 @@ CPUPPCState *ppc405cr_init(MemoryRegion *address_space_mem, /* OBP arbitrer */ ppc4xx_opba_init(0xef600600); /* Universal interrupt controller */ - irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); + irqs = g_new0(qemu_irq, PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = @@ -1877,7 +1877,7 @@ CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem, /* Initialize timers */ ppc_booke_timers_init(cpu, sysclk, 0); /* Universal interrupt controller */ - irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); + irqs = g_new0(qemu_irq, PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = From 30f8ec76309e7b7ce235f4e544553d078a88ce5e Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:05:51 +0100 Subject: [PATCH 10/40] ppc440_bamboo: use g_new(T, n) instead of g_malloc(sizeof(T) * n) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Edgar E. 
Iglesias Signed-off-by: David Gibson --- hw/ppc/ppc440_bamboo.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index f5720f979e..b8aa55d526 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -169,8 +169,7 @@ static void bamboo_init(MachineState *machine) unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); - MemoryRegion *ram_memories - = g_malloc(PPC440EP_SDRAM_NR_BANKS * sizeof(*ram_memories)); + MemoryRegion *ram_memories = g_new(MemoryRegion, PPC440EP_SDRAM_NR_BANKS); hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS]; hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS]; qemu_irq *pic; @@ -200,7 +199,7 @@ static void bamboo_init(MachineState *machine) ppc_dcr_init(env, NULL, NULL); /* interrupt controller */ - irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); + irqs = g_new0(qemu_irq, PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; pic = ppcuic_init(env, irqs, 0x0C0, 0, 1); From 0989e6d1f265c04efa07db52617793e9862ed159 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:06:01 +0100 Subject: [PATCH 11/40] sam460ex: use g_new(T, n) instead of g_malloc(sizeof(T) * n) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: David Gibson --- hw/ppc/sam460ex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 5aac58f36e..4b051c0950 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -430,7 +430,7 @@ static void sam460ex_init(MachineState *machine) ppc4xx_plb_init(env); /* interrupt controllers */ - irqs = g_malloc0(sizeof(*irqs) * PPCUIC_OUTPUT_NB); + irqs = g_new0(qemu_irq, PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; uic[0] = ppcuic_init(env, irqs, 0xc0, 0, 1); From 57aa218818c06c3fd7e10b5b6e1cbccdca6790ab Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:06:12 +0100 Subject: [PATCH 12/40] virtex_ml507: use g_new(T, n) instead of g_malloc(sizeof(T) * n) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because it is a recommended coding practice (see HACKING). Signed-off-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Edgar E. 
Iglesias Signed-off-by: David Gibson --- hw/ppc/virtex_ml507.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index ee9b4b4490..5177120574 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -105,7 +105,7 @@ static PowerPCCPU *ppc440_init_xilinx(ram_addr_t *ram_size, ppc_dcr_init(env, NULL, NULL); /* interrupt controller */ - irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); + irqs = g_new0(qemu_irq, PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; ppcuic_init(env, irqs, 0x0C0, 0, 1); From 9929301ee12fb2bc8afe0d954cb1b58a8b1e8880 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:06:22 +0100 Subject: [PATCH 13/40] mac_newworld: simplify IRQ wiring The OpenPIC have 5 outputs per connected CPU. The machine init code hence needs a bi-dimensional array (smp_cpu lines, 5 columns) to wire up the irqs between the PIC and the CPUs. The current code first allocates an array of smp_cpus pointers to qemu_irq type, then it allocates another array of smp_cpus * 5 qemu_irq and fills the first array with pointers to each line of the second array. This is rather convoluted. Simplify the logic by introducing a structured type that describes all the OpenPIC outputs for a single CPU, ie, fixed size of 5 qemu_irq, and only allocate a smp_cpu sized array of those. This also allows to use g_new(T, n) instead of g_malloc(sizeof(T) * n) as recommended in HACKING. Signed-off-by: Greg Kurz Signed-off-by: David Gibson --- hw/ppc/mac_newworld.c | 30 +++++++++++++----------------- include/hw/ppc/openpic.h | 2 ++ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 7e45afae7c..bb19eaba36 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -115,7 +115,7 @@ static void ppc_core99_init(MachineState *machine) PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; - qemu_irq **openpic_irqs; + IrqLines *openpic_irqs; int linux_boot, i, j, k; MemoryRegion *ram = g_new(MemoryRegion, 1), *bios = g_new(MemoryRegion, 1); hwaddr kernel_base, initrd_base, cmdline_base = 0; @@ -248,41 +248,37 @@ static void ppc_core99_init(MachineState *machine) memory_region_add_subregion(get_system_memory(), 0xf8000000, sysbus_mmio_get_region(s, 0)); - openpic_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); - openpic_irqs[0] = - g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); + openpic_irqs = g_new0(IrqLines, smp_cpus); for (i = 0; i < smp_cpus; i++) { /* Mac99 IRQ connection between OpenPIC outputs pins * and PowerPC input pins */ switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: - openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); - openpic_irqs[i][OPENPIC_OUTPUT_INT] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; - openpic_irqs[i][OPENPIC_OUTPUT_CINT] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; - openpic_irqs[i][OPENPIC_OUTPUT_MCK] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; /* Not connected ? 
*/ - openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; + openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ - openpic_irqs[i][OPENPIC_OUTPUT_RESET] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: - openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); - openpic_irqs[i][OPENPIC_OUTPUT_INT] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; - openpic_irqs[i][OPENPIC_OUTPUT_CINT] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; - openpic_irqs[i][OPENPIC_OUTPUT_MCK] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; /* Not connected ? */ - openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; + openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ - openpic_irqs[i][OPENPIC_OUTPUT_RESET] = + openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; break; #endif /* defined(TARGET_PPC64) */ @@ -299,7 +295,7 @@ static void ppc_core99_init(MachineState *machine) k = 0; for (i = 0; i < smp_cpus; i++) { for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { - sysbus_connect_irq(s, k++, openpic_irqs[i][j]); + sysbus_connect_irq(s, k++, openpic_irqs[i].irq[j]); } } g_free(openpic_irqs); diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h index 5eb982197d..dad08fe9be 100644 --- a/include/hw/ppc/openpic.h +++ b/include/hw/ppc/openpic.h @@ -20,6 +20,8 @@ enum { OPENPIC_OUTPUT_NB, }; +typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; + #define OPENPIC_MODEL_RAVEN 0 #define OPENPIC_MODEL_FSL_MPIC_20 1 #define OPENPIC_MODEL_FSL_MPIC_42 2 From 2104d4f5bc8f296b3f6f9272bceb8ecfb9581043 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 27 Nov 2018 14:06:31 +0100 Subject: [PATCH 14/40] e500: simplify IRQ wiring The OpenPIC have 5 outputs per connected CPU. The machine init code hence needs a bi-dimensional array (smp_cpu lines, 5 columns) to wire up the irqs between the PIC and the CPUs. The current code first allocates an array of smp_cpus pointers to qemu_irq type, then it allocates another array of smp_cpus * 5 qemu_irq and fills the first array with pointers to each line of the second array. This is rather convoluted. Simplify the logic by introducing a structured type that describes all the OpenPIC outputs for a single CPU, ie, fixed size of 5 qemu_irq, and only allocate a smp_cpu sized array of those. This also allows to use g_new(T, n) instead of g_malloc(sizeof(T) * n) as recommended in HACKING. 
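Concretely, the wiring goes from two chained allocations plus a per-CPU pointer fix-up to a single structured allocation (a sketch distilled from the diff below; IrqLines was introduced in the previous patch):

    /* before: an array of pointers into a second, flat array */
    irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *));
    irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB);
    irqs[i] = irqs[0] + (i * OPENPIC_OUTPUT_NB);   /* for each CPU i */

    /* after: one allocation of fixed-size records, used as irqs[i].irq[j] */
    IrqLines *irqs = g_new0(IrqLines, smp_cpus);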
Signed-off-by: Greg Kurz Signed-off-by: David Gibson --- hw/ppc/e500.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index e6747fce28..b20fea0dfc 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -685,7 +685,7 @@ static void ppce500_cpu_reset(void *opaque) } static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, - qemu_irq **irqs) + IrqLines *irqs) { DeviceState *dev; SysBusDevice *s; @@ -705,7 +705,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, k = 0; for (i = 0; i < smp_cpus; i++) { for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { - sysbus_connect_irq(s, k++, irqs[i][j]); + sysbus_connect_irq(s, k++, irqs[i].irq[j]); } } @@ -713,7 +713,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, } static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, - qemu_irq **irqs, Error **errp) + IrqLines *irqs, Error **errp) { Error *err = NULL; DeviceState *dev; @@ -742,7 +742,7 @@ static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, MemoryRegion *ccsr, - qemu_irq **irqs) + IrqLines *irqs) { MachineState *machine = MACHINE(pms); const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms); @@ -806,15 +806,14 @@ void ppce500_init(MachineState *machine) /* irq num for pin INTA, INTB, INTC and INTD is 1, 2, 3 and * 4 respectively */ unsigned int pci_irq_nrs[PCI_NUM_PINS] = {1, 2, 3, 4}; - qemu_irq **irqs; + IrqLines *irqs; DeviceState *dev, *mpicdev; CPUPPCState *firstenv = NULL; MemoryRegion *ccsr_addr_space; SysBusDevice *s; PPCE500CCSRState *ccsr; - irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); - irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); + irqs = g_new0(IrqLines, smp_cpus); for (i = 0; i < smp_cpus; i++) { PowerPCCPU *cpu; CPUState *cs; @@ -834,10 +833,9 @@ void ppce500_init(MachineState *machine) firstenv = env; } - irqs[i] = irqs[0] + (i * OPENPIC_OUTPUT_NB); input = (qemu_irq *)env->irq_inputs; - irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; - irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; + irqs[i].irq[OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; + irqs[i].irq[OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; From 02e3ff548d2379c16990bac9cb84833231e0d20f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:15 +0100 Subject: [PATCH 15/40] ppc/xive: introduce a XIVE interrupt source model MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The first sub-engine of the overall XIVE architecture is the Interrupt Virtualization Source Engine (IVSE). An IVSE can be integrated into another logic, like in a PCI PHB or in the main interrupt controller to manage IPIs. Each IVSE instance is associated with an Event State Buffer (ESB) that contains a two bit state entry for each possible event source. When an event is signaled to the IVSE, by MMIO or some other means, the associated interrupt state bits are fetched from the ESB and modified. Depending on the resulting ESB state, the event is forwarded to the IVRE sub-engine of the controller doing the routing. 
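The PQ transitions driven by these events can be summarized as follows (distilled from the xive_esb_trigger() and xive_esb_eoi() helpers added below):

    /* trigger:                           EOI:
     *  00 RESET   -> 10 PENDING, notify   00 RESET   -> 00 RESET
     *  10 PENDING -> 11 QUEUED            10 PENDING -> 00 RESET
     *  11 QUEUED  -> 11 QUEUED            11 QUEUED  -> 10 PENDING, notify
     *  01 OFF     -> 01 OFF               01 OFF     -> 01 OFF
     */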
Each supported ESB entry is associated with either a single or a even/odd pair of pages which provides commands to manage the source: to EOI, to turn off the source for instance. On a sPAPR machine, the O/S will obtain the page address of the ESB entry associated with a source and its characteristic using the H_INT_GET_SOURCE_INFO hcall. On PowerNV, a similar OPAL call is used. The xive_source_notify() routine is in charge forwarding the source event notification to the routing engine. It will be filled later on. Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- default-configs/ppc64-softmmu.mak | 1 + hw/intc/Makefile.objs | 1 + hw/intc/xive.c | 382 ++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 260 ++++++++++++++++++++ 4 files changed, 644 insertions(+) create mode 100644 hw/intc/xive.c create mode 100644 include/hw/ppc/xive.h diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak index aec2855750..2d1e7c5c46 100644 --- a/default-configs/ppc64-softmmu.mak +++ b/default-configs/ppc64-softmmu.mak @@ -16,6 +16,7 @@ CONFIG_VIRTIO_VGA=y CONFIG_XICS=$(CONFIG_PSERIES) CONFIG_XICS_SPAPR=$(CONFIG_PSERIES) CONFIG_XICS_KVM=$(call land,$(CONFIG_PSERIES),$(CONFIG_KVM)) +CONFIG_XIVE=$(CONFIG_PSERIES) CONFIG_MEM_DEVICE=y CONFIG_DIMM=y CONFIG_SPAPR_RNG=y diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs index 0e9963f5ee..72a46ed91c 100644 --- a/hw/intc/Makefile.objs +++ b/hw/intc/Makefile.objs @@ -37,6 +37,7 @@ obj-$(CONFIG_SH4) += sh_intc.o obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o obj-$(CONFIG_XICS_KVM) += xics_kvm.o +obj-$(CONFIG_XIVE) += xive.o obj-$(CONFIG_POWERNV) += xics_pnv.o obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o obj-$(CONFIG_S390_FLIC) += s390_flic.o diff --git a/hw/intc/xive.c b/hw/intc/xive.c new file mode 100644 index 0000000000..6389bd8323 --- /dev/null +++ b/hw/intc/xive.c @@ -0,0 +1,382 @@ +/* + * QEMU PowerPC XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/dma.h" +#include "hw/qdev-properties.h" +#include "monitor/monitor.h" +#include "hw/ppc/xive.h" + +/* + * XIVE ESB helpers + */ + +static uint8_t xive_esb_set(uint8_t *pq, uint8_t value) +{ + uint8_t old_pq = *pq & 0x3; + + *pq &= ~0x3; + *pq |= value & 0x3; + + return old_pq; +} + +static bool xive_esb_trigger(uint8_t *pq) +{ + uint8_t old_pq = *pq & 0x3; + + switch (old_pq) { + case XIVE_ESB_RESET: + xive_esb_set(pq, XIVE_ESB_PENDING); + return true; + case XIVE_ESB_PENDING: + case XIVE_ESB_QUEUED: + xive_esb_set(pq, XIVE_ESB_QUEUED); + return false; + case XIVE_ESB_OFF: + xive_esb_set(pq, XIVE_ESB_OFF); + return false; + default: + g_assert_not_reached(); + } +} + +static bool xive_esb_eoi(uint8_t *pq) +{ + uint8_t old_pq = *pq & 0x3; + + switch (old_pq) { + case XIVE_ESB_RESET: + case XIVE_ESB_PENDING: + xive_esb_set(pq, XIVE_ESB_RESET); + return false; + case XIVE_ESB_QUEUED: + xive_esb_set(pq, XIVE_ESB_PENDING); + return true; + case XIVE_ESB_OFF: + xive_esb_set(pq, XIVE_ESB_OFF); + return false; + default: + g_assert_not_reached(); + } +} + +/* + * XIVE Interrupt Source (or IVSE) + */ + +uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + + return xsrc->status[srcno] & 0x3; +} + +uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) +{ + assert(srcno < xsrc->nr_irqs); + + return xive_esb_set(&xsrc->status[srcno], pq); +} + +/* + * Returns whether the event notification should be forwarded. + */ +static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + + return xive_esb_trigger(&xsrc->status[srcno]); +} + +/* + * Returns whether the event notification should be forwarded. + */ +static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + + return xive_esb_eoi(&xsrc->status[srcno]); +} + +/* + * Forward the source event notification to the Router + */ +static void xive_source_notify(XiveSource *xsrc, int srcno) +{ + +} + +/* + * In a two pages ESB MMIO setting, even page is the trigger page, odd + * page is for management + */ +static inline bool addr_is_even(hwaddr addr, uint32_t shift) +{ + return !((addr >> shift) & 1); +} + +static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) +{ + return xive_source_esb_has_2page(xsrc) && + addr_is_even(addr, xsrc->esb_shift - 1); +} + +/* + * ESB MMIO loads + * Trigger page Management/EOI page + * + * ESB MMIO setting 2 pages 1 or 2 pages + * + * 0x000 .. 0x3FF -1 EOI and return 0|1 + * 0x400 .. 0x7FF -1 EOI and return 0|1 + * 0x800 .. 0xBFF -1 return PQ + * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00 + * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01 + * 0xE00 .. 0xDFF -1 return PQ and atomically PQ=10 + * 0xF00 .. 0xDFF -1 return PQ and atomically PQ=11 + */ +static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint32_t srcno = addr >> xsrc->esb_shift; + uint64_t ret = -1; + + /* In a two pages ESB MMIO setting, trigger page should not be read */ + if (xive_source_is_trigger_page(xsrc, addr)) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: invalid load on IRQ %d trigger page at " + "0x%"HWADDR_PRIx"\n", srcno, addr); + return -1; + } + + switch (offset) { + case XIVE_ESB_LOAD_EOI ... 
XIVE_ESB_LOAD_EOI + 0x7FF: + ret = xive_source_esb_eoi(xsrc, srcno); + + /* Forward the source event notification for routing */ + if (ret) { + xive_source_notify(xsrc, srcno); + } + break; + + case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: + ret = xive_source_esb_get(xsrc, srcno); + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n", + offset); + } + + return ret; +} + +/* + * ESB MMIO stores + * Trigger page Management/EOI page + * + * ESB MMIO setting 2 pages 1 or 2 pages + * + * 0x000 .. 0x3FF Trigger Trigger + * 0x400 .. 0x7FF Trigger EOI + * 0x800 .. 0xBFF Trigger undefined + * 0xC00 .. 0xCFF Trigger PQ=00 + * 0xD00 .. 0xDFF Trigger PQ=01 + * 0xE00 .. 0xDFF Trigger PQ=10 + * 0xF00 .. 0xDFF Trigger PQ=11 + */ +static void xive_source_esb_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint32_t srcno = addr >> xsrc->esb_shift; + bool notify = false; + + /* In a two pages ESB MMIO setting, trigger page only triggers */ + if (xive_source_is_trigger_page(xsrc, addr)) { + notify = xive_source_esb_trigger(xsrc, srcno); + goto out; + } + + switch (offset) { + case 0 ... 0x3FF: + notify = xive_source_esb_trigger(xsrc, srcno); + break; + + case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: + if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: invalid Store EOI for IRQ %d\n", srcno); + return; + } + + notify = xive_source_esb_eoi(xsrc, srcno); + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n", + offset); + return; + } + +out: + /* Forward the source event notification for routing */ + if (notify) { + xive_source_notify(xsrc, srcno); + } +} + +static const MemoryRegionOps xive_source_esb_ops = { + .read = xive_source_esb_read, + .write = xive_source_esb_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive_source_set_irq(void *opaque, int srcno, int val) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + bool notify = false; + + if (val) { + notify = xive_source_esb_trigger(xsrc, srcno); + } + + /* Forward the source event notification for routing */ + if (notify) { + xive_source_notify(xsrc, srcno); + } +} + +void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) +{ + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + uint8_t pq = xive_source_esb_get(xsrc, i); + + if (pq == XIVE_ESB_OFF) { + continue; + } + + monitor_printf(mon, " %08x %c%c\n", i + offset, + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 
'Q' : '-'); + } +} + +static void xive_source_reset(void *dev) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + + /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ + memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); +} + +static void xive_source_realize(DeviceState *dev, Error **errp) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + + if (!xsrc->nr_irqs) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_4K_2PAGE && + xsrc->esb_shift != XIVE_ESB_64K && + xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + xsrc->status = g_malloc0(xsrc->nr_irqs); + + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_source_esb_ops, xsrc, "xive.esb", + (1ull << xsrc->esb_shift) * xsrc->nr_irqs); + + xsrc->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, + xsrc->nr_irqs); + + qemu_register_reset(xive_source_reset, dev); +} + +static const VMStateDescription vmstate_xive_source = { + .name = TYPE_XIVE_SOURCE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), + VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), + VMSTATE_END_OF_LIST() + }, +}; + +/* + * The default XIVE interrupt source setting for the ESB MMIOs is two + * 64k pages without Store EOI, to be in sync with KVM. + */ +static Property xive_source_properties[] = { + DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), + DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), + DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Source"; + dc->props = xive_source_properties; + dc->realize = xive_source_realize; + dc->vmsd = &vmstate_xive_source; +} + +static const TypeInfo xive_source_info = { + .name = TYPE_XIVE_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveSource), + .class_init = xive_source_class_init, +}; + +static void xive_register_types(void) +{ + type_register_static(&xive_source_info); +} + +type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h new file mode 100644 index 0000000000..7aa2e38012 --- /dev/null +++ b/include/hw/ppc/xive.h @@ -0,0 +1,260 @@ +/* + * QEMU PowerPC XIVE interrupt controller model + * + * + * The POWER9 processor comes with a new interrupt controller, called + * XIVE as "eXternal Interrupt Virtualization Engine". + * + * = Overall architecture + * + * + * XIVE Interrupt Controller + * +------------------------------------+ IPIs + * | +---------+ +---------+ +--------+ | +-------+ + * | |VC | |CQ | |PC |----> | CORES | + * | | esb | | | | |----> | | + * | | eas | | Bridge | | tctx |----> | | + * | |SC end | | | | nvt | | | | + * +------+ | +---------+ +----+----+ +--------+ | +-+-+-+-+ + * | RAM | +------------------|-----------------+ | | | + * | | | | | | + * | | | | | | + * | | +--------------------v------------------------v-v-v--+ other + * | <--+ Power Bus +--> chips + * | esb | +---------+-----------------------+------------------+ + * | eas | | | + * | end | +--|------+ | + * | nvt | +----+----+ | +----+----+ + * +------+ |SC | | |SC | + * | | | | | + * | PQ-bits | | | PQ-bits | + * | local |-+ | in VC | + * +---------+ +---------+ + * PCIe NX,NPU,CAPI + * + * SC: Source Controller (aka. 
IVSE) + * VC: Virtualization Controller (aka. IVRE) + * PC: Presentation Controller (aka. IVPE) + * CQ: Common Queue (Bridge) + * + * PQ-bits: 2 bits source state machine (P:pending Q:queued) + * esb: Event State Buffer (Array of PQ bits in an IVSE) + * eas: Event Assignment Structure + * end: Event Notification Descriptor + * nvt: Notification Virtual Target + * tctx: Thread interrupt Context + * + * + * The XIVE IC is composed of three sub-engines : + * + * - Interrupt Virtualization Source Engine (IVSE), or Source + * Controller (SC). These are found in PCI PHBs, in the PSI host + * bridge controller, but also inside the main controller for the + * core IPIs and other sub-chips (NX, CAP, NPU) of the + * chip/processor. They are configured to feed the IVRE with events. + * + * - Interrupt Virtualization Routing Engine (IVRE) or Virtualization + * Controller (VC). Its job is to match an event source with an + * Event Notification Descriptor (END). + * + * - Interrupt Virtualization Presentation Engine (IVPE) or + * Presentation Controller (PC). It maintains the interrupt context + * state of each thread and handles the delivery of the external + * exception to the thread. + * + * In XIVE 1.0, the sub-engines used to be referred as: + * + * SC Source Controller + * VC Virtualization Controller + * PC Presentation Controller + * CQ Common Queue (PowerBUS Bridge) + * + * + * = XIVE internal tables + * + * Each of the sub-engines uses a set of tables to redirect exceptions + * from event sources to CPU threads. + * + * +-------+ + * User or OS | EQ | + * or +------>|entries| + * Hypervisor | | .. | + * Memory | +-------+ + * | ^ + * | | + * +-------------------------------------------------+ + * | | + * Hypervisor +------+ +---+--+ +---+--+ +------+ + * Memory | ESB | | EAT | | ENDT | | NVTT | + * (skiboot) +----+-+ +----+-+ +----+-+ +------+ + * ^ | ^ | ^ | ^ + * | | | | | | | + * +-------------------------------------------------+ + * | | | | | | | + * | | | | | | | + * +----|--|--------|--|--------|--|-+ +-|-----+ +------+ + * | | | | | | | | | | tctx| |Thread| + * IPI or --> | + v + v + v |---| + .. |-----> | + * HW events --> | | | | | | + * IVSE | IVRE | | IVPE | +------+ + * +---------------------------------+ +-------+ + * + * + * + * The IVSE have a 2-bits state machine, P for pending and Q for queued, + * for each source that allows events to be triggered. They are stored in + * an Event State Buffer (ESB) array and can be controlled by MMIOs. + * + * If the event is let through, the IVRE looks up in the Event Assignment + * Structure (EAS) table for an Event Notification Descriptor (END) + * configured for the source. Each Event Notification Descriptor defines + * a notification path to a CPU and an in-memory Event Queue, in which + * will be enqueued an EQ data for the OS to pull. + * + * The IVPE determines if a Notification Virtual Target (NVT) can + * handle the event by scanning the thread contexts of the VCPUs + * dispatched on the processor HW threads. It maintains the state of + * the thread interrupt context (TCTX) of each thread in a NVT table. + * + * = Acronyms + * + * Description In XIVE 1.0, used to be referred as + * + * EAS Event Assignment Structure IVE Interrupt Virt. Entry + * EAT Event Assignment Table IVT Interrupt Virt. Table + * ENDT Event Notif. Descriptor Table EQDT Event Queue Desc. Table + * EQ Event Queue same + * ESB Event State Buffer SBE State Bit Entry + * NVT Notif. Virtual Target VPD Virtual Processor Desc. + * NVTT Notif. 
Virtual Target Table VPDT Virtual Processor Desc. Table + * TCTX Thread interrupt Context + * + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + * + */ + +#ifndef PPC_XIVE_H +#define PPC_XIVE_H + +#include "hw/qdev-core.h" + +/* + * XIVE Interrupt Source + */ + +#define TYPE_XIVE_SOURCE "xive-source" +#define XIVE_SOURCE(obj) OBJECT_CHECK(XiveSource, (obj), TYPE_XIVE_SOURCE) + +/* + * XIVE Interrupt Source characteristics, which define how the ESB are + * controlled. + */ +#define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */ +#define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */ + +typedef struct XiveSource { + DeviceState parent; + + /* IRQs */ + uint32_t nr_irqs; + qemu_irq *qirqs; + + /* PQ bits */ + uint8_t *status; + + /* ESB memory region */ + uint64_t esb_flags; + uint32_t esb_shift; + MemoryRegion esb_mmio; +} XiveSource; + +/* + * ESB MMIO setting. Can be one page, for both source triggering and + * source management, or two different pages. See below for magic + * values. + */ +#define XIVE_ESB_4K 12 /* PSI HB only */ +#define XIVE_ESB_4K_2PAGE 13 +#define XIVE_ESB_64K 16 +#define XIVE_ESB_64K_2PAGE 17 + +static inline bool xive_source_esb_has_2page(XiveSource *xsrc) +{ + return xsrc->esb_shift == XIVE_ESB_64K_2PAGE || + xsrc->esb_shift == XIVE_ESB_4K_2PAGE; +} + +/* The trigger page is always the first/even page */ +static inline hwaddr xive_source_esb_page(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + return (1ull << xsrc->esb_shift) * srcno; +} + +/* In a two pages ESB MMIO setting, the odd page is for management */ +static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) +{ + hwaddr addr = xive_source_esb_page(xsrc, srcno); + + if (xive_source_esb_has_2page(xsrc)) { + addr += (1 << (xsrc->esb_shift - 1)); + } + + return addr; +} + +/* + * Each interrupt source has a 2-bit state machine which can be + * controlled by MMIO. P indicates that an interrupt is pending (has + * been sent to a queue and is waiting for an EOI). Q indicates that + * the interrupt has been triggered while pending. + * + * This acts as a coalescing mechanism in order to guarantee that a + * given interrupt only occurs at most once in a queue. + * + * When doing an EOI, the Q bit will indicate if the interrupt + * needs to be re-triggered. + */ +#define XIVE_ESB_VAL_P 0x2 +#define XIVE_ESB_VAL_Q 0x1 + +#define XIVE_ESB_RESET 0x0 +#define XIVE_ESB_PENDING XIVE_ESB_VAL_P +#define XIVE_ESB_QUEUED (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q) +#define XIVE_ESB_OFF XIVE_ESB_VAL_Q + +/* + * "magic" Event State Buffer (ESB) MMIO offsets. + * + * The following offsets into the ESB MMIO allow to read or manipulate + * the PQ bits. They must be used with an 8-byte load instruction. + * They all return the previous state of the interrupt (atomically). + * + * Additionally, some ESB pages support doing an EOI via a store and + * some ESBs support doing a trigger via a separate trigger page. 
+ */ +#define XIVE_ESB_STORE_EOI 0x400 /* Store */ +#define XIVE_ESB_LOAD_EOI 0x000 /* Load */ +#define XIVE_ESB_GET 0x800 /* Load */ +#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ +#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ +#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ +#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ + +uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno); +uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq); + +void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, + Monitor *mon); + +static inline qemu_irq xive_source_qirq(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + return xsrc->qirqs[srcno]; +} + +#endif /* PPC_XIVE_H */ From 5fd9ef18a9707c17d0f1d4262a76fa878edb65c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:16 +0100 Subject: [PATCH 16/40] ppc/xive: add support for the LSI interrupt sources MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'sent' status of the LSI interrupt source is modeled with the 'P' bit of the ESB and the assertion status of the source is maintained with an extra bit under the main XiveSource object. The type of the source is stored in the same array for practical reasons. Signed-off-by: Cédric Le Goater [dwg: Fix style nit] Signed-off-by: David Gibson --- hw/intc/xive.c | 67 +++++++++++++++++++++++++++++++++++++++---- include/hw/ppc/xive.h | 19 +++++++++++- 2 files changed, 79 insertions(+), 7 deletions(-) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 6389bd8323..4998f128e7 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -89,14 +89,42 @@ uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) return xive_esb_set(&xsrc->status[srcno], pq); } +/* + * Returns whether the event notification should be forwarded. + */ +static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) +{ + uint8_t old_pq = xive_source_esb_get(xsrc, srcno); + + xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; + + switch (old_pq) { + case XIVE_ESB_RESET: + xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); + return true; + default: + return false; + } +} + /* * Returns whether the event notification should be forwarded. 
*/ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) { + bool ret; + assert(srcno < xsrc->nr_irqs); - return xive_esb_trigger(&xsrc->status[srcno]); + ret = xive_esb_trigger(&xsrc->status[srcno]); + + if (xive_source_irq_is_lsi(xsrc, srcno) && + xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: queued an event on LSI IRQ %d\n", srcno); + } + + return ret; } /* @@ -104,9 +132,23 @@ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) */ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) { + bool ret; + assert(srcno < xsrc->nr_irqs); - return xive_esb_eoi(&xsrc->status[srcno]); + ret = xive_esb_eoi(&xsrc->status[srcno]); + + /* + * LSI sources do not set the Q bit but they can still be + * asserted, in which case we should forward a new event + * notification + */ + if (xive_source_irq_is_lsi(xsrc, srcno) && + xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + ret = xive_source_lsi_trigger(xsrc, srcno); + } + + return ret; } /* @@ -271,8 +313,16 @@ static void xive_source_set_irq(void *opaque, int srcno, int val) XiveSource *xsrc = XIVE_SOURCE(opaque); bool notify = false; - if (val) { - notify = xive_source_esb_trigger(xsrc, srcno); + if (xive_source_irq_is_lsi(xsrc, srcno)) { + if (val) { + notify = xive_source_lsi_trigger(xsrc, srcno); + } else { + xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; + } + } else { + if (val) { + notify = xive_source_esb_trigger(xsrc, srcno); + } } /* Forward the source event notification for routing */ @@ -292,9 +342,11 @@ void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) continue; } - monitor_printf(mon, " %08x %c%c\n", i + offset, + monitor_printf(mon, " %08x %s %c%c%c\n", i + offset, + xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', - pq & XIVE_ESB_VAL_Q ? 'Q' : '-'); + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' '); } } @@ -302,6 +354,8 @@ static void xive_source_reset(void *dev) { XiveSource *xsrc = XIVE_SOURCE(dev); + /* Do not clear the LSI bitmap */ + /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); } @@ -324,6 +378,7 @@ static void xive_source_realize(DeviceState *dev, Error **errp) } xsrc->status = g_malloc0(xsrc->nr_irqs); + xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), &xive_source_esb_ops, xsrc, "xive.esb", diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 7aa2e38012..7cebc32eba 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -162,8 +162,9 @@ typedef struct XiveSource { /* IRQs */ uint32_t nr_irqs; qemu_irq *qirqs; + unsigned long *lsi_map; - /* PQ bits */ + /* PQ bits and LSI assertion bit */ uint8_t *status; /* ESB memory region */ @@ -219,6 +220,7 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) * When doing an EOI, the Q bit will indicate if the interrupt * needs to be re-triggered. 
*/ +#define XIVE_STATUS_ASSERTED 0x4 /* Extra bit for LSI */ #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 @@ -257,4 +259,19 @@ static inline qemu_irq xive_source_qirq(XiveSource *xsrc, uint32_t srcno) return xsrc->qirqs[srcno]; } +static inline bool xive_source_irq_is_lsi(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + return test_bit(srcno, xsrc->lsi_map); +} + +static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno, + bool lsi) +{ + assert(srcno < xsrc->nr_irqs); + if (lsi) { + bitmap_set(xsrc->lsi_map, srcno, 1); + } +} + #endif /* PPC_XIVE_H */ From 5e79b155a8ca342cb6ccfcd2a779e200d34f2a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:17 +0100 Subject: [PATCH 17/40] ppc/xive: introduce the XiveNotifier interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XiveNotifier offers a simple interface, between the XiveSource object and the main interrupt controller of the machine. It will forward event notifications to the XIVE Interrupt Virtualization Routing Engine (IVRE). Signed-off-by: Cédric Le Goater [dwg: Adjust type name string for XiveNotifier] Signed-off-by: David Gibson --- hw/intc/xive.c | 25 +++++++++++++++++++++++++ include/hw/ppc/xive.h | 23 +++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 4998f128e7..8d5434d6bd 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -156,7 +156,11 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) */ static void xive_source_notify(XiveSource *xsrc, int srcno) { + XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); + if (xnc->notify) { + xnc->notify(xsrc->xive, srcno); + } } /* @@ -363,6 +367,17 @@ static void xive_source_reset(void *dev) static void xive_source_realize(DeviceState *dev, Error **errp) { XiveSource *xsrc = XIVE_SOURCE(dev); + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "xive", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'xive' not found: "); + return; + } + + xsrc->xive = XIVE_NOTIFIER(obj); if (!xsrc->nr_irqs) { error_setg(errp, "Number of interrupt needs to be greater than 0"); @@ -429,9 +444,19 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XIVE Fabric + */ +static const TypeInfo xive_fabric_info = { + .name = TYPE_XIVE_NOTIFIER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XiveNotifierClass), +}; + static void xive_register_types(void) { type_register_static(&xive_source_info); + type_register_static(&xive_fabric_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 7cebc32eba..436f1bf756 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -142,6 +142,27 @@ #include "hw/qdev-core.h" +/* + * XIVE Fabric (Interface between Source and Router) + */ + +typedef struct XiveNotifier { + Object parent; +} XiveNotifier; + +#define TYPE_XIVE_NOTIFIER "xive-notifier" +#define XIVE_NOTIFIER(obj) \ + OBJECT_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER) +#define XIVE_NOTIFIER_CLASS(klass) \ + OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER) +#define XIVE_NOTIFIER_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XiveNotifierClass, (obj), TYPE_XIVE_NOTIFIER) + +typedef struct XiveNotifierClass { + InterfaceClass parent; + void (*notify)(XiveNotifier *xn, uint32_t lisn); +} XiveNotifierClass; 
+ /* * XIVE Interrupt Source */ @@ -171,6 +192,8 @@ typedef struct XiveSource { uint64_t esb_flags; uint32_t esb_shift; MemoryRegion esb_mmio; + + XiveNotifier *xive; } XiveSource; /* From 7ff7ea928039e418dfa584c91f3f78512284a79a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:18 +0100 Subject: [PATCH 18/40] ppc/xive: introduce the XiveRouter model MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XiveRouter models the second sub-engine of the XIVE architecture : the Interrupt Virtualization Routing Engine (IVRE). The IVRE handles event notifications of the IVSE and performs the interrupt routing process. For this purpose, it uses a set of tables stored in system memory, the first of which being the Event Assignment Structure (EAS) table. The EAT associates an interrupt source number with an Event Notification Descriptor (END) which will be used in a second phase of the routing process to identify a Notification Virtual Target. The XiveRouter is an abstract class which needs to be inherited from to define a storage for the EAT, and other upcoming tables. Signed-off-by: Cédric Le Goater [dwg: Folded in parts of a later fix by Cédric fixing field access] [dwg: Fix style nits] Signed-off-by: David Gibson --- hw/intc/xive.c | 77 ++++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 31 +++++++++++++++ include/hw/ppc/xive_regs.h | 62 ++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 include/hw/ppc/xive_regs.h diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 8d5434d6bd..8878abc317 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -444,6 +444,82 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XIVE Router (aka. Virtualization Controller or IVRE) + */ + +int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); +} + +static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) +{ + XiveRouter *xrtr = XIVE_ROUTER(xn); + uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn); + uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn); + XiveEAS eas; + + /* EAS cache lookup */ + if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); + return; + } + + /* + * The IVRE checks the State Bit Cache at this point. We skip the + * SBC lookup because the state bits of the sources are modeled + * internally in QEMU. 
+ */ + + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); + return; + } + + if (xive_eas_is_masked(&eas)) { + /* Notification completed */ + return; + } +} + +static void xive_router_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + + dc->desc = "XIVE Router Engine"; + xnc->notify = xive_router_notify; +} + +static const TypeInfo xive_router_info = { + .name = TYPE_XIVE_ROUTER, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .class_size = sizeof(XiveRouterClass), + .class_init = xive_router_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { } + } +}; + +void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) +{ + if (!xive_eas_is_valid(eas)) { + return; + } + + monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n", + lisn, xive_eas_is_masked(eas) ? "M" : " ", + (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); +} + /* * XIVE Fabric */ @@ -457,6 +533,7 @@ static void xive_register_types(void) { type_register_static(&xive_source_info); type_register_static(&xive_fabric_info); + type_register_static(&xive_router_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 436f1bf756..527aa73366 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -141,6 +141,8 @@ #define PPC_XIVE_H #include "hw/qdev-core.h" +#include "hw/sysbus.h" +#include "hw/ppc/xive_regs.h" /* * XIVE Fabric (Interface between Source and Router) @@ -297,4 +299,33 @@ static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno, } } +/* + * XIVE Router + */ + +typedef struct XiveRouter { + SysBusDevice parent; +} XiveRouter; + +#define TYPE_XIVE_ROUTER "xive-router" +#define XIVE_ROUTER(obj) \ + OBJECT_CHECK(XiveRouter, (obj), TYPE_XIVE_ROUTER) +#define XIVE_ROUTER_CLASS(klass) \ + OBJECT_CLASS_CHECK(XiveRouterClass, (klass), TYPE_XIVE_ROUTER) +#define XIVE_ROUTER_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XiveRouterClass, (obj), TYPE_XIVE_ROUTER) + +typedef struct XiveRouterClass { + SysBusDeviceClass parent; + + /* XIVE table accessors */ + int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas); +} XiveRouterClass; + +void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon); + +int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas); + #endif /* PPC_XIVE_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h new file mode 100644 index 0000000000..e1193fb3e4 --- /dev/null +++ b/include/hw/ppc/xive_regs.h @@ -0,0 +1,62 @@ +/* + * QEMU PowerPC XIVE internal structure definitions + * + * + * The XIVE structures are accessed by the HW and their format is + * architected to be big-endian. Some macros are provided to ease + * access to the different fields. + * + * + * Copyright (c) 2016-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#ifndef PPC_XIVE_REGS_H +#define PPC_XIVE_REGS_H + +/* + * Interrupt source number encoding on PowerBUS + */ +#define XIVE_SRCNO_BLOCK(srcno) (((srcno) >> 28) & 0xf) +#define XIVE_SRCNO_INDEX(srcno) ((srcno) & 0x0fffffff) +#define XIVE_SRCNO(blk, idx) ((uint32_t)(blk) << 28 | (idx)) + +/* + * EAS (Event Assignment Structure) + * + * One per interrupt source. Targets an interrupt to a given Event + * Notification Descriptor (END) and provides the corresponding + * logical interrupt number (END data) + */ +typedef struct XiveEAS { + /* + * Use a single 64-bit definition to make it easier to perform + * atomic updates + */ + uint64_t w; +#define EAS_VALID PPC_BIT(0) +#define EAS_END_BLOCK PPC_BITMASK(4, 7) /* Destination END block# */ +#define EAS_END_INDEX PPC_BITMASK(8, 31) /* Destination END index */ +#define EAS_MASKED PPC_BIT(32) /* Masked */ +#define EAS_END_DATA PPC_BITMASK(33, 63) /* Data written to the END */ +} XiveEAS; + +#define xive_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS_VALID) +#define xive_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS_MASKED) + +static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word) +{ + return (be64_to_cpu(word) & mask) >> ctz64(mask); +} + +static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word, + uint64_t value) +{ + uint64_t tmp = + (be64_to_cpu(word) & ~mask) | ((value << ctz64(mask)) & mask); + return cpu_to_be64(tmp); +} + +#endif /* PPC_XIVE_REGS_H */ From e4ddaac67f1fbdeea207fe28d71dca744832377b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:19 +0100 Subject: [PATCH 19/40] ppc/xive: introduce the XIVE Event Notification Descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To complete the event routing, the IVRE sub-engine uses a second table containing Event Notification Descriptor (END) structures. An END specifies on which Event Queue (EQ) the event notification data, defined in the associated EAS, should be posted when an exception occurs. It also defines which Notification Virtual Target (NVT) should be notified. The Event Queue is a memory page provided by the O/S defining a circular buffer, one per server and priority couple, containing Event Queue entries. These are 4 bytes long, the first bit being a 'generation' bit and the 31 following bits the END Data field. They are pulled by the O/S when the exception occurs. The END Data field is a way to set an invariant logical event source number for an IRQ. On sPAPR machines, it is set with the H_INT_SET_SOURCE_CONFIG hcall when the EISN flag is used. 
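For illustration only (a sketch, not part of the patch), an O/S-side
consumer of such a queue could use the generation bit to spot valid
entries; 'eq', 'qindex', 'qentries', 'gen' and handle_event() are
hypothetical names:

    /* Poll a XIVE Event Queue: entries are big-endian 32-bit words. */
    uint32_t eqe = be32_to_cpu(eq[qindex]);

    while ((eqe >> 31) == gen) {            /* generation bit still valid? */
        handle_event(eqe & 0x7fffffff);     /* 31-bit END Data field */

        qindex = (qindex + 1) & (qentries - 1);
        if (qindex == 0) {
            gen ^= 1;                       /* queue wrapped: toggle generation */
        }
        eqe = be32_to_cpu(eq[qindex]);
    }

This is the mirror image of the producer side: the enqueue code in this
patch writes (qgen << 31) | (data & 0x7fffffff) and flips qgen on wrap.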
Signed-off-by: Cédric Le Goater [dwg: Fold in a later fix from Cédric fixing field accessors] Signed-off-by: David Gibson --- hw/intc/xive.c | 174 +++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 18 ++++ include/hw/ppc/xive_regs.h | 67 ++++++++++++++ 3 files changed, 259 insertions(+) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 8878abc317..9ec841f741 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -444,6 +444,95 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XiveEND helpers + */ + +void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qentries = 1 << (qsize + 10); + int i; + + /* + * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window + */ + monitor_printf(mon, " [ "); + qindex = (qindex - (width - 1)) & (qentries - 1); + for (i = 0; i < width; i++) { + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = -1; + + if (dma_memory_read(&address_space_memory, qaddr, &qdata, + sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "", + be32_to_cpu(qdata)); + qindex = (qindex + 1) & (qentries - 1); + } +} + +void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qentries = 1 << (qsize + 10); + + uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); + uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + + if (!xive_end_is_valid(end)) { + return; + } + + monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64 + "% 6d/%5d ^%d", end_idx, + xive_end_is_valid(end) ? 'v' : '-', + xive_end_is_enqueue(end) ? 'q' : '-', + xive_end_is_notify(end) ? 'n' : '-', + xive_end_is_backlog(end) ? 'b' : '-', + xive_end_is_escalate(end) ? 'e' : '-', + priority, nvt, qaddr_base, qindex, qentries, qgen); + + xive_end_queue_pic_print_info(end, 6, mon); + monitor_printf(mon, "]\n"); +} + +static void xive_end_enqueue(XiveEND *end, uint32_t data) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); + uint32_t qentries = 1 << (qsize + 10); + + if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + + qindex = (qindex + 1) & (qentries - 1); + if (qindex == 0) { + qgen ^= 1; + end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); + } + end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); +} + /* * XIVE Router (aka. 
Virtualization Controller or IVRE) */ @@ -456,6 +545,83 @@ int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); } +int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_end(xrtr, end_blk, end_idx, end); +} + +int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); +} + +/* + * An END trigger can come from an event trigger (IPI or HW) or from + * another chip. We don't model the PowerBus but the END trigger + * message has the same parameters than in the function below. + */ +static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data) +{ + XiveEND end; + uint8_t priority; + uint8_t format; + + /* END cache lookup */ + if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return; + } + + if (!xive_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return; + } + + if (xive_end_is_enqueue(&end)) { + xive_end_enqueue(&end, end_data); + /* Enqueuing event data modifies the EQ toggle and index */ + xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); + } + + /* + * The W7 format depends on the F bit in W6. It defines the type + * of the notification : + * + * F=0 : single or multiple NVT notification + * F=1 : User level Event-Based Branch (EBB) notification, no + * priority + */ + format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); + priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); + + /* The END is masked */ + if (format == 0 && priority == 0xff) { + return; + } + + /* + * Check the END ESn (Event State Buffer for notification) for + * even futher coalescing in the Router + */ + if (!xive_end_is_notify(&end)) { + qemu_log_mask(LOG_UNIMP, "XIVE: !UCOND_NOTIFY not implemented\n"); + return; + } + + /* + * Follows IVPE notification + */ +} + static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) { XiveRouter *xrtr = XIVE_ROUTER(xn); @@ -484,6 +650,14 @@ static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) /* Notification completed */ return; } + + /* + * The event trigger becomes an END trigger + */ + xive_router_end_notify(xrtr, + xive_get_field64(EAS_END_BLOCK, eas.w), + xive_get_field64(EAS_END_INDEX, eas.w), + xive_get_field64(EAS_END_DATA, eas.w)); } static void xive_router_class_init(ObjectClass *klass, void *data) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 527aa73366..4851d3b3a4 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -321,11 +321,29 @@ typedef struct XiveRouterClass { /* XIVE table accessors */ int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, XiveEAS *eas); + int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end); + int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number); } XiveRouterClass; void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon); int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, XiveEAS *eas); +int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end); +int 
xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number); + +/* + * For legacy compatibility, the exceptions define up to 256 different + * priorities. P9 implements only 9 levels : 8 active levels [0 - 7] + * and the least favored level 0xFF. + */ +#define XIVE_PRIORITY_MAX 7 + +void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon); +void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon); #endif /* PPC_XIVE_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h index e1193fb3e4..b1d55ecf94 100644 --- a/include/hw/ppc/xive_regs.h +++ b/include/hw/ppc/xive_regs.h @@ -59,4 +59,71 @@ static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word, return cpu_to_be64(tmp); } +static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word) +{ + return (be32_to_cpu(word) & mask) >> ctz32(mask); +} + +static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word, + uint32_t value) +{ + uint32_t tmp = + (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask); + return cpu_to_be32(tmp); +} + +/* Event Notification Descriptor (END) */ +typedef struct XiveEND { + uint32_t w0; +#define END_W0_VALID PPC_BIT32(0) /* "v" bit */ +#define END_W0_ENQUEUE PPC_BIT32(1) /* "q" bit */ +#define END_W0_UCOND_NOTIFY PPC_BIT32(2) /* "n" bit */ +#define END_W0_BACKLOG PPC_BIT32(3) /* "b" bit */ +#define END_W0_PRECL_ESC_CTL PPC_BIT32(4) /* "p" bit */ +#define END_W0_ESCALATE_CTL PPC_BIT32(5) /* "e" bit */ +#define END_W0_UNCOND_ESCALATE PPC_BIT32(6) /* "u" bit - DD2.0 */ +#define END_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit - DD2.0 */ +#define END_W0_QSIZE PPC_BITMASK32(12, 15) +#define END_W0_SW0 PPC_BIT32(16) +#define END_W0_FIRMWARE END_W0_SW0 /* Owned by FW */ +#define END_QSIZE_4K 0 +#define END_QSIZE_64K 4 +#define END_W0_HWDEP PPC_BITMASK32(24, 31) + uint32_t w1; +#define END_W1_ESn PPC_BITMASK32(0, 1) +#define END_W1_ESn_P PPC_BIT32(0) +#define END_W1_ESn_Q PPC_BIT32(1) +#define END_W1_ESe PPC_BITMASK32(2, 3) +#define END_W1_ESe_P PPC_BIT32(2) +#define END_W1_ESe_Q PPC_BIT32(3) +#define END_W1_GENERATION PPC_BIT32(9) +#define END_W1_PAGE_OFF PPC_BITMASK32(10, 31) + uint32_t w2; +#define END_W2_MIGRATION_REG PPC_BITMASK32(0, 3) +#define END_W2_OP_DESC_HI PPC_BITMASK32(4, 31) + uint32_t w3; +#define END_W3_OP_DESC_LO PPC_BITMASK32(0, 31) + uint32_t w4; +#define END_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) +#define END_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) + uint32_t w5; +#define END_W5_ESC_END_DATA PPC_BITMASK32(1, 31) + uint32_t w6; +#define END_W6_FORMAT_BIT PPC_BIT32(8) +#define END_W6_NVT_BLOCK PPC_BITMASK32(9, 12) +#define END_W6_NVT_INDEX PPC_BITMASK32(13, 31) + uint32_t w7; +#define END_W7_F0_IGNORE PPC_BIT32(0) +#define END_W7_F0_BLK_GROUPING PPC_BIT32(1) +#define END_W7_F0_PRIORITY PPC_BITMASK32(8, 15) +#define END_W7_F1_WAKEZ PPC_BIT32(0) +#define END_W7_F1_LOG_SERVER_ID PPC_BITMASK32(1, 31) +} XiveEND; + +#define xive_end_is_valid(end) (be32_to_cpu((end)->w0) & END_W0_VALID) +#define xive_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END_W0_ENQUEUE) +#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY) +#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG) +#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL) + #endif /* PPC_XIVE_REGS_H */ From 482969d680b7ae9c903198db2b7ff1984361e0a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:26 
+0100 Subject: [PATCH 20/40] spapr: initialize VSMT before initializing the IRQ backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We will need to use xics_max_server_number() to create the sPAPRXive object modeling the interrupt controller of the machine which is created before the CPUs. Signed-off-by: Cédric Le Goater Reviewed-by: Greg Kurz [dwg: Fix style nit] Signed-off-by: David Gibson --- hw/ppc/spapr.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 051d080fe5..2b2df6b848 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2464,11 +2464,6 @@ static void spapr_init_cpus(sPAPRMachineState *spapr) boot_cores_nr = possible_cpus->len; } - /* VSMT must be set in order to be able to compute VCPU ids, ie to - * call xics_max_server_number() or spapr_vcpu_id(). - */ - spapr_set_vsmt_mode(spapr, &error_fatal); - if (smc->pre_2_10_has_unused_icps) { int i; @@ -2591,6 +2586,12 @@ static void spapr_machine_init(MachineState *machine) /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; + /* + * VSMT must be set in order to be able to compute VCPU ids, ie to + * call xics_max_server_number() or spapr_vcpu_id(). + */ + spapr_set_vsmt_mode(spapr, &error_fatal); + /* Set up Interrupt Controller before we create the VCPUs */ smc->irq->init(spapr, &error_fatal); From fab397d84ab62b99f6e18ce3618f9f85c30f79c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:27 +0100 Subject: [PATCH 21/40] spapr: introduce a spapr_irq_init() routine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize the MSI bitmap from it as this will be necessary for the sPAPR IRQ backend for XIVE. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/ppc/spapr.c | 2 +- hw/ppc/spapr_irq.c | 16 +++++++++++----- include/hw/ppc/spapr_irq.h | 1 + 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 2b2df6b848..c1c0e75fcd 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2593,7 +2593,7 @@ static void spapr_machine_init(MachineState *machine) spapr_set_vsmt_mode(spapr, &error_fatal); /* Set up Interrupt Controller before we create the VCPUs */ - smc->irq->init(spapr, &error_fatal); + spapr_irq_init(spapr, &error_fatal); /* Set up containers for ibm,client-architecture-support negotiated options */ diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index e77b94cc68..f8b651de0e 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -97,11 +97,6 @@ static void spapr_irq_init_xics(sPAPRMachineState *spapr, Error **errp) int nr_irqs = smc->irq->nr_irqs; Error *local_err = NULL; - /* Initialize the MSI IRQ allocator. */ - if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { - spapr_irq_msi_init(spapr, smc->irq->nr_msis); - } - if (kvm_enabled()) { if (machine_kernel_irqchip_allowed(machine) && !xics_kvm_init(spapr, &local_err)) { @@ -213,6 +208,17 @@ sPAPRIrq spapr_irq_xics = { /* * sPAPR IRQ frontend routines for devices */ +void spapr_irq_init(sPAPRMachineState *spapr, Error **errp) +{ + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + /* Initialize the MSI IRQ allocator. 
*/ + if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + spapr_irq_msi_init(spapr, smc->irq->nr_msis); + } + + smc->irq->init(spapr, errp); +} int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp) { diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index a467ce696e..bd7301e6d9 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -43,6 +43,7 @@ typedef struct sPAPRIrq { extern sPAPRIrq spapr_irq_xics; extern sPAPRIrq spapr_irq_xics_legacy; +void spapr_irq_init(sPAPRMachineState *spapr, Error **errp); int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp); void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num); qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq); From 1a518e7693c9691b1b26865d1da9cd217e67bcd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 6 Dec 2018 00:22:29 +0100 Subject: [PATCH 22/40] spapr: export and rename the xics_max_server_number() routine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XIVE sPAPR IRQ backend will use it to define the number of ENDs of the IC controller. Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- hw/ppc/spapr.c | 8 ++++---- include/hw/ppc/spapr.h | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index c1c0e75fcd..fc47a058dd 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -150,7 +150,7 @@ static void pre_2_10_vmstate_unregister_dummy_icp(int i) (void *)(uintptr_t) i); } -static int xics_max_server_number(sPAPRMachineState *spapr) +int spapr_max_server_number(sPAPRMachineState *spapr) { assert(spapr->vsmt); return DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads); @@ -1268,7 +1268,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); /* /interrupt controller */ - spapr_dt_xics(xics_max_server_number(spapr), fdt, PHANDLE_XICP); + spapr_dt_xics(spapr_max_server_number(spapr), fdt, PHANDLE_XICP); ret = spapr_populate_memory(spapr, fdt); if (ret < 0) { @@ -2467,7 +2467,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr) if (smc->pre_2_10_has_unused_icps) { int i; - for (i = 0; i < xics_max_server_number(spapr); i++) { + for (i = 0; i < spapr_max_server_number(spapr); i++) { /* Dummy entries get deregistered when real ICPState objects * are registered during CPU core hotplug. */ @@ -2588,7 +2588,7 @@ static void spapr_machine_init(MachineState *machine) /* * VSMT must be set in order to be able to compute VCPU ids, ie to - * call xics_max_server_number() or spapr_vcpu_id(). + * call spapr_max_server_number() or spapr_vcpu_id(). */ spapr_set_vsmt_mode(spapr, &error_fatal); diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 6279711fe8..198764066d 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -737,6 +737,7 @@ int spapr_hpt_shift_for_ramsize(uint64_t ramsize); void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift, Error **errp); void spapr_clear_pending_events(sPAPRMachineState *spapr); +int spapr_max_server_number(sPAPRMachineState *spapr); /* CPU and LMB DRC release callbacks. */ void spapr_core_release(DeviceState *dev); From fcfbc18d00b10335310c9665edd6e04f2d152be8 Mon Sep 17 00:00:00 2001 From: "Paul A. 
Clarke" Date: Fri, 7 Dec 2018 15:13:14 -0200
Subject: [PATCH 23/40] Changes requirement for "vsubsbs" instruction

Change the requirement for the "vsubsbs" instruction, which has been
supported since ISA 2.03 (please see section 5.9.1.2 of ISA 2.03).

Reported-by: Paul A. Clarke
Signed-off-by: Paul A. Clarke
Signed-off-by: Leonardo Bras
Signed-off-by: David Gibson
---
 target/ppc/translate/vmx-ops.inc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/ppc/translate/vmx-ops.inc.c b/target/ppc/translate/vmx-ops.inc.c
index 139f80cb24..84e05fb827 100644
--- a/target/ppc/translate/vmx-ops.inc.c
+++ b/target/ppc/translate/vmx-ops.inc.c
@@ -143,7 +143,7 @@ GEN_VXFORM(vaddsws, 0, 14),
 GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
 GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
 GEN_VXFORM(vsubuws, 0, 26),
-GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_NONE, PPC2_ISA300),
+GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300),
 GEN_VXFORM(vsubshs, 0, 29),
 GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
 GEN_VXFORM_207(vadduqm, 0, 4),

From 002686be42784fdce4c1c8ecd1987ddf740cab77 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Sun, 9 Dec 2018 20:45:52 +0100
Subject: [PATCH 24/40] ppc/xive: add support for the END Event State Buffers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The Event Notification Descriptor (END) XIVE structure also contains
two Event State Buffers providing further coalescing of interrupts,
one for the notification event (ESn) and one for the escalation
events (ESe). An MMIO page is assigned to each to control the EOI
through loads only. Stores are not allowed.

The END ESBs are modeled through an object resembling the 'XiveSource'.
It is stateless as the END state bits are backed by the XiveEND
structure under the XiveRouter, and the MMIO accesses follow the same
rules as for the XiveSource ESBs.

END ESBs are not supported by the Linux drivers, on either OPAL or
sPAPR. Nevertheless, they provide a means to study the question in the
future and further validate the XIVE model.
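Before the diff, a quick sketch of the 2-bit PQ state machine that these
ESBs implement on a trigger, like the source ESBs; esb_trigger_sketch()
is a hypothetical rendering of the xive_esb_trigger() helper the hunk
below relies on, built from the XIVE_ESB_VAL_P/XIVE_ESB_VAL_Q bits
defined earlier in the series:

    /*
     * PQ state machine on an event trigger. P means an event is
     * pending, Q that a further event arrived while one was pending.
     * Returns true when the event must be forwarded for routing.
     */
    static bool esb_trigger_sketch(uint8_t *pq)
    {
        switch (*pq & 0x3) {
        case 0:                         /* P=0 Q=0: reset state */
            *pq = XIVE_ESB_VAL_P;       /* -> pending, notify */
            return true;
        case XIVE_ESB_VAL_P:            /* P=1 Q=0: already pending */
        case XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q:
            *pq = XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q; /* coalesce */
            return false;
        default:                        /* P=0 Q=1: off, discard */
            return false;
        }
    }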
Signed-off-by: Cédric Le Goater [dwg: Fold in a later fix for field access] Signed-off-by: David Gibson --- hw/intc/xive.c | 160 +++++++++++++++++++++++++++++++++++++++++- include/hw/ppc/xive.h | 21 ++++++ 2 files changed, 179 insertions(+), 2 deletions(-) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 9ec841f741..7b2ef7480d 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -613,8 +613,18 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, * even futher coalescing in the Router */ if (!xive_end_is_notify(&end)) { - qemu_log_mask(LOG_UNIMP, "XIVE: !UCOND_NOTIFY not implemented\n"); - return; + uint8_t pq = xive_get_field32(END_W1_ESn, end.w1); + bool notify = xive_esb_trigger(&pq); + + if (pq != xive_get_field32(END_W1_ESn, end.w1)) { + end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq); + xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); + } + + /* ESn[Q]=1 : end of notification */ + if (!notify) { + return; + } } /* @@ -694,6 +704,151 @@ void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); } +/* + * END ESB MMIO loads + */ +static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size) +{ + XiveENDSource *xsrc = XIVE_END_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint8_t end_blk; + uint32_t end_idx; + XiveEND end; + uint32_t end_esmask; + uint8_t pq; + uint64_t ret = -1; + + end_blk = xsrc->block_id; + end_idx = addr >> (xsrc->esb_shift + 1); + + if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return -1; + } + + if (!xive_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return -1; + } + + end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; + pq = xive_get_field32(end_esmask, end.w1); + + switch (offset) { + case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: + ret = xive_esb_eoi(&pq); + + /* Forward the source event notification for routing ?? */ + break; + + case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: + ret = pq; + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... 
XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_esb_set(&pq, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", + offset); + return -1; + } + + if (pq != xive_get_field32(end_esmask, end.w1)) { + end.w1 = xive_set_field32(end_esmask, end.w1, pq); + xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); + } + + return ret; +} + +/* + * END ESB MMIO stores are invalid + */ +static void xive_end_source_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" + HWADDR_PRIx"\n", addr); +} + +static const MemoryRegionOps xive_end_source_ops = { + .read = xive_end_source_read, + .write = xive_end_source_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive_end_source_realize(DeviceState *dev, Error **errp) +{ + XiveENDSource *xsrc = XIVE_END_SOURCE(dev); + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "xive", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'xive' not found: "); + return; + } + + xsrc->xrtr = XIVE_ROUTER(obj); + + if (!xsrc->nr_ends) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_64K) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + /* + * Each END is assigned an even/odd pair of MMIO pages, the even page + * manages the ESn field while the odd page manages the ESe field. + */ + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_end_source_ops, xsrc, "xive.end", + (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); +} + +static Property xive_end_source_properties[] = { + DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0), + DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), + DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_end_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE END Source"; + dc->props = xive_end_source_properties; + dc->realize = xive_end_source_realize; +} + +static const TypeInfo xive_end_source_info = { + .name = TYPE_XIVE_END_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveENDSource), + .class_init = xive_end_source_class_init, +}; + /* * XIVE Fabric */ @@ -708,6 +863,7 @@ static void xive_register_types(void) type_register_static(&xive_source_info); type_register_static(&xive_fabric_info); type_register_static(&xive_router_info); + type_register_static(&xive_end_source_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 4851d3b3a4..014f64aa98 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -336,6 +336,27 @@ int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end, uint8_t word_number); +/* + * XIVE END ESBs + */ + +#define TYPE_XIVE_END_SOURCE "xive-end-source" +#define XIVE_END_SOURCE(obj) \ + OBJECT_CHECK(XiveENDSource, (obj), TYPE_XIVE_END_SOURCE) + +typedef struct XiveENDSource { + DeviceState parent; + + uint32_t nr_ends; + uint8_t block_id; + + /* ESB memory region */ + uint32_t esb_shift; + 
MemoryRegion esb_mmio;
+
+    XiveRouter *xrtr;
+} XiveENDSource;
+
 /*
  * For legacy compatibility, the exceptions define up to 256 different
  * priorities. P9 implements only 9 levels : 8 active levels [0 - 7]

From 207d9fe98510eaac575bfde8d1be58137e9a22ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Sun, 9 Dec 2018 20:45:53 +0100
Subject: [PATCH 25/40] ppc/xive: introduce the XIVE interrupt thread context
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Each POWER9 processor chip has a XIVE presenter that can generate four
different exceptions to its threads:

  - hypervisor exception
  - O/S exception
  - Event-Based Branch (EBB)
  - msgsnd (doorbell)

Each exception has a state independent from the others called a Thread
Interrupt Management context. This context is a set of registers which
lets the thread handle priority management and interrupt acknowledgment
among other things. The most important ones being:

  - Interrupt Priority Register (PIPR)
  - Interrupt Pending Buffer (IPB)
  - Current Processor Priority (CPPR)
  - Notification Source Register (NSR)

These registers are accessible through a specific MMIO region, called
the Thread Interrupt Management Area (TIMA), four aligned pages, each
exposing a different view of the registers. The first page (page
address ending in 0b00) gives access to the entire context and is
reserved for the ring 0 view for the physical thread context. The
second (page address ending in 0b01) is for the hypervisor, ring 1
view. The third (page address ending in 0b10) is for the operating
system, ring 2 view. The fourth (page address ending in 0b11) is for
user level, ring 3 view.

The thread interrupt context is modeled with a XiveTCTX object
containing the values of the different exception registers. The TIMA
region is mapped at the same address for each CPU.
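To make the view selection concrete, here is a worked example (a
sketch, using the TM_SHIFT and register offsets introduced below):

    /* A 1-byte store at TIMA offset 0x20011: */
    hwaddr offset = 0x20011;

    uint8_t page = (offset >> TM_SHIFT) & 0x3;  /* 0x2 -> O/S view (ring 2) */
    uint8_t reg  = offset & 0x3f;               /* 0x11 -> TM_QW1_OS + TM_CPPR */
    /* i.e. an O/S-view store to the CPPR of the thread's OS ring */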
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/intc/xive.c | 424 +++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 44 ++++ include/hw/ppc/xive_regs.h | 82 +++++++ 3 files changed, 550 insertions(+) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 7b2ef7480d..06a835c454 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -16,6 +16,429 @@ #include "hw/qdev-properties.h" #include "monitor/monitor.h" #include "hw/ppc/xive.h" +#include "hw/ppc/xive_regs.h" + +/* + * XIVE Thread Interrupt Management context + */ + +static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +{ + return 0; +} + +static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) +{ + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + tctx->regs[ring + TM_CPPR] = cppr; +} + +/* + * XIVE Thread Interrupt Management Area (TIMA) + */ + +/* + * Define an access map for each page of the TIMA that we will use in + * the memory region ops to filter values when doing loads and stores + * of raw registers values + * + * Registers accessibility bits : + * + * 0x0 - no access + * 0x1 - write only + * 0x2 - read only + * 0x3 - read/write + */ + +static const uint8_t xive_tm_hw_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0, +}; + +static const uint8_t xive_tm_hv_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, + /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0, +}; + +static const uint8_t xive_tm_os_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +static const uint8_t xive_tm_user_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* + * Overall TIMA access map for the thread interrupt management context + * registers + */ +static const uint8_t *xive_tm_views[] = { + [XIVE_TM_HW_PAGE] = xive_tm_hw_view, + [XIVE_TM_HV_PAGE] = xive_tm_hv_view, + [XIVE_TM_OS_PAGE] = xive_tm_os_view, + [XIVE_TM_USER_PAGE] = xive_tm_user_view, +}; + +/* + * Computes a register access mask for a given offset in the TIMA + */ +static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint8_t reg_offset = offset & 0x3F; + uint8_t reg_mask = write ? 
0x1 : 0x2; + uint64_t mask = 0x0; + int i; + + for (i = 0; i < size; i++) { + if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { + mask |= (uint64_t) 0xff << (8 * (size - i - 1)); + } + } + + return mask; +} + +static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, + unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, true); + int i; + + /* + * Only 4 or 8 bytes stores are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" + HWADDR_PRIx"\n", offset); + return; + } + + /* + * Use the register offset for the raw values and filter out + * reserved values + */ + for (i = 0; i < size; i++) { + uint8_t byte_mask = (mask >> (8 * (size - i - 1))); + if (byte_mask) { + tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & + byte_mask; + } + } +} + +static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, false); + uint64_t ret; + int i; + + /* + * Only 4 or 8 bytes loads are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" + HWADDR_PRIx"\n", offset); + return -1; + } + + /* Use the register offset for the raw values */ + ret = 0; + for (i = 0; i < size; i++) { + ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); + } + + /* filter out reserved values */ + return ret & mask; +} + +/* + * The TM context is mapped twice within each page. Stores and loads + * to the first mapping below 2K write and read the specified values + * without modification. The second mapping above 2K performs specific + * state changes (side effects) in addition to setting/returning the + * interrupt management area context of the processor thread. + */ +static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) +{ + return xive_tctx_accept(tctx, TM_QW1_OS); +} + +static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size) +{ + xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); +} + +/* + * Define a mapping of "special" operations depending on the TIMA page + * offset and the size of the operation. 
+ */ +typedef struct XiveTmOp { + uint8_t page_offset; + uint32_t op_offset; + unsigned size; + void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value, + unsigned size); + uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size); +} XiveTmOp; + +static const XiveTmOp xive_tm_operations[] = { + /* + * MMIOs below 2K : raw values and special operations without side + * effects + */ + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, + + /* MMIOs above 2K : special operations with side effects */ + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, +}; + +static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint32_t op_offset = offset & 0xFFF; + int i; + + for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { + const XiveTmOp *xto = &xive_tm_operations[i]; + + /* Accesses done from a more privileged TIMA page is allowed */ + if (xto->page_offset >= page_offset && + xto->op_offset == op_offset && + xto->size == size && + ((write && xto->write_handler) || (!write && xto->read_handler))) { + return xto; + } + } + return NULL; +} + +/* + * TIMA MMIO handlers + */ +static void xive_tm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + const XiveTmOp *xto; + + /* + * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, true); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA" + "@%"HWADDR_PRIx"\n", offset); + } else { + xto->write_handler(tctx, offset, value, size); + } + return; + } + + /* + * Then, for special operations in the region below 2K. + */ + xto = xive_tm_find_op(offset, size, true); + if (xto) { + xto->write_handler(tctx, offset, value, size); + return; + } + + /* + * Finish with raw access to the register values + */ + xive_tm_raw_write(tctx, offset, value, size); +} + +static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + const XiveTmOp *xto; + + /* + * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, false); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" + "@%"HWADDR_PRIx"\n", offset); + return -1; + } + return xto->read_handler(tctx, offset, size); + } + + /* + * Then, for special operations in the region below 2K. 
+ */ + xto = xive_tm_find_op(offset, size, false); + if (xto) { + return xto->read_handler(tctx, offset, size); + } + + /* + * Finish with raw access to the register values + */ + return xive_tm_raw_read(tctx, offset, size); +} + +const MemoryRegionOps xive_tm_ops = { + .read = xive_tm_read, + .write = xive_tm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static inline uint32_t xive_tctx_word2(uint8_t *ring) +{ + return *((uint32_t *) &ring[TM_WORD2]); +} + +static char *xive_tctx_ring_print(uint8_t *ring) +{ + uint32_t w2 = xive_tctx_word2(ring); + + return g_strdup_printf("%02x %02x %02x %02x %02x " + "%02x %02x %02x %08x", + ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], + ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], + be32_to_cpu(w2)); +} + +static const char * const xive_tctx_ring_names[] = { + "USER", "OS", "POOL", "PHYS", +}; + +void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) +{ + int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; + int i; + + monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" + " W2\n", cpu_index); + + for (i = 0; i < XIVE_TM_RING_COUNT; i++) { + char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); + monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index, + xive_tctx_ring_names[i], s); + g_free(s); + } +} + +static void xive_tctx_reset(void *dev) +{ + XiveTCTX *tctx = XIVE_TCTX(dev); + + memset(tctx->regs, 0, sizeof(tctx->regs)); + + /* Set some defaults */ + tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; + tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; + tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; +} + +static void xive_tctx_realize(DeviceState *dev, Error **errp) +{ + XiveTCTX *tctx = XIVE_TCTX(dev); + PowerPCCPU *cpu; + CPUPPCState *env; + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "cpu", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'cpu' not found: "); + return; + } + + cpu = POWERPC_CPU(obj); + tctx->cs = CPU(obj); + + env = &cpu->env; + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_POWER7: + tctx->output = env->irq_inputs[POWER7_INPUT_INT]; + break; + + default: + error_setg(errp, "XIVE interrupt controller does not support " + "this CPU bus model"); + return; + } + + qemu_register_reset(xive_tctx_reset, dev); +} + +static void xive_tctx_unrealize(DeviceState *dev, Error **errp) +{ + qemu_unregister_reset(xive_tctx_reset, dev); +} + +static const VMStateDescription vmstate_xive_tctx = { + .name = TYPE_XIVE_TCTX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(regs, XiveTCTX), + VMSTATE_END_OF_LIST() + }, +}; + +static void xive_tctx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Thread Context"; + dc->realize = xive_tctx_realize; + dc->unrealize = xive_tctx_unrealize; + dc->vmsd = &vmstate_xive_tctx; +} + +static const TypeInfo xive_tctx_info = { + .name = TYPE_XIVE_TCTX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveTCTX), + .class_init = xive_tctx_class_init, +}; /* * XIVE ESB helpers @@ -864,6 +1287,7 @@ static void xive_register_types(void) type_register_static(&xive_fabric_info); type_register_static(&xive_router_info); type_register_static(&xive_end_source_info); + type_register_static(&xive_tctx_info); } type_init(xive_register_types) 
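Usage note (a sketch, not code from this patch): a machine interrupt
controller would expose the TIMA by mapping the four views with the
xive_tm_ops exported below; the 'xive' device pointer and its 'tm_mmio'
field are hypothetical names:

    /* Map the 4-page TIMA (one 64K page per view) on the system bus. */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);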
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 014f64aa98..1e823a4c64 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -367,4 +367,48 @@ typedef struct XiveENDSource {
 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);

+/*
+ * XIVE Thread Interrupt Management (TM) context
+ */
+
+#define TYPE_XIVE_TCTX "xive-tctx"
+#define XIVE_TCTX(obj) OBJECT_CHECK(XiveTCTX, (obj), TYPE_XIVE_TCTX)
+
+/*
+ * XIVE Thread Interrupt Management register rings:
+ *
+ *   QW-0  User       event-based exception state
+ *   QW-1  O/S        OS context for priority management, interrupt acks
+ *   QW-2  Pool       hypervisor pool context for virtual processors dispatched
+ *   QW-3  Physical   physical thread context and security context
+ */
+#define XIVE_TM_RING_COUNT      4
+#define XIVE_TM_RING_SIZE       0x10
+
+typedef struct XiveTCTX {
+    DeviceState parent_obj;
+
+    CPUState    *cs;
+    qemu_irq    output;
+
+    uint8_t     regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];
+} XiveTCTX;
+
+/*
+ * XIVE Thread Interrupt Management Area (TIMA)
+ *
+ * This region gives access to the registers of the thread interrupt
+ * management context. It is four pages wide, each page providing a
+ * different view of the registers. The page with the lowest offset is
+ * the most privileged and gives access to the entire context.
+ */
+#define XIVE_TM_HW_PAGE         0x0
+#define XIVE_TM_HV_PAGE         0x1
+#define XIVE_TM_OS_PAGE         0x2
+#define XIVE_TM_USER_PAGE       0x3
+
+extern const MemoryRegionOps xive_tm_ops;
+
+void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon);
+
 #endif /* PPC_XIVE_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index b1d55ecf94..8b3cc6c9b9 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -23,6 +23,88 @@
 #define XIVE_SRCNO_INDEX(srcno)  ((srcno) & 0x0fffffff)
 #define XIVE_SRCNO(blk, idx)     ((uint32_t)(blk) << 28 | (idx))

+#define TM_SHIFT                16
+
+/* TM register offsets */
+#define TM_QW0_USER             0x000 /* All rings */
+#define TM_QW1_OS               0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL          0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS          0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW            QW0 QW1 QW2 QW3 */
+#define TM_NSR                  0x0  /* +   +   -   +  */
+#define TM_CPPR                 0x1  /* -   +   -   +  */
+#define TM_IPB                  0x2  /* -   +   +   +  */
+#define TM_LSMFB                0x3  /* -   +   +   +  */
+#define TM_ACK_CNT              0x4  /* -   +   -   -  */
+#define TM_INC                  0x5  /* -   +   -   +  */
+#define TM_AGE                  0x6  /* -   +   -   +  */
+#define TM_PIPR                 0x7  /* -   +   -   +  */
+
+#define TM_WORD0                0x0
+#define TM_WORD1                0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2                0x8
+#define   TM_QW0W2_VU           PPC_BIT32(0)
+#define   TM_QW0W2_LOGIC_SERV   PPC_BITMASK32(1, 31) /* XX 2,31 ?
*/ +#define TM_QW1W2_VO PPC_BIT32(0) +#define TM_QW1W2_OS_CAM PPC_BITMASK32(8, 31) +#define TM_QW2W2_VP PPC_BIT32(0) +#define TM_QW2W2_POOL_CAM PPC_BITMASK32(8, 31) +#define TM_QW3W2_VT PPC_BIT32(0) +#define TM_QW3W2_LP PPC_BIT32(6) +#define TM_QW3W2_LE PPC_BIT32(7) +#define TM_QW3W2_T PPC_BIT32(31) + +/* + * In addition to normal loads to "peek" and writes (only when invalid) + * using 4 and 8 bytes accesses, the above registers support these + * "special" byte operations: + * + * - Byte load from QW0[NSR] - User level NSR (EBB) + * - Byte store to QW0[NSR] - User level NSR (EBB) + * - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access + * - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0 + * otherwise VT||0000000 + * - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present) + * + * Then we have all these "special" CI ops at these offset that trigger + * all sorts of side effects: + */ +#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/ +#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */ +#define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */ +#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user + * context */ +#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */ +#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS + * context to reg */ +#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool + * context to reg*/ +#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */ +#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd + * line */ +#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */ +#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even + * line */ +#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */ +/* XXX more... */ + +/* NSR fields for the various QW ack types */ +#define TM_QW0_NSR_EB PPC_BIT8(0) +#define TM_QW1_NSR_EO PPC_BIT8(0) +#define TM_QW3_NSR_HE PPC_BITMASK8(0, 1) +#define TM_QW3_NSR_HE_NONE 0 +#define TM_QW3_NSR_HE_POOL 1 +#define TM_QW3_NSR_HE_PHYS 2 +#define TM_QW3_NSR_HE_LSI 3 +#define TM_QW3_NSR_I PPC_BIT8(2) +#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7) + /* * EAS (Event Assignment Structure) * From af53dbf6227a78a25ead654998fd8caf46639810 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Sun, 9 Dec 2018 20:45:54 +0100 Subject: [PATCH 26/40] ppc/xive: introduce a simplified XIVE presenter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The last sub-engine of the XIVE architecture is the Interrupt Virtualization Presentation Engine (IVPE). On HW, the IVRE and the IVPE share elements, the Power Bus interface (CQ), the routing table descriptors, and they can be combined in the same HW logic. We do the same in QEMU and combine both engines in the XiveRouter for simplicity. When the IVRE has completed its job of matching an event source with a Notification Virtual Target (NVT) to notify, it forwards the event notification to the IVPE sub-engine. The IVPE scans the thread interrupt contexts of the Notification Virtual Targets (NVT) dispatched on the HW processor threads and if a match is found, it signals the thread. If not, the IVPE escalates the notification to some other targets and records the notification in a backlog queue. The IVPE maintains the thread interrupt context state for each of its NVTs not dispatched on HW processor threads in the Notification Virtual Target table (NVTT). 
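As an illustration of the matching step (a sketch built on the helpers
added by this patch; NVT 0/4 is an arbitrary example), the O/S-ring
compare against an NVT's CAM line looks like:

    /* Is NVT 0/4 dispatched on this thread's O/S ring? */
    uint32_t cam   = xive_nvt_cam_line(0, 4);   /* (blk << 19) | idx */
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);

    bool os_match = (be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
        cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2);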
The model currently only supports single NVT notifications. Signed-off-by: Cédric Le Goater [dwg: Folded in fix for field accessors] Signed-off-by: David Gibson --- hw/intc/xive.c | 190 +++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 14 +++ include/hw/ppc/xive_regs.h | 24 +++++ 3 files changed, 228 insertions(+) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 06a835c454..1d737346c3 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -984,6 +984,188 @@ int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); } +int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); +} + +int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); +} + +/* + * The thread context register words are in big-endian format. + */ +static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint32_t logic_serv) +{ + uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); + + /* + * TODO (PowerNV): ignore mode. The low order bits of the NVT + * identifier are ignored in the "CAM" match. + */ + + if (format == 0) { + if (cam_ignore == true) { + /* + * F=0 & i=1: Logical server notification (bits ignored at + * the end of the NVT identifier) + */ + qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", + nvt_blk, nvt_idx); + return -1; + } + + /* F=0 & i=0: Specific NVT notification */ + + /* TODO (PowerNV) : PHYS ring */ + + /* HV POOL ring */ + if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && + cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { + return TM_QW2_HV_POOL; + } + + /* OS ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { + return TM_QW1_OS; + } + } else { + /* F=1 : User level Event-Based Branch (EBB) notification */ + + /* USER ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && + (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && + (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { + return TM_QW0_USER; + } + } + return -1; +} + +typedef struct XiveTCTXMatch { + XiveTCTX *tctx; + uint8_t ring; +} XiveTCTXMatch; + +static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + CPUState *cs; + + /* + * TODO (PowerNV): handle chip_id overwrite of block field for + * hardwired CAM compares + */ + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + int ring; + + /* + * HW checks that the CPU is enabled in the Physical Thread + * Enable Register (PTER). + */ + + /* + * Check the thread context CAM lines and record matches. 
We + * will handle CPU exception delivery later + */ + ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx, + cam_ignore, logic_serv); + /* + * Save the context and follow on to catch duplicates, that we + * don't support yet. + */ + if (ring != -1) { + if (match->tctx) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " + "context NVT %x/%x\n", nvt_blk, nvt_idx); + return false; + } + + match->ring = ring; + match->tctx = tctx; + } + } + + if (!match->tctx) { + qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n", + nvt_blk, nvt_idx); + return false; + } + + return true; +} + +/* + * This is our simple Xive Presenter Engine model. It is merged in the + * Router as it does not require an extra object. + * + * It receives notification requests sent by the IVRE to find one + * matching NVT (or more) dispatched on the processor threads. In case + * of a single NVT notification, the process is abreviated and the + * thread is signaled if a match is found. In case of a logical server + * notification (bits ignored at the end of the NVT identifier), the + * IVPE and IVRE select a winning thread using different filters. This + * involves 2 or 3 exchanges on the PowerBus that the model does not + * support. + * + * The parameters represent what is sent on the PowerBus + */ +static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv) +{ + XiveNVT nvt; + XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; + bool found; + + /* NVT cache lookup */ + if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n", + nvt_blk, nvt_idx); + return; + } + + if (!xive_nvt_is_valid(&nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n", + nvt_blk, nvt_idx); + return; + } + + found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, &match); + if (found) { + return; + } + + /* + * If no matching NVT is dispatched on a HW thread : + * - update the NVT structure if backlog is activated + * - escalate (ESe PQ bits and EAS in w4-5) if escalation is + * activated + */ +} + /* * An END trigger can come from an event trigger (IPI or HW) or from * another chip. We don't model the PowerBus but the END trigger @@ -1053,6 +1235,14 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, /* * Follows IVPE notification */ + xive_presenter_notify(xrtr, format, + xive_get_field32(END_W6_NVT_BLOCK, end.w6), + xive_get_field32(END_W6_NVT_INDEX, end.w6), + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); + + /* TODO: Auto EOI. 
*/ } static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 1e823a4c64..19309d1d65 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -325,6 +325,10 @@ typedef struct XiveRouterClass { XiveEND *end); int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end, uint8_t word_number); + int (*get_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt); + int (*write_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt, uint8_t word_number); } XiveRouterClass; void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon); @@ -335,6 +339,11 @@ int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end); int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end, uint8_t word_number); +int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt); +int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt, uint8_t word_number); + /* * XIVE END ESBs @@ -411,4 +420,9 @@ extern const MemoryRegionOps xive_tm_ops; void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon); +static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx) +{ + return (nvt_blk << 19) | nvt_idx; +} + #endif /* PPC_XIVE_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h index 8b3cc6c9b9..bf36678a24 100644 --- a/include/hw/ppc/xive_regs.h +++ b/include/hw/ppc/xive_regs.h @@ -208,4 +208,28 @@ typedef struct XiveEND { #define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG) #define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL) +/* Notification Virtual Target (NVT) */ +typedef struct XiveNVT { + uint32_t w0; +#define NVT_W0_VALID PPC_BIT32(0) + uint32_t w1; + uint32_t w2; + uint32_t w3; + uint32_t w4; + uint32_t w5; + uint32_t w6; + uint32_t w7; + uint32_t w8; +#define NVT_W8_GRP_VALID PPC_BIT32(0) + uint32_t w9; + uint32_t wa; + uint32_t wb; + uint32_t wc; + uint32_t wd; + uint32_t we; + uint32_t wf; +} XiveNVT; + +#define xive_nvt_is_valid(nvt) (be32_to_cpu((nvt)->w0) & NVT_W0_VALID) + #endif /* PPC_XIVE_REGS_H */ From cdd4de68edb6745d35e2a9e14c32f9a588c1fee7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Sun, 9 Dec 2018 20:45:55 +0100 Subject: [PATCH 27/40] ppc/xive: notify the CPU when the interrupt priority is more privileged MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After the event data was enqueued in the O/S Event Queue, the IVPE raises the bit corresponding to the priority of the pending interrupt in the register IBP (Interrupt Pending Buffer) to indicate there is an event pending in one of the 8 priority queues. The Pending Interrupt Priority Register (PIPR) is also updated using the IPB. This register represent the priority of the most favored pending notification. The PIPR is then compared to the the Current Processor Priority Register (CPPR). If it is more favored (numerically less than), the CPU interrupt line is raised and the EO bit of the Notification Source Register (NSR) is updated to notify the presence of an exception for the O/S. The check needs to be done whenever the PIPR or the CPPR are changed. The O/S acknowledges the interrupt with a special load in the Thread Interrupt Management Area. If the EO bit of the NSR is set, the CPPR takes the value of PIPR. 
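As a worked example of this priority bookkeeping (a standalone sketch
mirroring the priority_to_ipb() and ipb_to_pipr() helpers introduced
below; __builtin_clz stands in for QEMU's clz32):

  #include <stdint.h>
  #include <stdio.h>

  #define PRIORITY_MAX 7

  /* Priority N is pending <=> bit (PRIORITY_MAX - N) is set in the IPB */
  static uint8_t priority_to_ipb(uint8_t priority)
  {
      return priority > PRIORITY_MAX ? 0 : 1 << (PRIORITY_MAX - priority);
  }

  /* PIPR = most favored (numerically smallest) pending priority,
   * 0xFF if nothing is pending */
  static uint8_t ipb_to_pipr(uint8_t ipb)
  {
      return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xff;
  }

  int main(void)
  {
      uint8_t ipb = 0, cppr = 0xff;     /* CPPR reset value: accept anything */

      ipb |= priority_to_ipb(5);        /* an event is queued at priority 5 */
      ipb |= priority_to_ipb(2);        /* and another one at priority 2    */

      uint8_t pipr = ipb_to_pipr(ipb);  /* -> 2, the most favored           */

      printf("IPB=0x%02x PIPR=%u: interrupt %s\n", ipb, pipr,
             pipr < cppr ? "raised" : "held");
      return 0;
  }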
The bit number in the IBP corresponding to the priority of the pending interrupt is reseted and so is the EO bit of the NSR. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson [dwg: Fix style nits] Signed-off-by: David Gibson --- hw/intc/xive.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 1d737346c3..607e74acd2 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -22,9 +22,75 @@ * XIVE Thread Interrupt Management context */ +/* + * Convert a priority number to an Interrupt Pending Buffer (IPB) + * register, which indicates a pending interrupt at the priority + * corresponding to the bit number + */ +static uint8_t priority_to_ipb(uint8_t priority) +{ + return priority > XIVE_PRIORITY_MAX ? + 0 : 1 << (XIVE_PRIORITY_MAX - priority); +} + +/* + * Convert an Interrupt Pending Buffer (IPB) register to a Pending + * Interrupt Priority Register (PIPR), which contains the priority of + * the most favored pending notification. + */ +static uint8_t ipb_to_pipr(uint8_t ibp) +{ + return ibp ? clz32((uint32_t)ibp << 24) : 0xff; +} + +static void ipb_update(uint8_t *regs, uint8_t priority) +{ + regs[TM_IPB] |= priority_to_ipb(priority); + regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); +} + +static uint8_t exception_mask(uint8_t ring) +{ + switch (ring) { + case TM_QW1_OS: + return TM_QW1_NSR_EO; + default: + g_assert_not_reached(); + } +} + static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) { - return 0; + uint8_t *regs = &tctx->regs[ring]; + uint8_t nsr = regs[TM_NSR]; + uint8_t mask = exception_mask(ring); + + qemu_irq_lower(tctx->output); + + if (regs[TM_NSR] & mask) { + uint8_t cppr = regs[TM_PIPR]; + + regs[TM_CPPR] = cppr; + + /* Reset the pending buffer bit */ + regs[TM_IPB] &= ~priority_to_ipb(cppr); + regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); + + /* Drop Exception bit */ + regs[TM_NSR] &= ~mask; + } + + return (nsr << 8) | regs[TM_CPPR]; +} + +static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *regs = &tctx->regs[ring]; + + if (regs[TM_PIPR] < regs[TM_CPPR]) { + regs[TM_NSR] |= exception_mask(ring); + qemu_irq_raise(tctx->output); + } } static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) @@ -34,6 +100,9 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } tctx->regs[ring + TM_CPPR] = cppr; + + /* CPPR has changed, check if we need to raise a pending exception */ + xive_tctx_notify(tctx, ring); } /* @@ -189,6 +258,17 @@ static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset, xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); } +/* + * Adjust the IPB to allow a CPU to process event queues of other + * priorities during one physical interrupt cycle. + */ +static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size) +{ + ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff); + xive_tctx_notify(tctx, TM_QW1_OS); +} + /* * Define a mapping of "special" operations depending on the TIMA page * offset and the size of the operation. 
@@ -211,6 +291,7 @@ static const XiveTmOp xive_tm_operations[] = {
 
     /* MMIOs above 2K : special operations with side effects */
     { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
 };
 
 static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
@@ -373,6 +454,13 @@ static void xive_tctx_reset(void *dev)
     tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
+
+    /*
+     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
+     * CPPR is first set.
+     */
+    tctx->regs[TM_QW1_OS + TM_PIPR] =
+        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
 }
 
 static void xive_tctx_realize(DeviceState *dev, Error **errp)
@@ -1155,9 +1243,15 @@ static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
     found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                  priority, logic_serv, &match);
     if (found) {
+        ipb_update(&match.tctx->regs[match.ring], priority);
+        xive_tctx_notify(match.tctx, match.ring);
         return;
     }
 
+    /* Record the IPB in the associated NVT structure */
+    ipb_update((uint8_t *) &nvt.w4, priority);
+    xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
     /*
      * If no matching NVT is dispatched on a HW thread :
      * - update the NVT structure if backlog is activated

From 3aa597f6505b4d7b62a1b77ab95a233dd5c7c5f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Sun, 9 Dec 2018 20:45:56 +0100
Subject: [PATCH 28/40] spapr/xive: introduce a XIVE interrupt controller
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sPAPRXive models the XIVE interrupt controller of the sPAPR machine.
It inherits from the XiveRouter and provisions storage for the
routing tables:

 - Event Assignment Structure (EAS)
 - Event Notification Descriptor (END)

The sPAPRXive model incorporates an internal XiveSource for the IPIs
and for the interrupts of the virtual devices of the guest. This model
is consistent with the XIVE architecture, which also incorporates an
internal IVSE for IPIs and accelerator interrupts in the IVRE
sub-engine.

The sPAPRXive model exports two memory regions, one for the ESB
trigger and management pages used to control the sources and one for
the TIMA pages. They are mapped by default at the addresses found on
chip 0 of a baremetal system. This is also consistent with the XIVE
architecture, which defines a Virtualization Controller BAR for the
internal IVSE ESB pages and a Thread Management BAR for the TIMA.
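For illustration, the resulting default layout can be sketched as
follows. The nr_irqs value of 0x2000 matches the sPAPR IRQ backend
introduced later in this series, and the esb_shift of 17 (two 64K pages
per source) is an assumption made for the example:

  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
      uint64_t vc_base   = 0x0006010000000000ull; /* source ESB pages */
      uint64_t tm_base   = 0x0006030203180000ull; /* TIMA pages */
      uint64_t nr_irqs   = 0x2000;                /* 8K interrupt numbers */
      unsigned esb_shift = 17;                    /* two 64K pages per IRQ */

      /* END ESB pages are mapped right after the source ESB pages */
      uint64_t end_base = vc_base + (nr_irqs << esb_shift);

      printf("source ESBs @ 0x%016" PRIx64 "\n", vc_base);
      printf("END ESBs    @ 0x%016" PRIx64 "\n", end_base);
      printf("TIMA        @ 0x%016" PRIx64 "\n", tm_base);
      return 0;
  }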
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson [dwg: Fold in field accessor fixes] Signed-off-by: David Gibson --- default-configs/ppc64-softmmu.mak | 1 + hw/intc/Makefile.objs | 1 + hw/intc/spapr_xive.c | 366 ++++++++++++++++++++++++++++++ include/hw/ppc/spapr_xive.h | 45 ++++ 4 files changed, 413 insertions(+) create mode 100644 hw/intc/spapr_xive.c create mode 100644 include/hw/ppc/spapr_xive.h diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak index 2d1e7c5c46..7f34ad0528 100644 --- a/default-configs/ppc64-softmmu.mak +++ b/default-configs/ppc64-softmmu.mak @@ -17,6 +17,7 @@ CONFIG_XICS=$(CONFIG_PSERIES) CONFIG_XICS_SPAPR=$(CONFIG_PSERIES) CONFIG_XICS_KVM=$(call land,$(CONFIG_PSERIES),$(CONFIG_KVM)) CONFIG_XIVE=$(CONFIG_PSERIES) +CONFIG_XIVE_SPAPR=$(CONFIG_PSERIES) CONFIG_MEM_DEVICE=y CONFIG_DIMM=y CONFIG_SPAPR_RNG=y diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs index 72a46ed91c..301a8e972d 100644 --- a/hw/intc/Makefile.objs +++ b/hw/intc/Makefile.objs @@ -38,6 +38,7 @@ obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o obj-$(CONFIG_XICS_KVM) += xics_kvm.o obj-$(CONFIG_XIVE) += xive.o +obj-$(CONFIG_XIVE_SPAPR) += spapr_xive.o obj-$(CONFIG_POWERNV) += xics_pnv.o obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o obj-$(CONFIG_S390_FLIC) += s390_flic.o diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c new file mode 100644 index 0000000000..5f03adca56 --- /dev/null +++ b/hw/intc/spapr_xive.c @@ -0,0 +1,366 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "monitor/monitor.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_xive.h" +#include "hw/ppc/xive.h" +#include "hw/ppc/xive_regs.h" + +/* + * XIVE Virtualization Controller BAR and Thread Managment BAR that we + * use for the ESB pages and the TIMA pages + */ +#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull +#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull + +/* + * On sPAPR machines, use a simplified output for the XIVE END + * structure dumping only the information related to the OS EQ. + */ +static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end, + Monitor *mon) +{ + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qentries = 1 << (qsize + 10); + uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); + uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + + monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", nvt, + priority, qindex, qentries, qgen); + + xive_end_queue_pic_print_info(end, 6, mon); + monitor_printf(mon, "]"); +} + +void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon) +{ + XiveSource *xsrc = &xive->source; + int i; + + monitor_printf(mon, " LSIN PQ EISN CPU/PRIO EQ\n"); + + for (i = 0; i < xive->nr_irqs; i++) { + uint8_t pq = xive_source_esb_get(xsrc, i); + XiveEAS *eas = &xive->eat[i]; + + if (!xive_eas_is_valid(eas)) { + continue; + } + + monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i, + xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 
'Q' : '-',
+                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
+                       xive_eas_is_masked(eas) ? "M" : " ",
+                       (int) xive_get_field64(EAS_END_DATA, eas->w));
+
+        if (!xive_eas_is_masked(eas)) {
+            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
+            XiveEND *end;
+
+            assert(end_idx < xive->nr_ends);
+            end = &xive->endt[end_idx];
+
+            if (xive_end_is_valid(end)) {
+                spapr_xive_end_pic_print_info(xive, end, mon);
+            }
+        }
+        monitor_printf(mon, "\n");
+    }
+}
+
+static void spapr_xive_map_mmio(sPAPRXive *xive)
+{
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
+}
+
+static void spapr_xive_end_reset(XiveEND *end)
+{
+    memset(end, 0, sizeof(*end));
+
+    /* switch off the escalation and notification ESBs */
+    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
+}
+
+static void spapr_xive_reset(void *dev)
+{
+    sPAPRXive *xive = SPAPR_XIVE(dev);
+    int i;
+
+    /*
+     * The XiveSource has its own reset handler, which masks off all
+     * IRQs (!P|Q)
+     */
+
+    /* Mask all valid EASs in the IRQ number space. */
+    for (i = 0; i < xive->nr_irqs; i++) {
+        XiveEAS *eas = &xive->eat[i];
+        if (xive_eas_is_valid(eas)) {
+            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
+        } else {
+            eas->w = 0;
+        }
+    }
+
+    /* Clear all ENDs */
+    for (i = 0; i < xive->nr_ends; i++) {
+        spapr_xive_end_reset(&xive->endt[i]);
+    }
+}
+
+static void spapr_xive_instance_init(Object *obj)
+{
+    sPAPRXive *xive = SPAPR_XIVE(obj);
+
+    object_initialize(&xive->source, sizeof(xive->source), TYPE_XIVE_SOURCE);
+    object_property_add_child(obj, "source", OBJECT(&xive->source), NULL);
+
+    object_initialize(&xive->end_source, sizeof(xive->end_source),
+                      TYPE_XIVE_END_SOURCE);
+    object_property_add_child(obj, "end_source", OBJECT(&xive->end_source),
+                              NULL);
+}
+
+static void spapr_xive_realize(DeviceState *dev, Error **errp)
+{
+    sPAPRXive *xive = SPAPR_XIVE(dev);
+    XiveSource *xsrc = &xive->source;
+    XiveENDSource *end_xsrc = &xive->end_source;
+    Error *local_err = NULL;
+
+    if (!xive->nr_irqs) {
+        error_setg(errp, "Number of interrupts needs to be greater than 0");
+        return;
+    }
+
+    if (!xive->nr_ends) {
+        error_setg(errp, "Number of interrupt ENDs needs to be greater than 0");
+        return;
+    }
+
+    /*
+     * Initialize the internal sources, for IPIs and virtual devices.
+ */ + object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs", + &error_fatal); + object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive), + &error_fatal); + object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * Initialize the END ESB source + */ + object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends", + &error_fatal); + object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive), + &error_fatal); + object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Set the mapping address of the END ESB pages after the source ESBs */ + xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + + /* + * Allocate the routing tables + */ + xive->eat = g_new0(XiveEAS, xive->nr_irqs); + xive->endt = g_new0(XiveEND, xive->nr_ends); + + /* TIMA initialization */ + memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive, + "xive.tima", 4ull << TM_SHIFT); + + /* Define all XIVE MMIO regions on SysBus */ + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio); + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio); + + /* Map all regions */ + spapr_xive_map_mmio(xive); + + qemu_register_reset(spapr_xive_reset, dev); +} + +static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk, + uint32_t eas_idx, XiveEAS *eas) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (eas_idx >= xive->nr_irqs) { + return -1; + } + + *eas = xive->eat[eas_idx]; + return 0; +} + +static int spapr_xive_get_end(XiveRouter *xrtr, + uint8_t end_blk, uint32_t end_idx, XiveEND *end) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (end_idx >= xive->nr_ends) { + return -1; + } + + memcpy(end, &xive->endt[end_idx], sizeof(XiveEND)); + return 0; +} + +static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + uint8_t word_number) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (end_idx >= xive->nr_ends) { + return -1; + } + + memcpy(&xive->endt[end_idx], end, sizeof(XiveEND)); + return 0; +} + +static const VMStateDescription vmstate_spapr_xive_end = { + .name = TYPE_SPAPR_XIVE "/end", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT32(w0, XiveEND), + VMSTATE_UINT32(w1, XiveEND), + VMSTATE_UINT32(w2, XiveEND), + VMSTATE_UINT32(w3, XiveEND), + VMSTATE_UINT32(w4, XiveEND), + VMSTATE_UINT32(w5, XiveEND), + VMSTATE_UINT32(w6, XiveEND), + VMSTATE_UINT32(w7, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_xive_eas = { + .name = TYPE_SPAPR_XIVE "/eas", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT64(w, XiveEAS), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_xive = { + .name = TYPE_SPAPR_XIVE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, sPAPRXive, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, sPAPRXive, nr_irqs, + vmstate_spapr_xive_eas, XiveEAS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, sPAPRXive, nr_ends, + vmstate_spapr_xive_end, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static Property spapr_xive_properties[] = { + DEFINE_PROP_UINT32("nr-irqs", sPAPRXive, nr_irqs, 0), + DEFINE_PROP_UINT32("nr-ends", sPAPRXive, nr_ends, 0), + 
DEFINE_PROP_UINT64("vc-base", sPAPRXive, vc_base, SPAPR_XIVE_VC_BASE), + DEFINE_PROP_UINT64("tm-base", sPAPRXive, tm_base, SPAPR_XIVE_TM_BASE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_xive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); + + dc->desc = "sPAPR XIVE Interrupt Controller"; + dc->props = spapr_xive_properties; + dc->realize = spapr_xive_realize; + dc->vmsd = &vmstate_spapr_xive; + + xrc->get_eas = spapr_xive_get_eas; + xrc->get_end = spapr_xive_get_end; + xrc->write_end = spapr_xive_write_end; +} + +static const TypeInfo spapr_xive_info = { + .name = TYPE_SPAPR_XIVE, + .parent = TYPE_XIVE_ROUTER, + .instance_init = spapr_xive_instance_init, + .instance_size = sizeof(sPAPRXive), + .class_init = spapr_xive_class_init, +}; + +static void spapr_xive_register_types(void) +{ + type_register_static(&spapr_xive_info); +} + +type_init(spapr_xive_register_types) + +bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return false; + } + + xive->eat[lisn].w |= cpu_to_be64(EAS_VALID); + xive_source_irq_set(xsrc, lisn, lsi); + return true; +} + +bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return false; + } + + xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID); + xive_source_irq_set(xsrc, lisn, false); + return true; +} + +qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return NULL; + } + + /* The sPAPR machine/device should have claimed the IRQ before */ + assert(xive_eas_is_valid(&xive->eat[lisn])); + + return xive_source_qirq(xsrc, lisn); +} diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h new file mode 100644 index 0000000000..f087959b99 --- /dev/null +++ b/include/hw/ppc/spapr_xive.h @@ -0,0 +1,45 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#ifndef PPC_SPAPR_XIVE_H +#define PPC_SPAPR_XIVE_H + +#include "hw/ppc/xive.h" + +#define TYPE_SPAPR_XIVE "spapr-xive" +#define SPAPR_XIVE(obj) OBJECT_CHECK(sPAPRXive, (obj), TYPE_SPAPR_XIVE) + +typedef struct sPAPRXive { + XiveRouter parent; + + /* Internal interrupt source for IPIs and virtual devices */ + XiveSource source; + hwaddr vc_base; + + /* END ESB MMIOs */ + XiveENDSource end_source; + hwaddr end_base; + + /* Routing table */ + XiveEAS *eat; + uint32_t nr_irqs; + XiveEND *endt; + uint32_t nr_ends; + + /* TIMA mapping address */ + hwaddr tm_base; + MemoryRegion tm_mmio; +} sPAPRXive; + +bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi); +bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn); +void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon); +qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn); + +#endif /* PPC_SPAPR_XIVE_H */ From 0cddee8d488667a7de60e75f76ead8cffe613d75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Sun, 9 Dec 2018 20:45:57 +0100 Subject: [PATCH 29/40] spapr/xive: use the VCPU id as a NVT identifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IVPE scans the O/S CAM line of the XIVE thread interrupt contexts to find a matching Notification Virtual Target (NVT) among the NVTs dispatched on the HW processor threads. On a real system, the thread interrupt contexts are updated by the hypervisor when a Virtual Processor is scheduled to run on a HW thread. Under QEMU, the model will emulate the same behavior by hardwiring the NVT identifier in the thread context registers at reset. The NVT identifier used by the sPAPRXive model is the VCPU id. The END identifier is also derived from the VCPU id. A set of helpers doing the conversion between identifiers are provided for the hcalls configuring the sources and the ENDs. The model does not need a NVT table but the XiveRouter NVT operations are provided to perform some extra checks in the routing algorithm. Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- hw/intc/spapr_xive.c | 56 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 5f03adca56..d6291c6470 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -26,6 +26,26 @@ #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull #define SPAPR_XIVE_TM_BASE 0x0006030203180000ull +/* + * The allocation of VP blocks is a complex operation in OPAL and the + * VP identifiers have a relation with the number of HW chips, the + * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE + * controller model does not have the same constraints and can use a + * simple mapping scheme of the CPU vcpu_id + * + * These identifiers are never returned to the OS. + */ + +#define SPAPR_XIVE_NVT_BASE 0x400 + +/* + * sPAPR NVT and END indexing helpers + */ +static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx) +{ + return nvt_idx - SPAPR_XIVE_NVT_BASE; +} + /* * On sPAPR machines, use a simplified output for the XIVE END * structure dumping only the information related to the OS EQ. 
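To make the identifier scheme concrete, a tiny standalone sketch
(illustrative values) of the vcpu_id to NVT index round trip performed
by spapr_xive_nvt_to_target() above:

  #include <stdint.h>
  #include <stdio.h>
  #include <assert.h>

  #define NVT_BASE 0x400  /* mirrors SPAPR_XIVE_NVT_BASE */

  static uint32_t vcpu_to_nvt_idx(uint32_t vcpu_id)
  {
      return NVT_BASE + vcpu_id;
  }

  static uint32_t nvt_idx_to_target(uint32_t nvt_idx)
  {
      return nvt_idx - NVT_BASE;
  }

  int main(void)
  {
      uint32_t vcpu_id = 3;
      uint32_t nvt_idx = vcpu_to_nvt_idx(vcpu_id);  /* -> 0x403 */

      assert(nvt_idx_to_target(nvt_idx) == vcpu_id);
      printf("vcpu %u <-> NVT 0x%x\n", vcpu_id, nvt_idx);
      return 0;
  }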
@@ -40,7 +60,8 @@ static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end, uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); - monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", nvt, + monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", + spapr_xive_nvt_to_target(0, nvt), priority, qindex, qentries, qgen); xive_end_queue_pic_print_info(end, 6, mon); @@ -246,6 +267,37 @@ static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk, return 0; } +static int spapr_xive_get_nvt(XiveRouter *xrtr, + uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt) +{ + uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx); + PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); + + if (!cpu) { + /* TODO: should we assert() if we can find a NVT ? */ + return -1; + } + + /* + * sPAPR does not maintain a NVT table. Return that the NVT is + * valid if we have found a matching CPU + */ + nvt->w0 = cpu_to_be32(NVT_W0_VALID); + return 0; +} + +static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, + uint32_t nvt_idx, XiveNVT *nvt, + uint8_t word_number) +{ + /* + * We don't need to write back to the NVTs because the sPAPR + * machine should never hit a non-scheduled NVT. It should never + * get called. + */ + g_assert_not_reached(); +} + static const VMStateDescription vmstate_spapr_xive_end = { .name = TYPE_SPAPR_XIVE "/end", .version_id = 1, @@ -308,6 +360,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data) xrc->get_eas = spapr_xive_get_eas; xrc->get_end = spapr_xive_get_end; xrc->write_end = spapr_xive_write_end; + xrc->get_nvt = spapr_xive_get_nvt; + xrc->write_nvt = spapr_xive_write_nvt; } static const TypeInfo spapr_xive_info = { From 8994e91e963ed8ba6abd9c2afbb3d6be6f323ab5 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Fri, 14 Dec 2018 15:21:22 +1100 Subject: [PATCH 30/40] spapr-iommu: Always advertise the maximum possible DMA window size When deciding about the huge DMA window, the typical Linux pseries guest uses the maximum allowed RAM size as the upper limit. We did the same on QEMU side to match that logic. Now we are going to support a GPU RAM pass through which is not available at the guest boot time as it requires the guest driver interaction. As the result, the guest requests a smaller window than it should. Therefore the guest needs to be patched to understand this new memory and so does QEMU. Instead of reimplementing here whatever solution we choose for the guest, this advertises the biggest possible window size limited by 32 bit (as defined by LoPAPR). Since the window size has to be power-of-two (the create rtas call receives a window shift, not a size), this uses 0x8000.0000 as the maximum number of TCEs possible (rather than 32bit maximum of 0xffff.ffff). This is safe as: 1. The guest visible emulated table is allocated in KVM (actual pages are allocated in page fault handler) and QEMU (actual pages are allocated when updated); 2. The hardware table (and corresponding userspace address table) supports sparse allocation and also checks for locked_vm limit so it is unable to cause the host any damage. 
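As a quick sanity check of the numbers (illustrative arithmetic, not
code from the patch), 0x8000.0000 TCEs backed by 4K pages cover an
8 TiB window:

  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
      uint64_t ntces      = 0x80000000ull; /* TCEs advertised by the RTAS call */
      unsigned page_shift = 12;            /* smallest (4K) IOMMU page size */

      uint64_t window = ntces << page_shift; /* 2^31 * 2^12 = 2^43 bytes */

      printf("largest DMA window: %" PRIu64 " TiB\n", window >> 40);
      return 0;
  }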
Signed-off-by: Alexey Kardashevskiy Signed-off-by: David Gibson --- hw/ppc/spapr_rtas_ddw.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c index 329feb148f..cb8a410359 100644 --- a/hw/ppc/spapr_rtas_ddw.c +++ b/hw/ppc/spapr_rtas_ddw.c @@ -96,9 +96,8 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, uint32_t nret, target_ulong rets) { sPAPRPHBState *sphb; - uint64_t buid, max_window_size; + uint64_t buid; uint32_t avail, addr, pgmask = 0; - MachineState *machine = MACHINE(spapr); if ((nargs != 3) || (nret != 5)) { goto param_error_exit; @@ -114,27 +113,15 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, /* Translate page mask to LoPAPR format */ pgmask = spapr_page_mask_to_query_mask(sphb->page_size_mask); - /* - * This is "Largest contiguous block of TCEs allocated specifically - * for (that is, are reserved for) this PE". - * Return the maximum number as maximum supported RAM size was in 4K pages. - */ - if (machine->ram_size == machine->maxram_size) { - max_window_size = machine->ram_size; - } else { - max_window_size = machine->device_memory->base + - memory_region_size(&machine->device_memory->mr); - } - avail = SPAPR_PCI_DMA_MAX_WINDOWS - spapr_phb_get_active_win_num(sphb); rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, avail); - rtas_st(rets, 2, max_window_size >> SPAPR_TCE_PAGE_SHIFT); + rtas_st(rets, 2, 0x80000000); /* The largest window we can possibly have */ rtas_st(rets, 3, pgmask); rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ - trace_spapr_iommu_ddw_query(buid, addr, avail, max_window_size, pgmask); + trace_spapr_iommu_ddw_query(buid, addr, avail, 0x80000000, pgmask); return; param_error_exit: From dcc345b61ebe499f8f707de2535c2790c52cc703 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 11 Dec 2018 23:38:12 +0100 Subject: [PATCH 31/40] spapr: introduce a new machine IRQ backend for XIVE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XIVE IRQ backend uses the same layout as the new XICS backend but covers the full range of the IRQ number space. The IRQ numbers for the CPU IPIs are allocated at the bottom of this space, below 4K, to preserve compatibility with XICS which does not use that range. This should be enough given that the maximum number of CPUs is 1024 for the sPAPR machine under QEMU. For the record, the biggest POWER8 or POWER9 system has a maximum of 1536 HW threads (16 sockets, 192 cores, SMT8). Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/ppc/spapr_irq.c | 93 ++++++++++++++++++++++++++++++++++++++ include/hw/ppc/spapr.h | 2 + include/hw/ppc/spapr_irq.h | 2 + 3 files changed, 97 insertions(+) diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index f8b651de0e..1f5aac55d3 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -12,6 +12,7 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_xive.h" #include "hw/ppc/xics.h" #include "sysemu/kvm.h" @@ -205,6 +206,98 @@ sPAPRIrq spapr_irq_xics = { .print_info = spapr_irq_print_info_xics, }; +/* + * XIVE IRQ backend. 
+ */ +static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp) +{ + MachineState *machine = MACHINE(spapr); + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + uint32_t nr_servers = spapr_max_server_number(spapr); + DeviceState *dev; + int i; + + /* KVM XIVE device not yet available */ + if (kvm_enabled()) { + if (machine_kernel_irqchip_required(machine)) { + error_setg(errp, "kernel_irqchip requested. no KVM XIVE support"); + return; + } + } + + dev = qdev_create(NULL, TYPE_SPAPR_XIVE); + qdev_prop_set_uint32(dev, "nr-irqs", smc->irq->nr_irqs); + /* + * 8 XIVE END structures per CPU. One for each available priority + */ + qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3); + qdev_init_nofail(dev); + + spapr->xive = SPAPR_XIVE(dev); + + /* Enable the CPU IPIs */ + for (i = 0; i < nr_servers; ++i) { + spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false); + } +} + +static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi, + Error **errp) +{ + if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) { + error_setg(errp, "IRQ %d is invalid", irq); + return -1; + } + return 0; +} + +static void spapr_irq_free_xive(sPAPRMachineState *spapr, int irq, int num) +{ + int i; + + for (i = irq; i < irq + num; ++i) { + spapr_xive_irq_free(spapr->xive, i); + } +} + +static qemu_irq spapr_qirq_xive(sPAPRMachineState *spapr, int irq) +{ + return spapr_xive_qirq(spapr->xive, irq); +} + +static void spapr_irq_print_info_xive(sPAPRMachineState *spapr, + Monitor *mon) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + xive_tctx_pic_print_info(XIVE_TCTX(cpu->intc), mon); + } + + spapr_xive_pic_print_info(spapr->xive, mon); +} + +/* + * XIVE uses the full IRQ number space. Set it to 8K to be compatible + * with XICS. 
+ */ + +#define SPAPR_IRQ_XIVE_NR_IRQS 0x2000 +#define SPAPR_IRQ_XIVE_NR_MSIS (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI) + +sPAPRIrq spapr_irq_xive = { + .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS, + .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS, + + .init = spapr_irq_init_xive, + .claim = spapr_irq_claim_xive, + .free = spapr_irq_free_xive, + .qirq = spapr_qirq_xive, + .print_info = spapr_irq_print_info_xive, +}; + /* * sPAPR IRQ frontend routines for devices */ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 198764066d..cb3082d319 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -16,6 +16,7 @@ typedef struct sPAPREventLogEntry sPAPREventLogEntry; typedef struct sPAPREventSource sPAPREventSource; typedef struct sPAPRPendingHPT sPAPRPendingHPT; typedef struct ICSState ICSState; +typedef struct sPAPRXive sPAPRXive; #define HPTE64_V_HPTE_DIRTY 0x0000000000000040ULL #define SPAPR_ENTRY_POINT 0x100 @@ -175,6 +176,7 @@ struct sPAPRMachineState { const char *icp_type; int32_t irq_map_nr; unsigned long *irq_map; + sPAPRXive *xive; bool cmd_line_caps[SPAPR_CAP_NUM]; sPAPRCapabilities def, eff, mig; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index bd7301e6d9..23cdb51b87 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -13,6 +13,7 @@ /* * IRQ range offsets per device type */ +#define SPAPR_IRQ_IPI 0x0 #define SPAPR_IRQ_EPOW 0x1000 /* XICS_IRQ_BASE offset */ #define SPAPR_IRQ_HOTPLUG 0x1001 #define SPAPR_IRQ_VIO 0x1100 /* 256 VIO devices */ @@ -42,6 +43,7 @@ typedef struct sPAPRIrq { extern sPAPRIrq spapr_irq_xics; extern sPAPRIrq spapr_irq_xics_legacy; +extern sPAPRIrq spapr_irq_xive; void spapr_irq_init(sPAPRMachineState *spapr, Error **errp); int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp); From 23bcd5eb9a472cc7bd147403d9ba18e293ee6adc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 11 Dec 2018 23:38:13 +0100 Subject: [PATCH 32/40] spapr: add hcalls support for the XIVE exploitation interrupt mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The different XIVE virtualization structures (sources and event queues) are configured with a set of Hypervisor calls : - H_INT_GET_SOURCE_INFO used to obtain the address of the MMIO page of the Event State Buffer (ESB) entry associated with the source. - H_INT_SET_SOURCE_CONFIG assigns a source to a "target". - H_INT_GET_SOURCE_CONFIG determines which "target" and "priority" is assigned to a source - H_INT_GET_QUEUE_INFO returns the address of the notification management page associated with the specified "target" and "priority". - H_INT_SET_QUEUE_CONFIG sets or resets the event queue for a given "target" and "priority". It is also used to set the notification configuration associated with the queue, only unconditional notification is supported for the moment. Reset is performed with a queue size of 0 and queueing is disabled in that case. - H_INT_GET_QUEUE_CONFIG returns the queue settings for a given "target" and "priority". - H_INT_RESET resets all of the guest's internal interrupt structures to their initial state, losing all configuration set via the hcalls H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG. - H_INT_SYNC issue a synchronisation on a source to make sure all notifications have reached their queue. Calls that still need to be addressed : H_INT_SET_OS_REPORTING_LINE H_INT_GET_OS_REPORTING_LINE See the code for more documentation on each hcall. 
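To tie the calls together, here is a rough guest-side bring-up sketch.
The h_int_*() wrappers, guest_xive_route_source() and the eq_pa
argument are hypothetical; only the argument order mirrors the R4..R8
register interface documented in the code below:

  /*
   * Hypothetical guest-side wrappers: only the argument order mirrors
   * the R4..R8 register interface of the hcalls.
   */
  extern long h_int_get_source_info(unsigned long flags, unsigned long lisn,
                                    unsigned long out[3]);
  extern long h_int_set_queue_config(unsigned long flags, unsigned long target,
                                     unsigned long prio, unsigned long qpage,
                                     unsigned long qsize);
  extern long h_int_set_source_config(unsigned long flags, unsigned long lisn,
                                      unsigned long target, unsigned long prio,
                                      unsigned long eisn);

  /* Route one source: discover its ESB, set up an EQ, then wire them up */
  static long guest_xive_route_source(unsigned long lisn, unsigned long target,
                                      unsigned long prio, unsigned long eq_pa)
  {
      unsigned long info[3];
      long rc;

      /* 1. ESB page addresses and page shift for this LISN */
      rc = h_int_get_source_info(0, lisn, info);
      if (rc) {
          return rc;
      }

      /* 2. Register a zeroed 64K event queue (qsize = 16) for the pair */
      rc = h_int_set_queue_config(1ul /* Unconditional Notify, PPC_BIT(63) */,
                                  target, prio, eq_pa, 16);
      if (rc) {
          return rc;
      }

      /* 3. Point the source at (target, prio), reusing the LISN as EISN */
      return h_int_set_source_config(2ul /* SET_EISN, PPC_BIT(62) */,
                                     lisn, target, prio, lisn);
  }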
Signed-off-by: Cédric Le Goater
Reviewed-by: David Gibson
[dwg: Folded in fix for field accessors]
Signed-off-by: David Gibson
---
 hw/intc/spapr_xive.c        | 982 ++++++++++++++++++++++++++++++++++++
 hw/ppc/spapr_irq.c          |   2 +
 include/hw/ppc/spapr.h      |  15 +-
 include/hw/ppc/spapr_xive.h |   4 +
 4 files changed, 1002 insertions(+), 1 deletion(-)

diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index d6291c6470..9f2820039c 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -38,6 +38,13 @@
 
 #define SPAPR_XIVE_NVT_BASE 0x400
 
+/*
+ * The sPAPR machine has a unique XIVE IC device. Assign a fixed value
+ * to the controller block id. It can nevertheless be changed for
+ * testing purposes.
+ */
+#define SPAPR_XIVE_BLOCK_ID 0x0
+
 /*
  * sPAPR NVT and END indexing helpers
  */
@@ -46,6 +53,64 @@ static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
     return nvt_idx - SPAPR_XIVE_NVT_BASE;
 }
 
+static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
+                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+    assert(cpu);
+
+    if (out_nvt_blk) {
+        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
+    }
+
+    if (out_nvt_idx) {
+        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
+    }
+}
+
+static int spapr_xive_target_to_nvt(uint32_t target,
+                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
+    return 0;
+}
+
+/*
+ * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
+ * priorities per CPU
+ */
+static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
+                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+    assert(cpu);
+
+    if (out_end_blk) {
+        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
+    }
+
+    if (out_end_idx) {
+        *out_end_idx = (cpu->vcpu_id << 3) + prio;
+    }
+}
+
+static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
+                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
+    return 0;
+}
+
 /*
  * On sPAPR machines, use a simplified output for the XIVE END
  * structure dumping only the information related to the OS EQ.
@@ -418,3 +483,920 @@ qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn)
     return xive_source_qirq(xsrc, lisn);
 }
+
+/*
+ * XIVE hcalls
+ *
+ * The terminology used by the XIVE hcalls is the following:
+ *
+ *   TARGET vCPU number
+ *   EQ     Event Queue assigned by OS to receive event data
+ *   ESB    page for source interrupt management
+ *   LISN   Logical Interrupt Source Number identifying a source in the
+ *          machine
+ *   EISN   Effective Interrupt Source Number used by guest OS to
+ *          identify source in the guest
+ *
+ * The EAS, END, NVT structures are not exposed.
+ */
+
+/*
+ * Linux hosts under OPAL reserve priority 7 for their own escalation
+ * interrupts (DD2.X POWER9). So we only allow the guest to use
+ * priorities [0..6].
+ */
+static bool spapr_xive_priority_is_reserved(uint8_t priority)
+{
+    switch (priority) {
+    case 0 ... 6:
+        return false;
+    case 7: /* OPAL escalation queue */
+    default:
+        return true;
+    }
+}
+
+/*
+ * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
+ * real address of the MMIO page through which the Event State Buffer
+ * entry associated with the value of the "lisn" parameter is managed.
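+ *
+ * (For illustration: a guest typically issues this hcall once per LISN
+ * at setup time and maps the returned ESB page(s), so that it can later
+ * trigger, EOI and mask the source with plain loads and stores.)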
+ * + * Parameters: + * Input + * - R4: "flags" + * Bits 0-63 reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned + * by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output + * - R4: "flags" + * Bits 0-59: Reserved + * Bit 60: H_INT_ESB must be used for Event State Buffer + * management + * Bit 61: 1 == LSI 0 == MSI + * Bit 62: the full function page supports trigger + * Bit 63: Store EOI Supported + * - R5: Logical Real address of full function Event State Buffer + * management page, -1 if H_INT_ESB hcall flag is set to 1. + * - R6: Logical Real Address of trigger only Event State Buffer + * management page or -1. + * - R7: Power of 2 page size for the ESB management pages returned in + * R5 and R6. + */ + +#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */ +#define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */ +#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management + on same page */ +#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */ + +static target_ulong h_int_get_source_info(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveSource *xsrc = &xive->source; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (!xive_eas_is_valid(&xive->eat[lisn])) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * All sources are emulated under the main XIVE object and share + * the same characteristics. + */ + args[0] = 0; + if (!xive_source_esb_has_2page(xsrc)) { + args[0] |= SPAPR_XIVE_SRC_TRIGGER; + } + if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) { + args[0] |= SPAPR_XIVE_SRC_STORE_EOI; + } + + /* + * Force the use of the H_INT_ESB hcall in case of an LSI + * interrupt. This is necessary under KVM to re-trigger the + * interrupt if the level is still asserted + */ + if (xive_source_irq_is_lsi(xsrc, lisn)) { + args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI; + } + + if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn); + } else { + args[1] = -1; + } + + if (xive_source_esb_has_2page(xsrc) && + !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn); + } else { + args[2] = -1; + } + + if (xive_source_esb_has_2page(xsrc)) { + args[3] = xsrc->esb_shift - 1; + } else { + args[3] = xsrc->esb_shift; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical + * Interrupt Source to a target. The Logical Interrupt Source is + * designated with the "lisn" parameter and the target is designated + * with the "target" and "priority" parameters. Upon return from the + * hcall(), no additional interrupts will be directed to the old EQ. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-61: Reserved + * Bit 62: set the "eisn" in the EAS + * Bit 63: masks the interrupt source in the hardware interrupt + * control structure. An interrupt masked by this mechanism will + * be dropped, but it's source state bits will still be + * set. 
There is no race-free way of unmasking and restoring the + * source. Thus this should only be used in interrupts that are + * also masked at the source, and only in cases where the + * interrupt is not meant to be used for a large amount of time + * because no valid target exists for it for example + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned by + * the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R7: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R8: "eisn" is the guest EISN associated with the "lisn" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62) +#define SPAPR_XIVE_SRC_MASK PPC_BIT(63) + +static target_ulong h_int_set_source_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas, new_eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong target = args[2]; + target_ulong priority = args[3]; + target_ulong eisn = args[4]; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* priority 0xff is used to reset the EAS */ + if (priority == 0xff) { + new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED); + goto out; + } + + if (flags & SPAPR_XIVE_SRC_MASK) { + new_eas.w = eas.w | cpu_to_be64(EAS_MASKED); + } else { + new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED); + } + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P4; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P3; + } + + new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk); + new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx); + + if (flags & SPAPR_XIVE_SRC_SET_EISN) { + new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn); + } + +out: + xive->eat[lisn] = new_eas; + return H_SUCCESS; +} + +/* + * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which + * target/priority pair is assigned to the specified Logical Interrupt + * Source. 
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63 Reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output: + * - R4: Target to which the specified Logical Interrupt Source is + * assigned + * - R5: Priority to which the specified Logical Interrupt Source is + * assigned + * - R6: EISN for the specified Logical Interrupt Source (this will be + * equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG) + */ +static target_ulong h_int_get_source_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + XiveEAS eas; + XiveEND *end; + uint8_t nvt_blk; + uint32_t end_idx, nvt_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* EAS_END_BLOCK is unused on sPAPR */ + end_idx = xive_get_field64(EAS_END_INDEX, eas.w); + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); + nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); + args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx); + + if (xive_eas_is_masked(&eas)) { + args[1] = 0xff; + } else { + args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + } + + args[2] = xive_get_field64(EAS_END_DATA, eas.w); + + return H_SUCCESS; +} + +/* + * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real + * address of the notification management page associated with the + * specified target and priority. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63 Reserved + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: Logical real address of notification page + * - R5: Power of 2 page size of the notification page + */ +static target_ulong h_int_get_queue_info(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveENDSource *end_xsrc = &xive->end_source; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
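+ * (On this machine the mapping is direct: the END index is
+ * (vcpu_id << 3) + priority, as computed by spapr_xive_cpu_to_end().)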
+ */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx; + if (xive_end_is_enqueue(end)) { + args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + } else { + args[1] = 0; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset a EQ for + * a given "target" and "priority". It is also used to set the + * notification config associated with the EQ. An EQ size of 0 is + * used to reset the EQ config for a given target and priority. If + * resetting the EQ config, the END associated with the given "target" + * and "priority" will be changed to disable queueing. + * + * Upon return from the hcall(), no additional interrupts will be + * directed to the old EQ (if one was set). The old EQ (if one was + * set) should be investigated for interrupts that occurred prior to + * or during the hcall(). + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Unconditional Notify (n) per the XIVE spec + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R7: "eventQueue": The logical real address of the start of the EQ + * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63) + +static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + target_ulong qpage = args[3]; + target_ulong qsize = args[4]; + XiveEND end; + uint8_t end_blk, nvt_blk; + uint32_t end_idx, nvt_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND)); + + switch (qsize) { + case 12: + case 16: + case 21: + case 24: + end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff); + end.w3 = cpu_to_be32(qpage & 0xffffffff); + end.w0 |= cpu_to_be32(END_W0_ENQUEUE); + end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12); + break; + case 0: + /* reset queue and disable queueing */ + spapr_xive_end_reset(&end); + goto out; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n", + qsize); + return H_P5; + } + + if (qsize) { + hwaddr plen = 1 << qsize; + void *eq; + + /* + * Validate the guest EQ. We should also check that the queue + * has been zeroed by the OS. 
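+ * (For example, with qsize = 16 the guest must hand over a zeroed,
+ * naturally aligned 64K buffer; the END then records the queue address
+ * and a QSIZE field of qsize - 12 = 4.)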
+ */ + eq = address_space_map(CPU(cpu)->as, qpage, &plen, true, + MEMTXATTRS_UNSPECIFIED); + if (plen != 1 << qsize) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%" + HWADDR_PRIx "\n", qpage); + return H_P4; + } + address_space_unmap(CPU(cpu)->as, eq, plen, true, plen); + } + + /* "target" should have been validated above */ + if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) { + g_assert_not_reached(); + } + + /* + * Ensure the priority and target are correctly set (they will not + * be right after allocation) + */ + end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) | + xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx); + end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority); + + if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) { + end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY); + } else { + end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY); + } + + /* + * The generation bit for the END starts at 1 and The END page + * offset counter starts at 0. + */ + end.w1 = cpu_to_be32(END_W1_GENERATION) | + xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul); + end.w0 |= cpu_to_be32(END_W0_VALID); + + /* + * TODO: issue syncs required to ensure all in-flight interrupts + * are complete on the old END + */ + +out: + /* Update END */ + memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND)); + return H_SUCCESS; +} + +/* + * The H_INT_GET_QUEUE_CONFIG hcall() is used to get a EQ for a given + * target and priority. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Debug: Return debug data + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: "flags": + * Bits 0-61: Reserved + * Bit 62: The value of Event Queue Generation Number (g) per + * the XIVE spec if "Debug" = 1 + * Bit 63: The value of Unconditional Notify (n) per the XIVE spec + * - R5: The logical real address of the start of the EQ + * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes" + * - R7: The value of Event Queue Offset Counter per XIVE spec + * if "Debug" = 1, else 0 + * + */ + +#define SPAPR_XIVE_END_DEBUG PPC_BIT(63) + +static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_DEBUG) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
+     */
+    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+        return H_P2;
+    }
+
+    assert(end_idx < xive->nr_ends);
+    end = &xive->endt[end_idx];
+
+    args[0] = 0;
+    if (xive_end_is_notify(end)) {
+        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
+    }
+
+    if (xive_end_is_enqueue(end)) {
+        args[1] = (uint64_t) (be32_to_cpu(end->w2) & 0x0fffffff) << 32
+            | be32_to_cpu(end->w3);
+        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
+    } else {
+        args[1] = 0;
+        args[2] = 0;
+    }
+
+    /* TODO: do we need any locking on the END? */
+    if (flags & SPAPR_XIVE_END_DEBUG) {
+        /* Load the event queue generation number into the return flags */
+        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;
+
+        /* Load R7 with the event queue offset counter */
+        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    } else {
+        args[3] = 0;
+    }
+
+    return H_SUCCESS;
+}
+
+/*
+ * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
+ * reporting cache line pair for the calling thread. The reporting
+ * cache lines will contain the OS interrupt context when the OS
+ * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
+ * interrupt. The reporting cache lines can be reset by inputting -1
+ * in "reportingLine". Issuing the CI store byte without reporting
+ * cache lines registered will result in the data not being accessible
+ * to the OS.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ *       Bits 0-63: Reserved
+ * - R5: "reportingLine": The logical real address of the reporting cache
+ *       line pair
+ *
+ * Output:
+ * - None
+ */
+static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
+                                                sPAPRMachineState *spapr,
+                                                target_ulong opcode,
+                                                target_ulong *args)
+{
+    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+        return H_FUNCTION;
+    }
+
+    /*
+     * H_STATE should be returned if a H_INT_RESET is in progress.
+     * This is not needed when running the emulation under QEMU
+     */
+
+    /* TODO: H_INT_SET_OS_REPORTING_LINE */
+    return H_FUNCTION;
+}
+
+/*
+ * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
+ * real address of the reporting cache line pair set for the input
+ * "target". If no reporting cache line pair has been set, -1 is
+ * returned.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ *       Bits 0-63: Reserved
+ * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
+ *       "ibm,ppc-interrupt-gserver#s"
+ * - R6: "reportingLine": The logical real address of the reporting
+ *       cache line pair
+ *
+ * Output:
+ * - R4: The logical real address of the reporting line if set, else -1
+ */
+static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
+                                                sPAPRMachineState *spapr,
+                                                target_ulong opcode,
+                                                target_ulong *args)
+{
+    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+        return H_FUNCTION;
+    }
+
+    /*
+     * H_STATE should be returned if a H_INT_RESET is in progress.
+     * This is not needed when running the emulation under QEMU
+     */
+
+    /* TODO: H_INT_GET_OS_REPORTING_LINE */
+    return H_FUNCTION;
+}
+
+/*
+ * The H_INT_ESB hcall() is used to issue a load or store to the ESB
+ * page for the input "lisn". This hcall is only supported for LISNs
+ * that have the ESB hcall flag set to 1 when returned from hcall()
+ * H_INT_GET_SOURCE_INFO.
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * bit 63: Store: Store=1, store operation, else load operation + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "esbOffset" is the offset into the ESB page for the load or + * store operation + * - R7: "storeData" is the data to write for a store operation + * + * Output: + * - R4: The value of the load if load operation, else -1 + */ + +#define SPAPR_XIVE_ESB_STORE PPC_BIT(63) + +static target_ulong h_int_esb(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong offset = args[2]; + target_ulong data = args[3]; + hwaddr mmio_addr; + XiveSource *xsrc = &xive->source; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_ESB_STORE) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (offset > (1ull << xsrc->esb_shift)) { + return H_P3; + } + + mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; + + if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, + (flags & SPAPR_XIVE_ESB_STORE))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" + HWADDR_PRIx "\n", mmio_addr); + return H_HARDWARE; + } + args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data; + return H_SUCCESS; +} + +/* + * The H_INT_SYNC hcall() is used to issue hardware syncs that will + * ensure any in flight events for the input lisn are in the event + * queue. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output: + * - None + */ +static target_ulong h_int_sync(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* This is not real hardware. Nothing to be done */ + return H_SUCCESS; +} + +/* + * The H_INT_RESET hcall() is used to reset all of the partition's + * interrupt exploitation structures to their initial state. This + * means losing all previously set interrupt state set via + * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG. 
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * + * Output: + * - None + */ +static target_ulong h_int_reset(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + device_reset(DEVICE(xive)); + return H_SUCCESS; +} + +void spapr_xive_hcall_init(sPAPRMachineState *spapr) +{ + spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info); + spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config); + spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config); + spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info); + spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config); + spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config); + spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE, + h_int_set_os_reporting_line); + spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE, + h_int_get_os_reporting_line); + spapr_register_hypercall(H_INT_ESB, h_int_esb); + spapr_register_hypercall(H_INT_SYNC, h_int_sync); + spapr_register_hypercall(H_INT_RESET, h_int_reset); +} diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 1f5aac55d3..9eca8a4c8c 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -239,6 +239,8 @@ static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp) for (i = 0; i < nr_servers; ++i) { spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false); } + + spapr_xive_hcall_init(spapr); } static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi, diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index cb3082d319..6bf028a02f 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -452,7 +452,20 @@ struct sPAPRMachineState { #define H_INVALIDATE_PID 0x378 #define H_REGISTER_PROC_TBL 0x37C #define H_SIGNAL_SYS_RESET 0x380 -#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET + +#define H_INT_GET_SOURCE_INFO 0x3A8 +#define H_INT_SET_SOURCE_CONFIG 0x3AC +#define H_INT_GET_SOURCE_CONFIG 0x3B0 +#define H_INT_GET_QUEUE_INFO 0x3B4 +#define H_INT_SET_QUEUE_CONFIG 0x3B8 +#define H_INT_GET_QUEUE_CONFIG 0x3BC +#define H_INT_SET_OS_REPORTING_LINE 0x3C0 +#define H_INT_GET_OS_REPORTING_LINE 0x3C4 +#define H_INT_ESB 0x3C8 +#define H_INT_SYNC 0x3CC +#define H_INT_RESET 0x3D0 + +#define MAX_HCALL_OPCODE H_INT_RESET /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. 
diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index f087959b99..9506a8f4d1 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -42,4 +42,8 @@ bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn);
 void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon);
 qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn);
 
+typedef struct sPAPRMachineState sPAPRMachineState;
+
+void spapr_xive_hcall_init(sPAPRMachineState *spapr);
+
 #endif /* PPC_SPAPR_XIVE_H */

From 6e21de4a50fa1caf163e12a6c90424b750119f96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Tue, 11 Dec 2018 23:38:14 +0100
Subject: [PATCH 33/40] spapr: add device tree support for the XIVE exploitation mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The XIVE interface for the guest is described in the device tree under
the "interrupt-controller" node. A couple of new properties are
specific to XIVE:

 - "reg" contains the base address and size of the thread interrupt
   management areas (TIMA), for the User level and for the Guest OS
   level. Only the Guest OS level is taken into account today.

 - "ibm,xive-eq-sizes" the size of the event queues. One cell per size
   supported, contains log2 of size, in ascending order.

 - "ibm,xive-lisn-ranges" the interrupt number ranges assigned to the
   guest for the IPIs.

and also under the root node:

 - "ibm,plat-res-int-priorities" contains a list of priorities that
   the hypervisor has reserved for its own use. OPAL uses the priority
   7 queue to automatically escalate interrupts for all other queues
   (DD2.X POWER9). So only priorities [0..6] are allowed for the
   guest.

Extend the sPAPR IRQ backend with a new handler to populate the DT
with the appropriate "interrupt-controller" node.

Signed-off-by: Cédric Le Goater
[dwg: Fix style nits]
Signed-off-by: David Gibson
---
 hw/intc/spapr_xive.c        | 67 +++++++++++++++++++++++++++++++++++++
 hw/intc/xics_spapr.c        |  3 +-
 hw/ppc/spapr.c              |  3 +-
 hw/ppc/spapr_irq.c          |  3 ++
 include/hw/ppc/spapr_irq.h  |  2 ++
 include/hw/ppc/spapr_xive.h |  2 ++
 include/hw/ppc/xics.h       |  4 +--
 7 files changed, 80 insertions(+), 4 deletions(-)

diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 9f2820039c..682c192268 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -14,6 +14,7 @@
 #include "target/ppc/cpu.h"
 #include "sysemu/cpus.h"
 #include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
 #include "hw/ppc/spapr.h"
 #include "hw/ppc/spapr_xive.h"
 #include "hw/ppc/xive.h"
@@ -1400,3 +1401,69 @@ void spapr_xive_hcall_init(sPAPRMachineState *spapr)
     spapr_register_hypercall(H_INT_SYNC, h_int_sync);
     spapr_register_hypercall(H_INT_RESET, h_int_reset);
 }
+
+void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
+                   uint32_t phandle)
+{
+    sPAPRXive *xive = spapr->xive;
+    int node;
+    uint64_t timas[2 * 2];
+    /* Interrupt number ranges for the IPIs */
+    uint32_t lisn_ranges[] = {
+        cpu_to_be32(0),
+        cpu_to_be32(nr_servers),
+    };
+    /*
+     * EQ size - the sizes of pages supported by the system 4K, 64K,
+     * 2M, 16M. We only advertise 64K for the moment.
+     */
+    uint32_t eq_sizes[] = {
+        cpu_to_be32(16), /* 64K */
+    };
+    /*
+     * The following array is in sync with the reserved priorities
+     * defined by the 'spapr_xive_priority_is_reserved' routine.
+ */ + uint32_t plat_res_int_priorities[] = { + cpu_to_be32(7), /* start */ + cpu_to_be32(0xf8), /* count */ + }; + gchar *nodename; + + /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */ + timas[0] = cpu_to_be64(xive->tm_base + + XIVE_TM_USER_PAGE * (1ull << TM_SHIFT)); + timas[1] = cpu_to_be64(1ull << TM_SHIFT); + timas[2] = cpu_to_be64(xive->tm_base + + XIVE_TM_OS_PAGE * (1ull << TM_SHIFT)); + timas[3] = cpu_to_be64(1ull << TM_SHIFT); + + nodename = g_strdup_printf("interrupt-controller@%" PRIx64, + xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT)); + _FDT(node = fdt_add_subnode(fdt, 0, nodename)); + g_free(nodename); + + _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe")); + _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas))); + + _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe")); + _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes, + sizeof(eq_sizes))); + _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges, + sizeof(lisn_ranges))); + + /* For Linux to link the LSIs to the interrupt controller. */ + _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); + _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); + + /* For SLOF */ + _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle)); + _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle)); + + /* + * The "ibm,plat-res-int-priorities" property defines the priority + * ranges reserved by the hypervisor + */ + _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities", + plat_res_int_priorities, sizeof(plat_res_int_priorities))); +} diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index 2e27b92b87..f67d3c80bf 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -244,7 +244,8 @@ void xics_spapr_init(sPAPRMachineState *spapr) spapr_register_hypercall(H_IPOLL, h_ipoll); } -void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle) +void spapr_dt_xics(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, + uint32_t phandle) { uint32_t interrupt_server_ranges_prop[] = { 0, cpu_to_be32(nr_servers), diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index fc47a058dd..dfb617e580 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1268,7 +1268,8 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); /* /interrupt controller */ - spapr_dt_xics(spapr_max_server_number(spapr), fdt, PHANDLE_XICP); + smc->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt, + PHANDLE_XICP); ret = spapr_populate_memory(spapr, fdt); if (ret < 0) { diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 9eca8a4c8c..975954dc27 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -204,6 +204,7 @@ sPAPRIrq spapr_irq_xics = { .free = spapr_irq_free_xics, .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, + .dt_populate = spapr_dt_xics, }; /* @@ -298,6 +299,7 @@ sPAPRIrq spapr_irq_xive = { .free = spapr_irq_free_xive, .qirq = spapr_qirq_xive, .print_info = spapr_irq_print_info_xive, + .dt_populate = spapr_dt_xive, }; /* @@ -402,4 +404,5 @@ sPAPRIrq spapr_irq_xics_legacy = { .free = spapr_irq_free_xics, .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, + .dt_populate = spapr_dt_xics, }; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index 23cdb51b87..e51e9f052f 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -39,6 +39,8 @@ typedef struct sPAPRIrq { void (*free)(sPAPRMachineState *spapr, int irq, int num); qemu_irq 
(*qirq)(sPAPRMachineState *spapr, int irq);
     void (*print_info)(sPAPRMachineState *spapr, Monitor *mon);
+    void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers,
+                        void *fdt, uint32_t phandle);
 } sPAPRIrq;
 
 extern sPAPRIrq spapr_irq_xics;
diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index 9506a8f4d1..728a5e8dc1 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -45,5 +45,7 @@ qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn);
 typedef struct sPAPRMachineState sPAPRMachineState;
 
 void spapr_xive_hcall_init(sPAPRMachineState *spapr);
+void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
+                   uint32_t phandle);
 
 #endif /* PPC_SPAPR_XIVE_H */
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index 9958443d19..14afda198c 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -181,8 +181,6 @@ typedef struct XICSFabricClass {
     ICPState *(*icp_get)(XICSFabric *xi, int server);
 } XICSFabricClass;
 
-void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle);
-
 ICPState *xics_icp_get(XICSFabric *xi, int server);
 
 /* Internal XICS interfaces */
@@ -204,6 +202,8 @@ void icp_resend(ICPState *ss);
 
 typedef struct sPAPRMachineState sPAPRMachineState;
 
+void spapr_dt_xics(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
+                   uint32_t phandle);
 int xics_kvm_init(sPAPRMachineState *spapr, Error **errp);
 void xics_spapr_init(sPAPRMachineState *spapr);
 

From 1a937ad7e7a1b4eef37c967cbaeeda5ec5b90855 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Tue, 11 Dec 2018 23:38:15 +0100
Subject: [PATCH 34/40] spapr: allocate the interrupt thread context under the CPU core
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Each interrupt mode has its own specific interrupt presenter object,
which we store under the CPU object: one for XICS and one for XIVE.

Extend the sPAPR IRQ backend with a new handler to support them both.
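
As an illustration of the design, each mode fills a small vtable and
the vCPU realization path dispatches through it, so the call site does
not need to know which interrupt mode is active. A minimal standalone
sketch of that pattern (names simplified, not the actual QEMU types):

    #include <stdio.h>

    /* simplified model of the sPAPRIrq backend vtable */
    typedef struct IrqBackend {
        const char *name;
        void *(*cpu_intc_create)(const char *cpu);
    } IrqBackend;

    static void *create_icp(const char *cpu)
    {
        printf("XICS: ICP presenter for %s\n", cpu);  /* XICS flavor */
        return NULL;
    }

    static void *create_tctx(const char *cpu)
    {
        printf("XIVE: TCTX presenter for %s\n", cpu); /* XIVE flavor */
        return NULL;
    }

    static const IrqBackend xics = { "xics", create_icp };
    static const IrqBackend xive = { "xive", create_tctx };

    int main(void)
    {
        const IrqBackend *irq = &xive;  /* machine-level choice */

        irq->cpu_intc_create("cpu0");   /* mode-agnostic call site */
        return 0;
    }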
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/intc/xive.c | 22 ++++++++++++++++++++++ hw/ppc/spapr_cpu_core.c | 5 ++--- hw/ppc/spapr_irq.c | 15 +++++++++++++++ include/hw/ppc/spapr_irq.h | 2 ++ include/hw/ppc/xive.h | 1 + 5 files changed, 42 insertions(+), 3 deletions(-) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 607e74acd2..ea33494338 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -528,6 +528,28 @@ static const TypeInfo xive_tctx_info = { .class_init = xive_tctx_class_init, }; +Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp) +{ + Error *local_err = NULL; + Object *obj; + + obj = object_new(TYPE_XIVE_TCTX); + object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort); + object_unref(obj); + object_property_add_const_link(obj, "cpu", cpu, &error_abort); + object_property_set_bool(obj, true, "realized", &local_err); + if (local_err) { + goto error; + } + + return obj; + +error: + object_unparent(obj); + error_propagate(errp, local_err); + return NULL; +} + /* * XIVE ESB helpers */ diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 2398ce62c0..1811cd48db 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -11,7 +11,6 @@ #include "hw/ppc/spapr_cpu_core.h" #include "target/ppc/cpu.h" #include "hw/ppc/spapr.h" -#include "hw/ppc/xics.h" /* for icp_create() - to be removed */ #include "hw/boards.h" #include "qapi/error.h" #include "sysemu/cpus.h" @@ -215,6 +214,7 @@ static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp) static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr, sPAPRCPUCore *sc, Error **errp) { + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); CPUPPCState *env = &cpu->env; CPUState *cs = CPU(cpu); Error *local_err = NULL; @@ -233,8 +233,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr, qemu_register_reset(spapr_cpu_reset, cpu); spapr_cpu_reset(cpu); - cpu->intc = icp_create(OBJECT(cpu), spapr->icp_type, XICS_FABRIC(spapr), - &local_err); + cpu->intc = smc->irq->cpu_intc_create(spapr, OBJECT(cpu), &local_err); if (local_err) { goto error_unregister; } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 975954dc27..fdcc7795e4 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -191,6 +191,12 @@ static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon) ics_pic_print_info(spapr->ics, mon); } +static Object *spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr, + Object *cpu, Error **errp) +{ + return icp_create(cpu, spapr->icp_type, XICS_FABRIC(spapr), errp); +} + #define SPAPR_IRQ_XICS_NR_IRQS 0x1000 #define SPAPR_IRQ_XICS_NR_MSIS \ (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI) @@ -205,6 +211,7 @@ sPAPRIrq spapr_irq_xics = { .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, + .cpu_intc_create = spapr_irq_cpu_intc_create_xics, }; /* @@ -282,6 +289,12 @@ static void spapr_irq_print_info_xive(sPAPRMachineState *spapr, spapr_xive_pic_print_info(spapr->xive, mon); } +static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr, + Object *cpu, Error **errp) +{ + return xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp); +} + /* * XIVE uses the full IRQ number space. Set it to 8K to be compatible * with XICS. 
@@ -300,6 +313,7 @@ sPAPRIrq spapr_irq_xive = { .qirq = spapr_qirq_xive, .print_info = spapr_irq_print_info_xive, .dt_populate = spapr_dt_xive, + .cpu_intc_create = spapr_irq_cpu_intc_create_xive, }; /* @@ -405,4 +419,5 @@ sPAPRIrq spapr_irq_xics_legacy = { .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, + .cpu_intc_create = spapr_irq_cpu_intc_create_xics, }; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index e51e9f052f..13db0428ab 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -41,6 +41,8 @@ typedef struct sPAPRIrq { void (*print_info)(sPAPRMachineState *spapr, Monitor *mon); void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, uint32_t phandle); + Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu, + Error **errp); } sPAPRIrq; extern sPAPRIrq spapr_irq_xics; diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 19309d1d65..18cd114eb2 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -419,6 +419,7 @@ typedef struct XiveTCTX { extern const MemoryRegionOps xive_tm_ops; void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon); +Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp); static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx) { From 1c53b06c0309104f30ca2c5e1d1e89ddc38de080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 11 Dec 2018 23:38:16 +0100 Subject: [PATCH 35/40] spapr: extend the sPAPR IRQ backend for XICS migration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce a new sPAPR IRQ handler to handle resend after migration when the machine is using a KVM XICS interrupt controller model. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/ppc/spapr.c | 13 +++++-------- hw/ppc/spapr_irq.c | 27 +++++++++++++++++++++++++++ include/hw/ppc/spapr_irq.h | 2 ++ 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index dfb617e580..0b09a88753 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1730,14 +1730,6 @@ static int spapr_post_load(void *opaque, int version_id) return err; } - if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) { - CPUState *cs; - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - icp_resend(ICP(cpu->intc)); - } - } - /* In earlier versions, there was no separate qdev for the PAPR * RTC, so the RTC offset was stored directly in sPAPREnvironment. 
* So when migrating from those versions, poke the incoming offset @@ -1758,6 +1750,11 @@ static int spapr_post_load(void *opaque, int version_id) } } + err = spapr_irq_post_load(spapr, version_id); + if (err) { + return err; + } + return err; } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index fdcc7795e4..292c448a15 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -197,6 +197,18 @@ static Object *spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr, return icp_create(cpu, spapr->icp_type, XICS_FABRIC(spapr), errp); } +static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id) +{ + if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) { + CPUState *cs; + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + icp_resend(ICP(cpu->intc)); + } + } + return 0; +} + #define SPAPR_IRQ_XICS_NR_IRQS 0x1000 #define SPAPR_IRQ_XICS_NR_MSIS \ (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI) @@ -212,6 +224,7 @@ sPAPRIrq spapr_irq_xics = { .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, .cpu_intc_create = spapr_irq_cpu_intc_create_xics, + .post_load = spapr_irq_post_load_xics, }; /* @@ -295,6 +308,11 @@ static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr, return xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp); } +static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id) +{ + return 0; +} + /* * XIVE uses the full IRQ number space. Set it to 8K to be compatible * with XICS. @@ -314,6 +332,7 @@ sPAPRIrq spapr_irq_xive = { .print_info = spapr_irq_print_info_xive, .dt_populate = spapr_dt_xive, .cpu_intc_create = spapr_irq_cpu_intc_create_xive, + .post_load = spapr_irq_post_load_xive, }; /* @@ -352,6 +371,13 @@ qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq) return smc->irq->qirq(spapr, irq); } +int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id) +{ + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + return smc->irq->post_load(spapr, version_id); +} + /* * XICS legacy routines - to deprecate one day */ @@ -420,4 +446,5 @@ sPAPRIrq spapr_irq_xics_legacy = { .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, .cpu_intc_create = spapr_irq_cpu_intc_create_xics, + .post_load = spapr_irq_post_load_xics, }; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index 13db0428ab..84a25ffb6c 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -43,6 +43,7 @@ typedef struct sPAPRIrq { void *fdt, uint32_t phandle); Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu, Error **errp); + int (*post_load)(sPAPRMachineState *spapr, int version_id); } sPAPRIrq; extern sPAPRIrq spapr_irq_xics; @@ -53,6 +54,7 @@ void spapr_irq_init(sPAPRMachineState *spapr, Error **errp); int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp); void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num); qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq); +int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id); /* * XICS legacy routines From b2e22477166a7f8a32b95317dea747f8af7a807f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 11 Dec 2018 23:38:17 +0100 Subject: [PATCH 36/40] spapr: add a 'reset' method to the sPAPR IRQ backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the time being, the XIVE reset handler updates the OS CAM line of the vCPU as it is done under a real hypervisor when a vCPU is 
scheduled to run on a HW thread. This will let the XIVE presenter
engine find a match among the NVTs dispatched on the HW threads.

This handler will become even more useful when we introduce the
machine supporting both interrupt modes, XIVE and XICS. In this
machine, the interrupt mode is chosen by the CAS negotiation process
and activated after a reset.

Signed-off-by: Cédric Le Goater
[dwg: Fix style nits]
Signed-off-by: David Gibson
---
 hw/intc/spapr_xive.c        | 17 +++++++++++++++++
 hw/ppc/spapr.c              |  6 ++++++
 hw/ppc/spapr_irq.c          | 31 ++++++++++++++++++++++++++++++-
 include/hw/ppc/spapr_irq.h  |  2 ++
 include/hw/ppc/spapr_xive.h |  1 +
 5 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 682c192268..0e39c90cbd 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -179,6 +179,23 @@ static void spapr_xive_map_mmio(sPAPRXive *xive)
     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
 }
 
+/*
+ * When a Virtual Processor is scheduled to run on a HW thread, the
+ * hypervisor pushes its identifier in the OS CAM line. Emulate the
+ * same behavior under QEMU.
+ */
+void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
+{
+    uint8_t nvt_blk;
+    uint32_t nvt_idx;
+    uint32_t nvt_cam;
+
+    spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);
+
+    nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
+    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
+}
+
 static void spapr_xive_end_reset(XiveEND *end)
 {
     memset(end, 0, sizeof(*end));
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 0b09a88753..487f80e940 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1619,6 +1619,12 @@ static void spapr_machine_reset(void)
 
     qemu_devices_reset();
 
+    /*
+     * This is fixing some of the default configuration of the XIVE
+     * devices. To be called after the reset of the machine devices.
+     */
+    spapr_irq_reset(spapr, &error_fatal);
+
     /* DRC reset may cause a device to be unplugged. This will cause troubles
      * if this device is used by another device (eg, a running vhost backend
      * will crash QEMU if the DIMM holding the vring goes away). To avoid such
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index 292c448a15..9ecbf47329 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -305,7 +305,14 @@ static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
 static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
                                               Object *cpu, Error **errp)
 {
-    return xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);
+    Object *obj = xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);
+
+    /*
+     * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
+     * don't benefit from the reset of the XIVE IRQ backend
+     */
+    spapr_xive_set_tctx_os_cam(XIVE_TCTX(obj));
+    return obj;
 }
 
 static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
@@ -313,6 +320,18 @@ static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
     return 0;
 }
 
+static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
+{
+    CPUState *cs;
+
+    CPU_FOREACH(cs) {
+        PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+        /* (TCG) Set the OS CAM line of the thread interrupt context. */
+        spapr_xive_set_tctx_os_cam(XIVE_TCTX(cpu->intc));
+    }
+}
+
 /*
  * XIVE uses the full IRQ number space. Set it to 8K to be compatible
  * with XICS.
@@ -333,6 +352,7 @@ sPAPRIrq spapr_irq_xive = {
     .dt_populate = spapr_dt_xive,
     .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
     .post_load = spapr_irq_post_load_xive,
+    .reset = spapr_irq_reset_xive,
 };
 
 /*
@@ -378,6 +398,15 @@ int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id)
     return smc->irq->post_load(spapr, version_id);
 }
 
+void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp)
+{
+    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
+
+    if (smc->irq->reset) {
+        smc->irq->reset(spapr, errp);
+    }
+}
+
 /*
  * XICS legacy routines - to deprecate one day
  */
diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h
index 84a25ffb6c..63061a009b 100644
--- a/include/hw/ppc/spapr_irq.h
+++ b/include/hw/ppc/spapr_irq.h
@@ -44,6 +44,7 @@ typedef struct sPAPRIrq {
     Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu,
                                Error **errp);
     int (*post_load)(sPAPRMachineState *spapr, int version_id);
+    void (*reset)(sPAPRMachineState *spapr, Error **errp);
 } sPAPRIrq;
 
 extern sPAPRIrq spapr_irq_xics;
@@ -55,6 +56,7 @@ int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp);
 void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num);
 qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq);
 int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id);
+void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp);
 
 /*
  * XICS legacy routines
diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index 728a5e8dc1..728735dbcf 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -47,5 +47,6 @@ typedef struct sPAPRMachineState sPAPRMachineState;
 void spapr_xive_hcall_init(sPAPRMachineState *spapr);
 void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
                    uint32_t phandle);
+void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx);
 
 #endif /* PPC_SPAPR_XIVE_H */

From db592b5b16b4f2821b8bb3f4f46825d660d2d4c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Mon, 17 Dec 2018 23:34:42 +0100
Subject: [PATCH 37/40] spapr: add an extra OV5 field to the sPAPR IRQ backend
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The interrupt modes supported by the hypervisor are advertised to the
guest with new bit definitions of the option vector 5 of the
"ibm,arch-vec-5-platform-support" property.

The byte 23 bits 0-1 of the OV5 are defined as follows:

  0b00 PAPR 2.7 and earlier (Legacy systems)
  0b01 XIVE Exploitation mode only
  0b10 Either available

If the client/guest selects the XIVE interrupt mode, it informs the
hypervisor by returning the value 0b01 in byte 23 bits 0-1. A 0b00
value indicates the use of the XICS interrupt mode (Legacy systems).

The sPAPR IRQ backend is extended with these definitions and the
values are directly used to populate the
"ibm,arch-vec-5-platform-support" property.

The interrupt mode is advertised under TCG and under KVM. Although a
KVM XIVE device is not yet available, the machine can still operate
with kernel_irqchip=off. However, we apply a restriction on the CPU,
which is required to be a POWER9 when a XIVE interrupt controller is
in use.
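
Note that OV5 uses IBM (MSB 0) bit numbering, so bits 0-1 of byte 23
are its two most significant bits. Assuming that convention, the
encodings above map onto the byte values introduced by the #defines
in the patch below:

    /*
     * Byte 23 of option vector 5, IBM bit numbering (bit 0 = MSB):
     *   bits 0-1 = 0b00 -> 0x00 : SPAPR_OV5_XIVE_LEGACY  (XICS only)
     *   bits 0-1 = 0b01 -> 0x40 : SPAPR_OV5_XIVE_EXPLOIT (XIVE only)
     *   bits 0-1 = 0b10 -> 0x80 : SPAPR_OV5_XIVE_BOTH
     */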
Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- hw/ppc/spapr.c | 33 ++++++++++++++++++++++++++------- hw/ppc/spapr_irq.c | 3 +++ include/hw/ppc/spapr.h | 6 ++++++ include/hw/ppc/spapr_irq.h | 1 + 4 files changed, 36 insertions(+), 7 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 487f80e940..2f87c8ba19 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1095,15 +1095,19 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt) spapr_dt_rtas_tokens(fdt, rtas); } -/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features - * that the guest may request and thus the valid values for bytes 24..26 of - * option vector 5: */ -static void spapr_dt_ov5_platform_support(void *fdt, int chosen) +/* + * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU + * and the XIVE features that the guest may request and thus the valid + * values for bytes 23..26 of option vector 5: + */ +static void spapr_dt_ov5_platform_support(sPAPRMachineState *spapr, void *fdt, + int chosen) { PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); char val[2 * 4] = { - 23, 0x00, /* Xive mode, filled in below. */ + 23, smc->irq->ov5, /* Xive mode. */ 24, 0x00, /* Hash/Radix, filled in below. */ 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 26, 0x40, /* Radix options: GTSE == yes. */ @@ -1111,7 +1115,11 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen) if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0, first_ppc_cpu->compat_pvr)) { - /* If we're in a pre POWER9 compat mode then the guest should do hash */ + /* + * If we're in a pre POWER9 compat mode then the guest should + * do hash and use the legacy interrupt mode + */ + val[1] = 0x00; /* XICS */ val[3] = 0x00; /* Hash */ } else if (kvm_enabled()) { if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { @@ -1189,7 +1197,7 @@ static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt) _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); } - spapr_dt_ov5_platform_support(fdt, chosen); + spapr_dt_ov5_platform_support(spapr, fdt, chosen); g_free(stdout_path); g_free(bootlist); @@ -2624,6 +2632,17 @@ static void spapr_machine_init(MachineState *machine) /* advertise support for ibm,dyamic-memory-v2 */ spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); + /* advertise XIVE on POWER9 machines */ + if (smc->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) { + if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, + 0, spapr->max_compat_pvr)) { + spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); + } else { + error_report("XIVE-only machines require a POWER9 CPU"); + exit(1); + } + } + /* init CPUs */ spapr_init_cpus(spapr); diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 9ecbf47329..9e3aa85b6d 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -216,6 +216,7 @@ static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id) sPAPRIrq spapr_irq_xics = { .nr_irqs = SPAPR_IRQ_XICS_NR_IRQS, .nr_msis = SPAPR_IRQ_XICS_NR_MSIS, + .ov5 = SPAPR_OV5_XIVE_LEGACY, .init = spapr_irq_init_xics, .claim = spapr_irq_claim_xics, @@ -343,6 +344,7 @@ static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp) sPAPRIrq spapr_irq_xive = { .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS, .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS, + .ov5 = SPAPR_OV5_XIVE_EXPLOIT, .init = spapr_irq_init_xive, .claim = spapr_irq_claim_xive, @@ -467,6 +469,7 @@ int spapr_irq_find(sPAPRMachineState *spapr, int 
num, bool align, Error **errp) sPAPRIrq spapr_irq_xics_legacy = { .nr_irqs = SPAPR_IRQ_XICS_LEGACY_NR_IRQS, .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_IRQS, + .ov5 = SPAPR_OV5_XIVE_LEGACY, .init = spapr_irq_init_xics, .claim = spapr_irq_claim_xics, diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 6bf028a02f..06765b4e9d 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -824,5 +824,11 @@ int spapr_caps_post_migration(sPAPRMachineState *spapr); void spapr_check_pagesize(sPAPRMachineState *spapr, hwaddr pagesize, Error **errp); +/* + * XIVE definitions + */ +#define SPAPR_OV5_XIVE_LEGACY 0x0 +#define SPAPR_OV5_XIVE_EXPLOIT 0x40 +#define SPAPR_OV5_XIVE_BOTH 0x80 /* Only to advertise on the platform */ #endif /* HW_SPAPR_H */ diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index 63061a009b..b34d5a0038 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -33,6 +33,7 @@ void spapr_irq_msi_reset(sPAPRMachineState *spapr); typedef struct sPAPRIrq { uint32_t nr_irqs; uint32_t nr_msis; + uint8_t ov5; void (*init)(sPAPRMachineState *spapr, Error **errp); int (*claim)(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp); From 3ba3d0bc338262f5a23c17e2b6e899da59a544d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Mon, 17 Dec 2018 23:34:43 +0100 Subject: [PATCH 38/40] spapr: introduce an 'ic-mode' machine option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This option is used to select the interrupt controller mode (XICS or XIVE) with which the machine will operate. XICS being the default mode for now. When running a machine with the XIVE interrupt mode backend, the guest OS is required to have support for the XIVE exploitation mode. In the case of legacy OS, the mode selected by CAS should be XICS and the OS should fail to boot. However, QEMU could possibly detect it, terminate the boot process and reset to stop in the SLOF firmware. This is not yet handled. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/ppc/spapr.c | 50 +++++++++++++++++++++++++++++++++++------ hw/ppc/spapr_cpu_core.c | 3 +-- hw/ppc/spapr_irq.c | 34 +++++++++------------------- include/hw/ppc/spapr.h | 1 + 4 files changed, 55 insertions(+), 33 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 2f87c8ba19..65c6065602 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1104,10 +1104,9 @@ static void spapr_dt_ov5_platform_support(sPAPRMachineState *spapr, void *fdt, int chosen) { PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); char val[2 * 4] = { - 23, smc->irq->ov5, /* Xive mode. */ + 23, spapr->irq->ov5, /* Xive mode. */ 24, 0x00, /* Hash/Radix, filled in below. */ 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 26, 0x40, /* Radix options: GTSE == yes. 
*/ @@ -1276,7 +1275,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); /* /interrupt controller */ - smc->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt, + spapr->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_XICP); ret = spapr_populate_memory(spapr, fdt); @@ -1297,7 +1296,8 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, } QLIST_FOREACH(phb, &spapr->phbs, list) { - ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt, smc->irq->nr_msis); + ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt, + spapr->irq->nr_msis); if (ret < 0) { error_report("couldn't setup PCI devices in fdt"); exit(1); @@ -2633,7 +2633,7 @@ static void spapr_machine_init(MachineState *machine) spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); /* advertise XIVE on POWER9 machines */ - if (smc->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) { + if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) { if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, spapr->max_compat_pvr)) { spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); @@ -3053,9 +3053,38 @@ static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name, visit_type_uint32(v, name, (uint32_t *)opaque, errp); } +static char *spapr_get_ic_mode(Object *obj, Error **errp) +{ + sPAPRMachineState *spapr = SPAPR_MACHINE(obj); + + if (spapr->irq == &spapr_irq_xics_legacy) { + return g_strdup("legacy"); + } else if (spapr->irq == &spapr_irq_xics) { + return g_strdup("xics"); + } else if (spapr->irq == &spapr_irq_xive) { + return g_strdup("xive"); + } + g_assert_not_reached(); +} + +static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) +{ + sPAPRMachineState *spapr = SPAPR_MACHINE(obj); + + /* The legacy IRQ backend can not be set */ + if (strcmp(value, "xics") == 0) { + spapr->irq = &spapr_irq_xics; + } else if (strcmp(value, "xive") == 0) { + spapr->irq = &spapr_irq_xive; + } else { + error_setg(errp, "Bad value for \"ic-mode\" property"); + } +} + static void spapr_instance_init(Object *obj) { sPAPRMachineState *spapr = SPAPR_MACHINE(obj); + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); spapr->htab_fd = -1; spapr->use_hotplug_event_source = true; @@ -3089,6 +3118,14 @@ static void spapr_instance_init(Object *obj) " the host's SMT mode", &error_abort); object_property_add_bool(obj, "vfio-no-msix-emulation", spapr_get_msix_emulation, NULL, NULL); + + /* The machine class defines the default interrupt controller mode */ + spapr->irq = smc->irq; + object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, + spapr_set_ic_mode, NULL); + object_property_set_description(obj, "ic-mode", + "Specifies the interrupt controller mode (xics, xive)", + NULL); } static void spapr_machine_finalizefn(Object *obj) @@ -3811,9 +3848,8 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, Monitor *mon) { sPAPRMachineState *spapr = SPAPR_MACHINE(obj); - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - smc->irq->print_info(spapr, mon); + spapr->irq->print_info(spapr, mon); } int spapr_get_vcpu_id(PowerPCCPU *cpu) diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 1811cd48db..82666436e9 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -214,7 +214,6 @@ static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp) static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr, sPAPRCPUCore *sc, Error **errp) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); CPUPPCState *env = 
&cpu->env; CPUState *cs = CPU(cpu); Error *local_err = NULL; @@ -233,7 +232,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr, qemu_register_reset(spapr_cpu_reset, cpu); spapr_cpu_reset(cpu); - cpu->intc = smc->irq->cpu_intc_create(spapr, OBJECT(cpu), &local_err); + cpu->intc = spapr->irq->cpu_intc_create(spapr, OBJECT(cpu), &local_err); if (local_err) { goto error_unregister; } diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 9e3aa85b6d..7b3b5afec2 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -94,8 +94,7 @@ error: static void spapr_irq_init_xics(sPAPRMachineState *spapr, Error **errp) { MachineState *machine = MACHINE(spapr); - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - int nr_irqs = smc->irq->nr_irqs; + int nr_irqs = spapr->irq->nr_irqs; Error *local_err = NULL; if (kvm_enabled()) { @@ -234,7 +233,6 @@ sPAPRIrq spapr_irq_xics = { static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp) { MachineState *machine = MACHINE(spapr); - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); uint32_t nr_servers = spapr_max_server_number(spapr); DeviceState *dev; int i; @@ -248,7 +246,7 @@ static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp) } dev = qdev_create(NULL, TYPE_SPAPR_XIVE); - qdev_prop_set_uint32(dev, "nr-irqs", smc->irq->nr_irqs); + qdev_prop_set_uint32(dev, "nr-irqs", spapr->irq->nr_irqs); /* * 8 XIVE END structures per CPU. One for each available priority */ @@ -362,50 +360,38 @@ sPAPRIrq spapr_irq_xive = { */ void spapr_irq_init(sPAPRMachineState *spapr, Error **errp) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - /* Initialize the MSI IRQ allocator. */ if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { - spapr_irq_msi_init(spapr, smc->irq->nr_msis); + spapr_irq_msi_init(spapr, spapr->irq->nr_msis); } - smc->irq->init(spapr, errp); + spapr->irq->init(spapr, errp); } int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - - return smc->irq->claim(spapr, irq, lsi, errp); + return spapr->irq->claim(spapr, irq, lsi, errp); } void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - - smc->irq->free(spapr, irq, num); + spapr->irq->free(spapr, irq, num); } qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - - return smc->irq->qirq(spapr, irq); + return spapr->irq->qirq(spapr, irq); } int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - - return smc->irq->post_load(spapr, version_id); + return spapr->irq->post_load(spapr, version_id); } void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp) { - sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); - - if (smc->irq->reset) { - smc->irq->reset(spapr, errp); + if (spapr->irq->reset) { + spapr->irq->reset(spapr, errp); } } diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 06765b4e9d..2c77a8ba88 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -177,6 +177,7 @@ struct sPAPRMachineState { int32_t irq_map_nr; unsigned long *irq_map; sPAPRXive *xive; + sPAPRIrq *irq; bool cmd_line_caps[SPAPR_CAP_NUM]; sPAPRCapabilities def, eff, mig; From 34a6b015a98733a4b32881777dafd70156c5a322 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Mon, 17 Dec 2018 23:34:44 +0100 
Subject: [PATCH 39/40] spapr: change default CPU type to POWER9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- hw/ppc/spapr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 65c6065602..19a07c5c9d 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -3931,7 +3931,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) hc->unplug = spapr_machine_device_unplug; smc->dr_lmb_enabled = true; - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); mc->has_hotpluggable_cpus = true; smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; fwc->get_dev_path = spapr_get_fw_dev_path; @@ -4028,6 +4028,7 @@ static void spapr_machine_3_1_class_options(MachineClass *mc) { spapr_machine_4_0_class_options(mc); SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_3_1); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); } DEFINE_SPAPR_MACHINE(3_1, "3.1", false); From b62c6e1237fb5ca2563f7e72b66ac0c40ff7a714 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Mon, 17 Dec 2018 23:34:45 +0100 Subject: [PATCH 40/40] MAINTAINERS: PPC: add a XIVE section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index d676c73f88..0ab4676b06 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1011,6 +1011,14 @@ F: tests/libqos/*spapr* F: tests/rtas* F: tests/libqos/rtas* +XIVE +M: David Gibson +M: Cédric Le Goater +L: qemu-ppc@nongnu.org +S: Supported +F: hw/*/*xive* +F: include/hw/*/*xive* + virtex_ml507 M: Edgar E. Iglesias L: qemu-ppc@nongnu.org
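
With the full series applied, the interrupt controller mode introduced
by PATCH 38 can be selected through the new machine option, for
example (an illustrative invocation; disk, memory and other arguments
are omitted, and -cpu POWER9 is now the default per PATCH 39):

  $ qemu-system-ppc64 -machine pseries,ic-mode=xive -cpu POWER9 ...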