From 9b44c836dc37507513eb67cea862f82dafa249d8 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 18:12:57 +1100 Subject: [PATCH 01/17] target/ppc: Add POWER9/ISAv3.00 to compat_table compat_table contains the list of logical pvr compat modes which a cpu can operate in. It is a list of struct CompatInfo which contains the given pvr value for a compat mode, the pcr bits which should be set to operate in that compat mode, the pcr level which must be present in pcr_supported for a processor to support that compat mode and the max threads possible in that compat mode. Add an entry for the POWER9/ISAv3.00 logical pvr which represents a processor running with support for logical pvr 0x0f000005. A processor running in this mode should have PCR_COMPAT_3_00 set in the pcr (if available in pcr_mask) and should have PCR_COMPAT_3_00 in pcr_supported to indicate that it is capable of running in this compat mode. Also add PCR_COMPAT_3_00 to the bits which must be set for all previous compat modes. Since no processor models contain this bit yet in pcr_mask it will never be set, but this ensures we don't forget to in the future. Signed-off-by: Suraj Jitindar Singh Signed-off-by: David Gibson --- target/ppc/compat.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/target/ppc/compat.c b/target/ppc/compat.c index 458da262be..e8ec1e19e7 100644 --- a/target/ppc/compat.c +++ b/target/ppc/compat.c @@ -39,29 +39,35 @@ static const CompatInfo compat_table[] = { */ { /* POWER6, ISA2.05 */ .pvr = CPU_POWERPC_LOGICAL_2_05, - .pcr = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05 - | PCR_TM_DIS | PCR_VSX_DIS, + .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | + PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS, .pcr_level = PCR_COMPAT_2_05, .max_threads = 2, }, { /* POWER7, ISA2.06 */ .pvr = CPU_POWERPC_LOGICAL_2_06, - .pcr = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, + .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_threads = 4, }, { .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS, - .pcr = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, + .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_threads = 4, }, { /* POWER8, ISA2.07 */ .pvr = CPU_POWERPC_LOGICAL_2_07, - .pcr = PCR_COMPAT_2_07, + .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07, .pcr_level = PCR_COMPAT_2_07, .max_threads = 8, }, + { /* POWER9, ISA3.00 */ + .pvr = CPU_POWERPC_LOGICAL_3_00, + .pcr = PCR_COMPAT_3_00, + .pcr_level = PCR_COMPAT_3_00, + .max_threads = 4, + }, }; static const CompatInfo *compat_by_pvr(uint32_t pvr) From 9c60766887c647df7193463f7a2075e8993b514c Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 2 Mar 2017 13:36:11 +1100 Subject: [PATCH 02/17] exec, kvm, target-ppc: Move getrampagesize() to common code getrampagesize() returns the largest supported page size and mainly used to know if huge pages are enabled. However is implemented in target-ppc/kvm.c and not available in TCG or other architectures. This renames and moves gethugepagesize() to mmap-alloc.c where fd-based analog of it is already implemented. This renames and moves getrampagesize() to exec.c as it seems to be the common place for helpers like this. 
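As a quick illustration (not part of this patch) of what the move enables: with qemu_getrampagesize() declared in exec/ram_addr.h, common or non-PPC code can ask about the RAM backing page size without reaching into target/ppc/kvm.c. A minimal, hypothetical caller (guest_ram_uses_hugepages() is an invented name used only here) might look like:

    /* Hypothetical caller, for illustration only; not part of this series. */
    #include "qemu/osdep.h"
    #include "exec/ram_addr.h"

    static bool guest_ram_uses_hugepages(void)
    {
        /*
         * qemu_getrampagesize() returns the largest page size usable across
         * all of guest RAM, i.e. the minimum over main RAM and any
         * memory-backend objects, so a value above the host page size means
         * every backend is hugepage-backed.
         */
        return qemu_getrampagesize() > getpagesize();
    }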
Signed-off-by: Alexey Kardashevskiy Signed-off-by: David Gibson --- exec.c | 82 ++++++++++++++++++++++++++++ include/exec/ram_addr.h | 1 + include/qemu/mmap-alloc.h | 2 + target/ppc/kvm.c | 109 ++------------------------------------ util/mmap-alloc.c | 25 +++++++++ 5 files changed, 115 insertions(+), 104 deletions(-) diff --git a/exec.c b/exec.c index 785d20f648..aabb035e92 100644 --- a/exec.c +++ b/exec.c @@ -42,6 +42,7 @@ #include "exec/memory.h" #include "exec/ioport.h" #include "sysemu/dma.h" +#include "sysemu/numa.h" #include "exec/address-spaces.h" #include "sysemu/xen-mapcache.h" #include "trace-root.h" @@ -1256,6 +1257,87 @@ void qemu_mutex_unlock_ramlist(void) qemu_mutex_unlock(&ram_list.mutex); } +#ifdef __linux__ +/* + * FIXME TOCTTOU: this iterates over memory backends' mem-path, which + * may or may not name the same files / on the same filesystem now as + * when we actually open and map them. Iterate over the file + * descriptors instead, and use qemu_fd_getpagesize(). + */ +static int find_max_supported_pagesize(Object *obj, void *opaque) +{ + char *mem_path; + long *hpsize_min = opaque; + + if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { + mem_path = object_property_get_str(obj, "mem-path", NULL); + if (mem_path) { + long hpsize = qemu_mempath_getpagesize(mem_path); + if (hpsize < *hpsize_min) { + *hpsize_min = hpsize; + } + } else { + *hpsize_min = getpagesize(); + } + } + + return 0; +} + +long qemu_getrampagesize(void) +{ + long hpsize = LONG_MAX; + long mainrampagesize; + Object *memdev_root; + + if (mem_path) { + mainrampagesize = qemu_mempath_getpagesize(mem_path); + } else { + mainrampagesize = getpagesize(); + } + + /* it's possible we have memory-backend objects with + * hugepage-backed RAM. these may get mapped into system + * address space via -numa parameters or memory hotplug + * hooks. we want to take these into account, but we + * also want to make sure these supported hugepage + * sizes are applicable across the entire range of memory + * we may boot from, so we take the min across all + * backends, and assume normal pages in cases where a + * backend isn't backed by hugepages. + */ + memdev_root = object_resolve_path("/objects", NULL); + if (memdev_root) { + object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize); + } + if (hpsize == LONG_MAX) { + /* No additional memory regions found ==> Report main RAM page size */ + return mainrampagesize; + } + + /* If NUMA is disabled or the NUMA nodes are not backed with a + * memory-backend, then there is at least one node using "normal" RAM, + * so if its page size is smaller we have got to report that size instead. 
+ */ + if (hpsize > mainrampagesize && + (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) { + static bool warned; + if (!warned) { + error_report("Huge page support disabled (n/a for main memory)."); + warned = true; + } + return mainrampagesize; + } + + return hpsize; +} +#else +long qemu_getrampagesize(void) +{ + return getpagesize(); +} +#endif + #ifdef __linux__ static int64_t get_file_size(int fd) { diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 3e79466a44..cd432e73ae 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -52,6 +52,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) return (char *)block->host + offset; } +long qemu_getrampagesize(void); ram_addr_t last_ram_offset(void); RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, bool share, const char *mem_path, diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h index 933c024ac5..50385e3f81 100644 --- a/include/qemu/mmap-alloc.h +++ b/include/qemu/mmap-alloc.h @@ -5,6 +5,8 @@ size_t qemu_fd_getpagesize(int fd); +size_t qemu_mempath_getpagesize(const char *mem_path); + void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared); void qemu_ram_munmap(void *ptr, size_t size); diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index acc40ece65..9b51484052 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -28,7 +28,6 @@ #include "qemu/timer.h" #include "sysemu/sysemu.h" #include "sysemu/hw_accel.h" -#include "sysemu/numa.h" #include "kvm_ppc.h" #include "sysemu/cpus.h" #include "sysemu/device_tree.h" @@ -43,8 +42,10 @@ #include "trace.h" #include "exec/gdbstub.h" #include "exec/memattrs.h" +#include "exec/ram_addr.h" #include "sysemu/hostmem.h" #include "qemu/cutils.h" +#include "qemu/mmap-alloc.h" #if defined(TARGET_PPC64) #include "hw/ppc/spapr_cpu_core.h" #endif @@ -329,106 +330,6 @@ static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info) kvm_get_fallback_smmu_info(cpu, info); } -static long gethugepagesize(const char *mem_path) -{ - struct statfs fs; - int ret; - - do { - ret = statfs(mem_path, &fs); - } while (ret != 0 && errno == EINTR); - - if (ret != 0) { - fprintf(stderr, "Couldn't statfs() memory path: %s\n", - strerror(errno)); - exit(1); - } - -#define HUGETLBFS_MAGIC 0x958458f6 - - if (fs.f_type != HUGETLBFS_MAGIC) { - /* Explicit mempath, but it's ordinary pages */ - return getpagesize(); - } - - /* It's hugepage, return the huge page size */ - return fs.f_bsize; -} - -/* - * FIXME TOCTTOU: this iterates over memory backends' mem-path, which - * may or may not name the same files / on the same filesystem now as - * when we actually open and map them. Iterate over the file - * descriptors instead, and use qemu_fd_getpagesize(). - */ -static int find_max_supported_pagesize(Object *obj, void *opaque) -{ - char *mem_path; - long *hpsize_min = opaque; - - if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { - mem_path = object_property_get_str(obj, "mem-path", NULL); - if (mem_path) { - long hpsize = gethugepagesize(mem_path); - if (hpsize < *hpsize_min) { - *hpsize_min = hpsize; - } - } else { - *hpsize_min = getpagesize(); - } - } - - return 0; -} - -static long getrampagesize(void) -{ - long hpsize = LONG_MAX; - long mainrampagesize; - Object *memdev_root; - - if (mem_path) { - mainrampagesize = gethugepagesize(mem_path); - } else { - mainrampagesize = getpagesize(); - } - - /* it's possible we have memory-backend objects with - * hugepage-backed RAM. 
these may get mapped into system - * address space via -numa parameters or memory hotplug - * hooks. we want to take these into account, but we - * also want to make sure these supported hugepage - * sizes are applicable across the entire range of memory - * we may boot from, so we take the min across all - * backends, and assume normal pages in cases where a - * backend isn't backed by hugepages. - */ - memdev_root = object_resolve_path("/objects", NULL); - if (memdev_root) { - object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize); - } - if (hpsize == LONG_MAX) { - /* No additional memory regions found ==> Report main RAM page size */ - return mainrampagesize; - } - - /* If NUMA is disabled or the NUMA nodes are not backed with a - * memory-backend, then there is at least one node using "normal" RAM, - * so if its page size is smaller we have got to report that size instead. - */ - if (hpsize > mainrampagesize && - (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) { - static bool warned; - if (!warned) { - error_report("Huge page support disabled (n/a for main memory)."); - warned = true; - } - return mainrampagesize; - } - - return hpsize; -} - static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift) { if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) { @@ -460,7 +361,7 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu) } if (!max_cpu_page_size) { - max_cpu_page_size = getrampagesize(); + max_cpu_page_size = qemu_getrampagesize(); } /* Convert to QEMU form */ @@ -521,7 +422,7 @@ bool kvmppc_is_mem_backend_page_size_ok(char *obj_path) long pagesize; if (mempath) { - pagesize = gethugepagesize(mempath); + pagesize = qemu_mempath_getpagesize(mempath); } else { pagesize = getpagesize(); } @@ -2205,7 +2106,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift) /* Find the largest hardware supported page size that's less than * or equal to the (logical) backing page size of guest RAM */ kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info); - rampagesize = getrampagesize(); + rampagesize = qemu_getrampagesize(); best_page_shift = 0; for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) { diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c index 2f55f5e94f..3ec029a9ea 100644 --- a/util/mmap-alloc.c +++ b/util/mmap-alloc.c @@ -40,6 +40,31 @@ size_t qemu_fd_getpagesize(int fd) return getpagesize(); } +size_t qemu_mempath_getpagesize(const char *mem_path) +{ +#ifdef CONFIG_LINUX + struct statfs fs; + int ret; + + do { + ret = statfs(mem_path, &fs); + } while (ret != 0 && errno == EINTR); + + if (ret != 0) { + fprintf(stderr, "Couldn't statfs() memory path: %s\n", + strerror(errno)); + exit(1); + } + + if (fs.f_type == HUGETLBFS_MAGIC) { + /* It's hugepage, return the huge page size */ + return fs.f_bsize; + } +#endif + + return getpagesize(); +} + void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared) { /* From eaa477ca4ef59eed2c77d81937b981ff823def3b Mon Sep 17 00:00:00 2001 From: David Gibson Date: Thu, 2 Mar 2017 15:29:31 +1100 Subject: [PATCH 03/17] powernv: Don't test POWER9 CPU yet A couple of tests for the work-in-progress 'powernv' machine type attempt to test on POWER9 CPUs. However the POWER9 CPU support is incomplete and this doesn't really work. In particular the firmware image we have currently assumes the presence of the SDR1 register, which no longer exists on POWER9. We only got away with this so far, because of a different bug which added SDR1 to POWER9 even though it shouldn't be there. 
For now, remove POWER9 testing of powernv, POWER8 testing will do for now until the POWER9 support is more complete. Signed-off-by: David Gibson --- tests/boot-serial-test.c | 2 +- tests/pnv-xscom-test.c | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/boot-serial-test.c b/tests/boot-serial-test.c index 44c82e5110..57edf6af33 100644 --- a/tests/boot-serial-test.c +++ b/tests/boot-serial-test.c @@ -29,7 +29,7 @@ static testdef_t tests[] = { { "ppc64", "ppce500", "", "U-Boot" }, { "ppc64", "prep", "", "Open Hack'Ware BIOS" }, { "ppc64", "pseries", "", "Open Firmware" }, - { "ppc64", "powernv", "-cpu POWER9", "SkiBoot" }, + { "ppc64", "powernv", "-cpu POWER8", "SkiBoot" }, { "i386", "isapc", "-cpu qemu32 -device sga", "SGABIOS" }, { "i386", "pc", "-device sga", "SGABIOS" }, { "i386", "q35", "-device sga", "SGABIOS" }, diff --git a/tests/pnv-xscom-test.c b/tests/pnv-xscom-test.c index 5951da16cd..5adc3fd3a9 100644 --- a/tests/pnv-xscom-test.c +++ b/tests/pnv-xscom-test.c @@ -41,7 +41,9 @@ static const PnvChip pnv_chips[] = { .xscom_core_base = 0x10000000ull, .cfam_id = 0x120d304980000000ull, .first_core = 0x1, - }, { + }, +#if 0 /* POWER9 support is not ready yet */ + { .chip_type = PNV_CHIP_POWER9, .cpu_model = "POWER9", .xscom_base = 0x000603fc00000000ull, @@ -49,6 +51,7 @@ static const PnvChip pnv_chips[] = { .cfam_id = 0x100d104980000000ull, .first_core = 0x20, }, +#endif }; static uint64_t pnv_xscom_addr(const PnvChip *chip, uint32_t pcba) From 0922f1e4872656f7643fb2bc8459bea6964de085 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Thu, 2 Mar 2017 12:13:07 +1100 Subject: [PATCH 04/17] target/ppc/POWER9: Add POWERPC_MMU_V3 bit For easier handling of future processors using the POWER9 or something close to it, add a new bit in the MMU model. This was originally from a revised version of 86cf1e9 "target/ppc/POWER9: Add ISAv3.00 MMU definition" but the older version of the patch was already merged. This makes the change on top of the original version. Signed-off-by: Suraj Jitindar Singh Signed-off-by: David Gibson --- target/ppc/cpu-qom.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 4e3132b56b..da7eb5a6d6 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -71,6 +71,7 @@ enum powerpc_mmu_t { #define POWERPC_MMU_1TSEG 0x00020000 #define POWERPC_MMU_AMR 0x00040000 #define POWERPC_MMU_64K 0x00080000 +#define POWERPC_MMU_V3 0x00100000 /* ISA V3.00 MMU Support */ /* 64 bits PowerPC MMU */ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001, /* Architecture 2.03 and later (has LPCR) */ @@ -92,7 +93,8 @@ enum powerpc_mmu_t { /* Architecture 3.00 variant */ POWERPC_MMU_3_00 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG | POWERPC_MMU_64K - | POWERPC_MMU_AMR | 0x00000005, + | POWERPC_MMU_AMR | POWERPC_MMU_V3 + | 0x00000005, }; /*****************************************************************************/ From 9861bb3efd2a4def622cd40afb58af61542c4458 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:36 +1100 Subject: [PATCH 05/17] target/ppc: Add patb_entry to sPAPRMachineState ISA v3.00 adds the idea of a partition table which is used to store the address translation details for all partitions on the system. The partition table consists of double word entries indexed by partition id where the second double word contains the location of the process table in guest memory. The process table is registered by the guest via a h-call. 
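For orientation only (no struct like this is added anywhere in the series), the layout just described amounts to two doublewords per partition, of which sPAPR will keep only the second one for its single guest:

    /* Sketch of one partition table entry as described above; the field
     * names are illustrative and not taken from the patches. */
    struct parttab_entry_sketch {
        uint64_t dw0;   /* address translation details for the partition */
        uint64_t dw1;   /* location of that partition's process table in
                         * guest memory; this is what sPAPRMachineState
                         * caches as patb_entry */
    };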
We need somewhere to store the address of the process table so we add an entry to the sPAPRMachineState struct called patb_entry to represent the second doubleword of a single partition table entry corresponding to the current guest. We need to store this value so we know if the guest is using radix or hash translation and the location of the corresponding process table in guest memory. Since we only have a single guest per qemu instance, we only need one entry. Since the partition table is technically a hypervisor resource we require that access to it is abstracted by the virtual hypervisor through the get_patbe() call. Currently the value of the entry is never set (and thus defaults to 0 indicating hash), but it will be required to both implement POWER9 kvm support and tcg radix support. We also add this field to be migrated as part of the sPAPRMachineState as we will need it on the receiving side as the guest will never tell us this information again and we need it to perform translation. Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/ppc/spapr.c | 29 +++++++++++++++++++++++++++++ include/hw/ppc/spapr.h | 1 + target/ppc/cpu.h | 1 + 3 files changed, 31 insertions(+) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 81c6c1c27c..1cc5e008d7 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1055,6 +1055,13 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, } } +static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp) +{ + sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp); + + return spapr->patb_entry; +} + #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) @@ -1234,6 +1241,8 @@ static void ppc_spapr_reset(void) /* Check for unknown sysbus devices */ foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL); + spapr->patb_entry = 0; + /* Allocate and/or reset the hash page table */ spapr_reallocate_hpt(spapr, spapr_hpt_shift_for_ramsize(machine->maxram_size), @@ -1427,6 +1436,24 @@ static const VMStateDescription vmstate_spapr_ov5_cas = { }, }; +static bool spapr_patb_entry_needed(void *opaque) +{ + sPAPRMachineState *spapr = opaque; + + return !!spapr->patb_entry; +} + +static const VMStateDescription vmstate_spapr_patb_entry = { + .name = "spapr_patb_entry", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_patb_entry_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(patb_entry, sPAPRMachineState), + VMSTATE_END_OF_LIST() + }, +}; + static const VMStateDescription vmstate_spapr = { .name = "spapr", .version_id = 3, @@ -1444,6 +1471,7 @@ static const VMStateDescription vmstate_spapr = { }, .subsections = (const VMStateDescription*[]) { &vmstate_spapr_ov5_cas, + &vmstate_spapr_patb_entry, NULL } }; @@ -3049,6 +3077,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) vhc->map_hptes = spapr_map_hptes; vhc->unmap_hptes = spapr_unmap_hptes; vhc->store_hpte = spapr_store_hpte; + vhc->get_patbe = spapr_get_patbe; xic->ics_get = spapr_ics_get; xic->ics_resend = spapr_ics_resend; xic->icp_get = spapr_icp_get; diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index cfd271129d..3258eaa6bf 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -63,6 +63,7 @@ struct sPAPRMachineState { void *htab; uint32_t htab_shift; + uint64_t patb_entry; /* Process tbl registed in H_REGISTER_PROCESS_TABLE 
*/ hwaddr rma_size; int vrma_adjust; ssize_t rtas_size; diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index d33c17e646..674bb3f6d9 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1216,6 +1216,7 @@ struct PPCVirtualHypervisorClass { hwaddr ptex, int n); void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte0, uint64_t pte1); + uint64_t (*get_patbe)(PPCVirtualHypervisor *vhyp); }; #define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor" From 4f4f28ffc1decf842b194644510e0d7f646af4c1 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:37 +1100 Subject: [PATCH 06/17] target/ppc: Don't gen an SDR1 on POWER9 and rework register creation POWER9 doesn't have a storage description register 1 (SDR1) which is used to store the base and size of the hash table. Thus we don't need to generate this register on the POWER9 cpu model. While we're here, the register generation code for 970, POWER5+, POWER<7/8/9> in general is a mess where we call a generic function from a model specific function which then attempts to call model specific functions, so rework this for readability. We update ppc_cpu_dump_state so that "info registers" will only display the value of sdr1 if the register has been generated. As mentioned above the register generation for the pcc->init_proc function for 970, POWER5+, POWER7, POWER8 and POWER9 has been reworked for improved clarity. Instead of calling init_proc_book3s_64 which then attempts to generate the correct registers through a mess of if statements, we remove this function and instead call the appropriate register generation functions directly. This follows the register generation model used for earlier cpu models (pre-970) whereby cpu specific registers are generated directly in the init_proc function and makes it easier to add/remove specific registers for new cpu models. 
Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Signed-off-by: David Gibson --- target/ppc/translate.c | 7 +- target/ppc/translate_init.c | 320 ++++++++++++++++++++++-------------- 2 files changed, 205 insertions(+), 122 deletions(-) diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 6e6868b7a0..1de554a435 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -7090,9 +7090,12 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, case POWERPC_MMU_2_06a: case POWERPC_MMU_2_07: case POWERPC_MMU_2_07a: + case POWERPC_MMU_3_00: #endif - cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " DAR " TARGET_FMT_lx - " DSISR " TARGET_FMT_lx "\n", env->spr[SPR_SDR1], + if (env->spr_cb[SPR_SDR1].name) { /* SDR1 Exists */ + cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " ", env->spr[SPR_SDR1]); + } + cpu_fprintf(f, " DAR " TARGET_FMT_lx " DSISR " TARGET_FMT_lx "\n", env->spr[SPR_DAR], env->spr[SPR_DSISR]); break; case POWERPC_MMU_BOOKE206: diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c index 37f74be984..1a05ac36e5 100644 --- a/target/ppc/translate_init.c +++ b/target/ppc/translate_init.c @@ -723,7 +723,7 @@ static void gen_spr_generic (CPUPPCState *env) } /* SPR common to all non-embedded PowerPC, including 601 */ -static void gen_spr_ne_601 (CPUPPCState *env) +static void gen_spr_ne_601(CPUPPCState *env) { /* Exception processing */ spr_register_kvm(env, SPR_DSISR, "DSISR", @@ -739,7 +739,11 @@ static void gen_spr_ne_601 (CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_decr, &spr_write_decr, 0x00000000); - /* Memory management */ +} + +/* Storage Description Register 1 */ +static void gen_spr_sdr1(CPUPPCState *env) +{ #ifndef CONFIG_USER_ONLY if (env->has_hv_mode) { /* SDR1 is a hypervisor resource on CPUs which have a @@ -1180,7 +1184,7 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) } #endif /* CONFIG_USER_ONLY */ -static void gen_spr_amr(CPUPPCState *env, bool has_iamr) +static void gen_spr_amr(CPUPPCState *env) { #ifndef CONFIG_USER_ONLY /* Virtual Page Class Key protection */ @@ -1206,13 +1210,17 @@ static void gen_spr_amr(CPUPPCState *env, bool has_iamr) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0); - if (has_iamr) { - spr_register_kvm_hv(env, SPR_IAMR, "IAMR", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_iamr, - &spr_read_generic, &spr_write_generic, - KVM_REG_PPC_IAMR, 0); - } +#endif /* !CONFIG_USER_ONLY */ +} + +static void gen_spr_iamr(CPUPPCState *env) +{ +#ifndef CONFIG_USER_ONLY + spr_register_kvm_hv(env, SPR_IAMR, "IAMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_iamr, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_IAMR, 0); #endif /* !CONFIG_USER_ONLY */ } #endif /* TARGET_PPC64 */ @@ -4422,6 +4430,7 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) static void init_proc_G2 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_G2_755(env); gen_spr_G2(env); /* Time base */ @@ -4500,6 +4509,7 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) static void init_proc_G2LE (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_G2_755(env); gen_spr_G2(env); /* Time base */ @@ -4735,6 +4745,7 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data) static void init_proc_e300 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); @@ -5234,6 +5245,7 @@ POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data) static void init_proc_601 (CPUPPCState 
*env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_601(env); /* Hardware implementation registers */ /* XXX : not implemented */ @@ -5348,6 +5360,7 @@ POWERPC_FAMILY(601v)(ObjectClass *oc, void *data) static void init_proc_602 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_602(env); /* Time base */ gen_tbl(env); @@ -5417,6 +5430,7 @@ POWERPC_FAMILY(602)(ObjectClass *oc, void *data) static void init_proc_603 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); @@ -5483,6 +5497,7 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data) static void init_proc_603E (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_603(env); /* Time base */ gen_tbl(env); @@ -5549,6 +5564,7 @@ POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) static void init_proc_604 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_604(env); /* Time base */ gen_tbl(env); @@ -5612,6 +5628,7 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data) static void init_proc_604E (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_604(env); /* XXX : not implemented */ spr_register(env, SPR_7XX_MMCR1, "MMCR1", @@ -5695,6 +5712,7 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) static void init_proc_740 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -5765,6 +5783,7 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data) static void init_proc_750 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", @@ -5843,6 +5862,7 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data) static void init_proc_750cl (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", @@ -6044,6 +6064,7 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) static void init_proc_750cx (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", @@ -6126,6 +6147,7 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) static void init_proc_750fx (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", @@ -6213,6 +6235,7 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) static void init_proc_750gx (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* XXX : not implemented (XXX: different from 750fx) */ spr_register(env, SPR_L2CR, "L2CR", @@ -6300,6 +6323,7 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) static void init_proc_745 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); gen_spr_G2_755(env); /* Time base */ @@ -6375,6 +6399,7 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data) static void init_proc_755 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); gen_spr_G2_755(env); /* Time base */ @@ -6461,6 +6486,7 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data) static void init_proc_7400 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -6539,6 +6565,7 @@ POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) static void init_proc_7410 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -6623,6 +6650,7 @@ 
POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) static void init_proc_7440 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -6730,6 +6758,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) static void init_proc_7450 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -6863,6 +6892,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) static void init_proc_7445 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -6999,6 +7029,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) static void init_proc_7455 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -7137,6 +7168,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) static void init_proc_7457 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -7299,6 +7331,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) static void init_proc_e600 (CPUPPCState *env) { gen_spr_ne_601(env); + gen_spr_sdr1(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); @@ -7444,15 +7477,6 @@ POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) #define POWERPC970_HID5_INIT 0x00000000 #endif -enum BOOK3S_CPU_TYPE { - BOOK3S_CPU_970, - BOOK3S_CPU_POWER5PLUS, - BOOK3S_CPU_POWER6, - BOOK3S_CPU_POWER7, - BOOK3S_CPU_POWER8, - BOOK3S_CPU_POWER9 -}; - static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, int bit, int sprn, int cause) { @@ -7540,7 +7564,7 @@ static void gen_spr_970_hior(CPUPPCState *env) 0x00000000); } -static void gen_spr_book3s_common(CPUPPCState *env) +static void gen_spr_book3s_ctrl(CPUPPCState *env) { spr_register(env, SPR_CTRL, "SPR_CTRL", SPR_NOACCESS, SPR_NOACCESS, @@ -8210,112 +8234,42 @@ static void gen_spr_power8_rpr(CPUPPCState *env) #endif } -static void init_proc_book3s_64(CPUPPCState *env, int version) +static void init_proc_book3s_common(CPUPPCState *env) { gen_spr_ne_601(env); gen_tbl(env); gen_spr_book3s_altivec(env); gen_spr_book3s_pmu_sup(env); gen_spr_book3s_pmu_user(env); - gen_spr_book3s_common(env); - - switch (version) { - case BOOK3S_CPU_970: - case BOOK3S_CPU_POWER5PLUS: - gen_spr_970_hid(env); - gen_spr_970_hior(env); - gen_low_BATs(env); - gen_spr_970_pmu_sup(env); - gen_spr_970_pmu_user(env); - break; - case BOOK3S_CPU_POWER7: - case BOOK3S_CPU_POWER8: - case BOOK3S_CPU_POWER9: - gen_spr_book3s_ids(env); - gen_spr_amr(env, version >= BOOK3S_CPU_POWER8); - gen_spr_book3s_purr(env); - env->ci_large_pages = true; - break; - default: - g_assert_not_reached(); - } - if (version >= BOOK3S_CPU_POWER5PLUS) { - gen_spr_power5p_common(env); - gen_spr_power5p_lpar(env); - gen_spr_power5p_ear(env); - } else { - gen_spr_970_lpar(env); - } - if (version == BOOK3S_CPU_970) { - gen_spr_970_dbg(env); - } - if (version >= BOOK3S_CPU_POWER6) { - gen_spr_power6_common(env); - gen_spr_power6_dbg(env); - } - if (version == BOOK3S_CPU_POWER7) { - gen_spr_power7_book4(env); - } - if (version >= BOOK3S_CPU_POWER8) { - gen_spr_power8_tce_address_control(env); - gen_spr_power8_ids(env); - gen_spr_power8_ebb(env); - gen_spr_power8_fscr(env); - gen_spr_power8_pmu_sup(env); - gen_spr_power8_pmu_user(env); - gen_spr_power8_tm(env); - gen_spr_power8_pspb(env); - gen_spr_vtb(env); - gen_spr_power8_ic(env); - gen_spr_power8_book4(env); - gen_spr_power8_rpr(env); - } - if (version < BOOK3S_CPU_POWER8) { - 
gen_spr_book3s_dbg(env); - } else { - gen_spr_book3s_207_dbg(env); - } -#if !defined(CONFIG_USER_ONLY) - switch (version) { - case BOOK3S_CPU_970: - case BOOK3S_CPU_POWER5PLUS: - env->slb_nr = 64; - break; - case BOOK3S_CPU_POWER7: - case BOOK3S_CPU_POWER8: - case BOOK3S_CPU_POWER9: - default: - env->slb_nr = 32; - break; - } -#endif - /* Allocate hardware IRQ controller */ - switch (version) { - case BOOK3S_CPU_970: - case BOOK3S_CPU_POWER5PLUS: - init_excp_970(env); - ppc970_irq_init(ppc_env_get_cpu(env)); - break; - case BOOK3S_CPU_POWER7: - init_excp_POWER7(env); - ppcPOWER7_irq_init(ppc_env_get_cpu(env)); - break; - case BOOK3S_CPU_POWER8: - case BOOK3S_CPU_POWER9: - init_excp_POWER8(env); - ppcPOWER7_irq_init(ppc_env_get_cpu(env)); - break; - default: - g_assert_not_reached(); - } - - env->dcache_line_size = 128; - env->icache_line_size = 128; + gen_spr_book3s_ctrl(env); } static void init_proc_970(CPUPPCState *env) { - init_proc_book3s_64(env, BOOK3S_CPU_970); + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* 970 Specific Registers */ + gen_spr_970_hid(env); + gen_spr_970_hior(env); + gen_low_BATs(env); + gen_spr_970_pmu_sup(env); + gen_spr_970_pmu_user(env); + gen_spr_970_lpar(env); + gen_spr_970_dbg(env); + + /* env variables */ +#if !defined(CONFIG_USER_ONLY) + env->slb_nr = 64; +#endif + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_970(env); + ppc970_irq_init(ppc_env_get_cpu(env)); } POWERPC_FAMILY(970)(ObjectClass *oc, void *data) @@ -8367,7 +8321,31 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data) static void init_proc_power5plus(CPUPPCState *env) { - init_proc_book3s_64(env, BOOK3S_CPU_POWER5PLUS); + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* POWER5+ Specific Registers */ + gen_spr_970_hid(env); + gen_spr_970_hior(env); + gen_low_BATs(env); + gen_spr_970_pmu_sup(env); + gen_spr_970_pmu_user(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + + /* env variables */ +#if !defined(CONFIG_USER_ONLY) + env->slb_nr = 64; +#endif + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_970(env); + ppc970_irq_init(ppc_env_get_cpu(env)); } POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) @@ -8520,7 +8498,33 @@ static const struct ppc_segment_page_sizes POWER7_POWER8_sps = { static void init_proc_POWER7 (CPUPPCState *env) { - init_proc_book3s_64(env, BOOK3S_CPU_POWER7); + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* POWER7 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_amr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power7_book4(env); + + /* env variables */ +#if !defined(CONFIG_USER_ONLY) + env->slb_nr = 32; +#endif + env->ci_large_pages = true; + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER7(env); + ppcPOWER7_irq_init(ppc_env_get_cpu(env)); } static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr) @@ -8636,7 +8640,45 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) static void init_proc_POWER8(CPUPPCState *env) { - init_proc_book3s_64(env, BOOK3S_CPU_POWER8); + /* 
Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_207_dbg(env); + + /* POWER8 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_amr(env); + gen_spr_iamr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power8_tce_address_control(env); + gen_spr_power8_ids(env); + gen_spr_power8_ebb(env); + gen_spr_power8_fscr(env); + gen_spr_power8_pmu_sup(env); + gen_spr_power8_pmu_user(env); + gen_spr_power8_tm(env); + gen_spr_power8_pspb(env); + gen_spr_vtb(env); + gen_spr_power8_ic(env); + gen_spr_power8_book4(env); + gen_spr_power8_rpr(env); + + /* env variables */ +#if !defined(CONFIG_USER_ONLY) + env->slb_nr = 32; +#endif + env->ci_large_pages = true; + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER8(env); + ppcPOWER7_irq_init(ppc_env_get_cpu(env)); } static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr) @@ -8764,9 +8806,47 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } + static void init_proc_POWER9(CPUPPCState *env) { - init_proc_book3s_64(env, BOOK3S_CPU_POWER9); + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_book3s_207_dbg(env); + + /* POWER8 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_amr(env); + gen_spr_iamr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power8_tce_address_control(env); + gen_spr_power8_ids(env); + gen_spr_power8_ebb(env); + gen_spr_power8_fscr(env); + gen_spr_power8_pmu_sup(env); + gen_spr_power8_pmu_user(env); + gen_spr_power8_tm(env); + gen_spr_power8_pspb(env); + gen_spr_vtb(env); + gen_spr_power8_ic(env); + gen_spr_power8_book4(env); + gen_spr_power8_rpr(env); + + /* env variables */ +#if !defined(CONFIG_USER_ONLY) + env->slb_nr = 32; +#endif + env->ci_large_pages = true; + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER8(env); + ppcPOWER7_irq_init(ppc_env_get_cpu(env)); } static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) From b2899495e3bd467adb9ef195655407cd58a97ded Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:38 +1100 Subject: [PATCH 07/17] target/ppc/POWER9: Add POWER9 mmu fault handler Add a new mmu fault handler for the POWER9 cpu and add it as the handler for the POWER9 cpu definition. This handler checks if the guest is radix or hash based on the value in the partition table entry and calls the correct fault handler accordingly. The hash fault handling code has also been updated to check if the partition is using segment tables. Currently only legacy hash (no segment tables) is supported. 
Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Signed-off-by: David Gibson --- target/ppc/Makefile.objs | 2 +- target/ppc/mmu-book3s-v3.c | 37 +++++++++++++++++++++++++++ target/ppc/mmu-book3s-v3.h | 50 +++++++++++++++++++++++++++++++++++++ target/ppc/mmu-hash64.c | 8 ++++++ target/ppc/mmu_helper.c | 15 +++++++++++ target/ppc/translate_init.c | 3 ++- 6 files changed, 113 insertions(+), 2 deletions(-) create mode 100644 target/ppc/mmu-book3s-v3.c create mode 100644 target/ppc/mmu-book3s-v3.h diff --git a/target/ppc/Makefile.objs b/target/ppc/Makefile.objs index 0057b319c0..f963777277 100644 --- a/target/ppc/Makefile.objs +++ b/target/ppc/Makefile.objs @@ -3,7 +3,7 @@ obj-y += cpu.o obj-y += translate.o ifeq ($(CONFIG_SOFTMMU),y) obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o arch_dump.o -obj-$(TARGET_PPC64) += mmu-hash64.o compat.o +obj-$(TARGET_PPC64) += mmu-hash64.o mmu-book3s-v3.o compat.o endif obj-$(CONFIG_KVM) += kvm.o obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c new file mode 100644 index 0000000000..005c96340a --- /dev/null +++ b/target/ppc/mmu-book3s-v3.c @@ -0,0 +1,37 @@ +/* + * PowerPC ISAV3 BookS emulation generic mmu helpers for qemu. + * + * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "mmu-hash64.h" +#include "mmu-book3s-v3.h" +#include "qemu/error-report.h" + +int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx) +{ + if (ppc64_radix_guest(cpu)) { /* Guest uses radix */ + /* TODO - Unsupported */ + error_report("Guest Radix Support Unimplemented"); + exit(1); + } else { /* Guest uses hash */ + return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); + } +} diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h new file mode 100644 index 0000000000..636f6ab95f --- /dev/null +++ b/target/ppc/mmu-book3s-v3.h @@ -0,0 +1,50 @@ +/* + * PowerPC ISAV3 BookS emulation generic mmu definitions for qemu. + * + * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#ifndef MMU_H +#define MMU_H + +#ifndef CONFIG_USER_ONLY + +/* Partition Table Entry Fields */ +#define PATBE1_GR 0x8000000000000000 + +#ifdef TARGET_PPC64 + +static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu) +{ + return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT); +} + +static inline bool ppc64_radix_guest(PowerPCCPU *cpu) +{ + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + + return !!(vhc->get_patbe(cpu->vhyp) & PATBE1_GR); +} + +int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx); + +#endif /* TARGET_PPC64 */ + +#endif /* CONFIG_USER_ONLY */ + +#endif /* MMU_H */ diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index d44f2bb432..368ee60efb 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -28,6 +28,7 @@ #include "mmu-hash64.h" #include "exec/log.h" #include "hw/hw.h" +#include "mmu-book3s-v3.h" //#define DEBUG_SLB @@ -726,6 +727,13 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, /* 2. Translation is on, so look up the SLB */ slb = slb_lookup(cpu, eaddr); if (!slb) { + /* No entry found, check if in-memory segment tables are in use */ + if ((env->mmu_model & POWERPC_MMU_V3) && ppc64_use_proc_tbl(cpu)) { + /* TODO - Unsupported */ + error_report("Segment Table Support Unimplemented"); + exit(1); + } + /* Segment still not found, generate the appropriate interrupt */ if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index a1af3d6bf2..18a76d2a3f 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -29,6 +29,7 @@ #include "exec/log.h" #include "helper_regs.h" #include "qemu/error-report.h" +#include "mmu-book3s-v3.h" //#define DEBUG_MMU //#define DEBUG_BATS @@ -1285,6 +1286,13 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) case POWERPC_MMU_2_07a: dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env)); break; + case POWERPC_MMU_3_00: + if (ppc64_radix_guest(ppc_env_get_cpu(env))) { + /* TODO - Unsupported */ + } else { + dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env)); + break; + } #endif default: qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); @@ -1426,6 +1434,13 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) case POWERPC_MMU_2_07: case POWERPC_MMU_2_07a: return ppc_hash64_get_phys_page_debug(cpu, addr); + case POWERPC_MMU_3_00: + if (ppc64_radix_guest(ppc_env_get_cpu(env))) { + /* TODO - Unsupported */ + } else { + return ppc_hash64_get_phys_page_debug(cpu, addr); + } + break; #endif case POWERPC_MMU_32B: diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c index 1a05ac36e5..dc2f8ebe10 100644 --- a/target/ppc/translate_init.c +++ b/target/ppc/translate_init.c @@ -32,6 +32,7 @@ #include "qapi/visitor.h" #include "hw/qdev-properties.h" #include "hw/ppc/ppc.h" +#include "mmu-book3s-v3.h" //#define PPC_DUMP_CPU //#define PPC_DEBUG_SPR @@ -8910,7 +8911,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_3_00; #if defined(CONFIG_SOFTMMU) - pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; + pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; /* segment page size remain the same */ pcc->sps = &POWER7_POWER8_sps; #endif From 4975c098c9f4f96d433e88ab1b63d0d7e2a48ce0 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:39 +1100 Subject: [PATCH 08/17] target/ppc/POWER9: Add POWER9 pa-features definition Add a pa-features 
definition which includes all of the new fields which have been added, note we don't claim support for any of these new features at this stage. Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Acked-by: Balbir Singh Signed-off-by: David Gibson --- hw/ppc/spapr.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 1cc5e008d7..81e2adaa65 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -390,6 +390,20 @@ static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset) 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; + /* Currently we don't advertise any of the "new" ISAv3.00 functionality */ + uint8_t pa_features_300[] = { 64, 0, + 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 - 29 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 - 35 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 - 41 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 - 47 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 - 53 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 - 59 */ + 0x00, 0x00, 0x00, 0x00 }; /* 60 - 63 */ + uint8_t *pa_features; size_t pa_size; @@ -404,6 +418,10 @@ static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset) pa_features = pa_features_207; pa_size = sizeof(pa_features_207); break; + case POWERPC_MMU_3_00: + pa_features = pa_features_300; + pa_size = sizeof(pa_features_300); + break; default: return; } From 6f46dcb3e5b9352ad3479bd6f005576510ee45fb Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:40 +1100 Subject: [PATCH 09/17] target/ppc/POWER9: Add cpu_has_work function for POWER9 The cpu has work function is used to mask interrupts used to determine if there is work for the cpu based on the LPCR. Add a function to do this for POWER9 and add it to the POWER9 cpu definition. This is similar to that for POWER8 except using the LPCR bits as defined for POWER9. 
Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Signed-off-by: David Gibson --- target/ppc/translate_init.c | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c index dc2f8ebe10..c1a901455c 100644 --- a/target/ppc/translate_init.c +++ b/target/ppc/translate_init.c @@ -8858,10 +8858,54 @@ static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) return false; } +static bool cpu_has_work_POWER9(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + if (cs->halted) { + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + /* External Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && + (env->spr[SPR_LPCR] & LPCR_EEE)) { + return true; + } + /* Decrementer Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && + (env->spr[SPR_LPCR] & LPCR_DEE)) { + return true; + } + /* Machine Check or Hypervisor Maintenance Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | + 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { + return true; + } + /* Privileged Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_PDEE)) { + return true; + } + /* Hypervisor Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_HDEE)) { + return true; + } + if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + return true; + } + return false; + } else { + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + } +} + POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER9"; dc->desc = "POWER9"; @@ -8872,6 +8916,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; + cc->has_work = cpu_has_work_POWER9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | From 24d8e5655f3da3f07cbb8689419ca9836c70fa94 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 17:54:41 +1100 Subject: [PATCH 10/17] hw/ppc/spapr: Add POWER9 to pseries cpu models Add POWER9 cpu to list of spapr core models which allows it to be specified as the cpu model for a pseries guest (e.g. -machine pseries -cpu POWER9). This now allows a POWER9 cpu to boot to userspace in tcg emulation for a pseries machine with a legacy kernel. 
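For example, an illustrative invocation (the memory and kernel options here are placeholders, not taken from the patch) would be:

    qemu-system-ppc64 -machine pseries,accel=tcg -cpu POWER9 \
        -m 2G -nographic -kernel vmlinux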
Signed-off-by: Suraj Jitindar Singh Reviewed-by: David Gibson Acked-by: Balbir Singh Signed-off-by: David Gibson --- hw/ppc/spapr_cpu_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 90d682fe33..d37832d0e3 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -238,6 +238,9 @@ static const char *spapr_core_models[] = { /* POWER8NVL */ "POWER8NVL_v1.0", + + /* POWER9 */ + "POWER9_v1.0", }; void spapr_cpu_core_class_init(ObjectClass *oc, void *data) From a6152b52bc50c5cf1cd118a74b483dd3f0748ebd Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 18:12:52 +1100 Subject: [PATCH 11/17] target/ppc: Add Instruction Authority Mask Register Check The instruction authority mask register (IAMR) can be used to restrict permissions for instruction fetch accesses on a per key basis for each of 32 different key values. Access permissions are derived based on the specific key value stored in the relevant page table entry. The IAMR was introduced in, and is present in processors since, POWER8 (ISA v2.07). Thus introduce a function to check access permissions based on the pte key value and the contents of the IAMR when handling a page fault to ensure sufficient access permissions for an instruction fetch. A hash pte contains a key value in bits 2:3|52:54 of the second double word of the pte, this key value gives an index into the IAMR which contains 32 2-bit access masks. If the least significant bit of the 2-bit access mask corresponding to the given key value is set (IAMR[key] & 0x1 == 0x1) then the instruction fetch is not permitted and an ISI is generated accordingly. While we're here, add defines for the srr1 bits to be set for the ISI for clarity. e.g. pte: dw0 [XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX] dw1 [XX01XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX010XXXXXXXXX] ^^ ^^^ key = 01010 (0x0a) IAMR: [XXXXXXXXXXXXXXXXXXXX01XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX] ^^ Access mask = 0b01 Test access mask: 0b01 & 0x1 == 0x1 Least significant bit of the access mask is set, thus the instruction fetch is not permitted. We should generate an instruction storage interrupt (ISI) with bit 42 of SRR1 set to indicate access precluded by virtual page class key protection. 
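The arithmetic walked through above condenses to a few lines. The following is a standalone sketch for illustration; the series itself implements the check as ppc_hash64_iamr_prot() in mmu-hash64.c, which takes the key as a parameter rather than extracting it from the pte:

    /* Standalone sketch of the IAMR check described above; not patch code. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool iamr_permits_fetch(uint64_t pte_dw1, uint64_t iamr)
    {
        /* key is pte bits 2:3 || 52:54 (IBM numbering, bit 0 is the MSB) */
        unsigned key = ((pte_dw1 >> 57) & 0x18) | ((pte_dw1 >> 9) & 0x7);

        /* the IAMR holds 32 two-bit masks, key 0 occupying the two MSBs */
        unsigned mask = (iamr >> (2 * (31 - key))) & 0x3;

        /* the fetch is precluded when the least significant mask bit is set */
        return (mask & 0x1) == 0;
    }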
Signed-off-by: Suraj Jitindar Singh [dwg: Move new constants to cpu.h, since they're not MMUv3 specific] Signed-off-by: David Gibson --- target/ppc/cpu.h | 5 +++++ target/ppc/mmu-hash64.c | 41 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 674bb3f6d9..42fed6ee25 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -473,6 +473,11 @@ struct ppc_slb_t { #endif #endif +/* SRR1 error code fields */ + +#define SRR1_PROTFAULT 0x08000000 +#define SRR1_IAMR 0x00200000 + /* Facility Status and Control (FSCR) bits */ #define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */ #define FSCR_TAR (63 - 55) /* Target Address Register */ diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 368ee60efb..ee94f13481 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -343,6 +343,23 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu, return prot; } +/* Check the instruction access permissions specified in the IAMR */ +static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key) +{ + CPUPPCState *env = &cpu->env; + int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3; + + /* + * An instruction fetch is permitted if the IAMR bit is 0. + * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit + * can only take away EXEC permissions not READ or WRITE permissions. + * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since + * EXEC permissions are allowed. + */ + return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE : + PAGE_READ | PAGE_WRITE | PAGE_EXEC; +} + static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) { CPUPPCState *env = &cpu->env; @@ -375,6 +392,21 @@ static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) prot &= ~PAGE_READ; } + switch (env->mmu_model) { + /* + * MMU version 2.07 and later support IAMR + * Check if the IAMR allows the instruction access - it will return + * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0 + * if it does (and prot will be unchanged indicating execution support). + */ + case POWERPC_MMU_2_07: + case POWERPC_MMU_3_00: + prot &= ppc_hash64_iamr_prot(cpu, key); + break; + default: + break; + } + return prot; } @@ -780,7 +812,14 @@ skip_slb_search: /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); if (rwx == 2) { - ppc_hash64_set_isi(cs, env, 0x08000000); + int srr1 = 0; + if (PAGE_EXEC & ~pp_prot) { + srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ + } + if (PAGE_EXEC & ~amr_prot) { + srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */ + } + ppc_hash64_set_isi(cs, env, srr1); } else { dsisr = 0; if (need_prot[rwx] & ~pp_prot) { From 347a5c73bafd1b5872c9d3192a4d08f8aa1d5f5a Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 18:12:53 +1100 Subject: [PATCH 12/17] target/ppc: Add execute permission checking to access authority check Basic storage protection defines various access authority permissions based on a slb storage key and pte pp value pair. This access authority defines read, write and execute permissions however currently we only use this to control read and write permissions and ignore the execute control. Fix the code to allow execute permissions based on the key-pp value pair. Execute is allowed under the same conditions which enable reads. (i.e. 
read permission -> execute permission) Signed-off-by: Suraj Jitindar Singh Acked-by: David Gibson Signed-off-by: David Gibson --- target/ppc/mmu-hash64.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index ee94f13481..99f936ddbe 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -308,28 +308,27 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu, case 0x0: case 0x1: case 0x2: - prot = PAGE_READ | PAGE_WRITE; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; break; case 0x3: case 0x6: - prot = PAGE_READ; + prot = PAGE_READ | PAGE_EXEC; break; } } else { switch (pp) { case 0x0: case 0x6: - prot = 0; break; case 0x1: case 0x3: - prot = PAGE_READ; + prot = PAGE_READ | PAGE_EXEC; break; case 0x2: - prot = PAGE_READ | PAGE_WRITE; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; break; } } From 07a68f990785a8574c78a36b21cf5165e46f1113 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 18:12:54 +1100 Subject: [PATCH 13/17] target/ppc: Move no-execute and guarded page checking into new function A pte entry has bit fields which can be used to make a page no-execute or guarded, if either of these bits are set then an instruction access to this page will fail. Currently these bits are checked with the pp_prot function however the ISA specifies that the access authority controlled by the key-pp value pair should only be checked on an instruction access after the no-execute and guard bits have already been verified to permit the access. Move the no-execute and guard bit checking into a new separate function. Note that we can remove the check for the no-execute bit in the slb entry since this check was already performed above when we obtained the slb entry. In the event that the no-execute or guard bits are set, an ISI should be generated with the SRR1_NOEXEC_GUARD (0x10000000) bit set in srr1. Add a define for this for clarity. Signed-off-by: Suraj Jitindar Singh [dwg: Move constants to cpu.h since they're not MMUv3 specific] Signed-off-by: David Gibson --- target/ppc/cpu.h | 1 + target/ppc/mmu-hash64.c | 25 ++++++++++++++++--------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 42fed6ee25..14c286e09a 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -475,6 +475,7 @@ struct ppc_slb_t { /* SRR1 error code fields */ +#define SRR1_NOEXEC_GUARD 0x10000000 #define SRR1_PROTFAULT 0x08000000 #define SRR1_IAMR 0x00200000 diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 99f936ddbe..d985617068 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -290,6 +290,16 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) return rt; } +/* Check No-Execute or Guarded Storage */ +static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu, + ppc_hash_pte64_t pte) +{ + /* Exec permissions CANNOT take away read or write permissions */ + return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ? 
+ PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; +} + +/* Check Basic Storage Protection */ static int ppc_hash64_pte_prot(PowerPCCPU *cpu, ppc_slb_t *slb, ppc_hash_pte64_t pte) { @@ -333,12 +343,6 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu, } } - /* No execute if either noexec or guarded bits set */ - if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) - || (slb->vsid & SLB_VSID_N)) { - prot |= PAGE_EXEC; - } - return prot; } @@ -696,7 +700,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, unsigned apshift; hwaddr ptex; ppc_hash_pte64_t pte; - int pp_prot, amr_prot, prot; + int exec_prot, pp_prot, amr_prot, prot; uint64_t new_pte1, dsisr; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; @@ -803,16 +807,19 @@ skip_slb_search: /* 5. Check access permissions */ + exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); amr_prot = ppc_hash64_amr_prot(cpu, pte); - prot = pp_prot & amr_prot; + prot = exec_prot & pp_prot & amr_prot; if ((need_prot[rwx] & ~prot) != 0) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); if (rwx == 2) { int srr1 = 0; - if (PAGE_EXEC & ~pp_prot) { + if (PAGE_EXEC & ~exec_prot) { + srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ + } else if (PAGE_EXEC & ~pp_prot) { srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ } if (PAGE_EXEC & ~amr_prot) { From da82c73a950a99b9d6c1ec3eba3d1d6034effd43 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 1 Mar 2017 18:12:55 +1100 Subject: [PATCH 14/17] target/ppc: Rework hash mmu page fault code and add defines for clarity The hash mmu page fault handling code is responsible for generating ISIs and DSIs when access permissions cause an access to fail. Part of this involves setting the srr1 or dsisr registers to indicate what causes the access to fail. Add defines for the bit fields of these registers and rework the code to use these new defines in order to improve readability and code clarity. While we're here, update what is logged when an access fails to include information as to what caused to access to fail for debug purposes. 
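
To make the new encoding concrete, the fragment below is a small self-contained sketch (not QEMU code) of how the cause bits are intended to combine for a failed data access. The helper compose_dsisr() and its simplified inputs are invented for illustration; only the bit values match the defines added by this patch.

    /* Illustrative only: mirrors how ppc_hash64_handle_mmu_fault() ORs the
     * DSISR cause bits together for a rejected data access. For instruction
     * fetches the analogous values go into SRR1 instead (SRR1_NOPTE,
     * SRR1_PROTFAULT, SRR1_IAMR, plus SRR1_NOEXEC_GUARD). */
    #include <stdio.h>

    #define DSISR_NOPTE      0x40000000  /* no matching hash PTE found */
    #define DSISR_PROTFAULT  0x08000000  /* pp/key access authority violation */
    #define DSISR_ISSTORE    0x02000000  /* faulting access was a store */
    #define DSISR_AMR        0x00200000  /* virtual page class key violation */

    /* compose_dsisr() is a made-up helper, not part of the patch. */
    static unsigned compose_dsisr(int is_store, int pte_found,
                                  int pp_denied, int amr_denied)
    {
        unsigned dsisr = 0;

        if (!pte_found) {
            dsisr |= DSISR_NOPTE;
        } else {
            if (pp_denied) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (amr_denied) {
                dsisr |= DSISR_AMR;
            }
        }
        if (is_store) {
            dsisr |= DSISR_ISSTORE;
        }
        return dsisr;
    }

    int main(void)
    {
        /* A store rejected by the AMR: prints dsisr = 0x02200000 */
        printf("dsisr = 0x%08x\n", compose_dsisr(1, 1, 0, 1));
        return 0;
    }
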
Signed-off-by: Suraj Jitindar Singh [dwg: Moved constants to cpu.h since they're not MMUv3 specific] Signed-off-by: David Gibson --- target/ppc/cpu.h | 14 ++++++++++++-- target/ppc/mmu-hash64.c | 24 ++++++++++++------------ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 14c286e09a..7c4a1f50b3 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -473,11 +473,21 @@ struct ppc_slb_t { #endif #endif +/* DSISR */ +#define DSISR_NOPTE 0x40000000 +/* Not permitted by access authority of encoded access authority */ +#define DSISR_PROTFAULT 0x08000000 +#define DSISR_ISSTORE 0x02000000 +/* Not permitted by virtual page class key protection */ +#define DSISR_AMR 0x00200000 + /* SRR1 error code fields */ +#define SRR1_NOPTE DSISR_NOPTE +/* Not permitted due to no-execute or guard bit set */ #define SRR1_NOEXEC_GUARD 0x10000000 -#define SRR1_PROTFAULT 0x08000000 -#define SRR1_IAMR 0x00200000 +#define SRR1_PROTFAULT DSISR_PROTFAULT +#define SRR1_IAMR DSISR_AMR /* Facility Status and Control (FSCR) bits */ #define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */ diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index d985617068..d5a871fa94 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -701,7 +701,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, hwaddr ptex; ppc_hash_pte64_t pte; int exec_prot, pp_prot, amr_prot, prot; - uint64_t new_pte1, dsisr; + uint64_t new_pte1; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; @@ -742,11 +742,11 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, } else { /* The access failed, generate the approriate interrupt */ if (rwx == 2) { - ppc_hash64_set_isi(cs, env, 0x08000000); + ppc_hash64_set_isi(cs, env, SRR1_PROTFAULT); } else { - dsisr = 0x08000000; + int dsisr = DSISR_PROTFAULT; if (rwx == 1) { - dsisr |= 0x02000000; + dsisr |= DSISR_ISSTORE; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } @@ -784,19 +784,19 @@ skip_slb_search: /* 3. Check for segment level no-execute violation */ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { - ppc_hash64_set_isi(cs, env, 0x10000000); + ppc_hash64_set_isi(cs, env, SRR1_NOEXEC_GUARD); return 1; } /* 4. Locate the PTE in the hash table */ ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); if (ptex == -1) { - dsisr = 0x40000000; if (rwx == 2) { - ppc_hash64_set_isi(cs, env, dsisr); + ppc_hash64_set_isi(cs, env, SRR1_NOPTE); } else { + int dsisr = DSISR_NOPTE; if (rwx == 1) { - dsisr |= 0x02000000; + dsisr |= DSISR_ISSTORE; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } @@ -827,15 +827,15 @@ skip_slb_search: } ppc_hash64_set_isi(cs, env, srr1); } else { - dsisr = 0; + int dsisr = 0; if (need_prot[rwx] & ~pp_prot) { - dsisr |= 0x08000000; + dsisr |= DSISR_PROTFAULT; } if (rwx == 1) { - dsisr |= 0x02000000; + dsisr |= DSISR_ISSTORE; } if (need_prot[rwx] & ~amr_prot) { - dsisr |= 0x00200000; + dsisr |= DSISR_AMR; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } From bb998645284924db6da93e777af5f29ef2f3c0a8 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 1 Mar 2017 16:23:12 +1100 Subject: [PATCH 15/17] spapr_pci: Advertise access to PCIe extended config space The (paravirtual) PCI host bridge on the 'pseries' machine in most regards acts like a regular PCI bus, rather than a PCIe bus. Despite this, though, it does allow access to the PCIe extended config space. We already implemented the RTAS methods to allow this access.. 
but forgot to put the markers into the device tree so that guests know it is there. This adds them in. With this, a pseries guest is able to view extended config space on (for example) an e1000e device. This should be enough to allow guests to use at least some PCIe devices. Signed-off-by: David Gibson --- hw/ppc/spapr_pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 2a3499eaf8..919d3c2c59 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1321,6 +1321,10 @@ static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset, _FDT(fdt_setprop(fdt, offset, "assigned-addresses", (uint8_t *)rp.assigned, rp.assigned_len)); + if (pci_is_express(dev)) { + _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1)); + } + return 0; } From ec975e839cbb6143be80cfc91b1df103fc7e4771 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Thu, 2 Mar 2017 16:38:56 +1100 Subject: [PATCH 16/17] spapr: Small cleanup of PPC MMU enums The PPC MMU types are sometimes treated as if they were a bit field and sometimes as if they were an enum, which causes maintenance problems: flipping bits in the MMU type (which is done on both the 1TB segment and 64K segment bits) currently produces new MMU type values that are not handled in every "switch" on it, sometimes causing an abort(). This patch provides some macros that can be used to filter out the "bit field-like" bits so that the remainder of the value can be switched on, like an enum. This allows removal of all of the "degraded" types from the list and should ease maintenance. Signed-off-by: Sam Bobroff Signed-off-by: David Gibson --- hw/ppc/spapr.c | 10 +++--- target/ppc/cpu-qom.h | 12 +++---- target/ppc/kvm.c | 8 ++--- target/ppc/mmu-hash64.c | 12 +++---- target/ppc/mmu_helper.c | 71 +++++++++++++++++------------------------ target/ppc/translate.c | 14 ++++---- 6 files changed, 55 insertions(+), 72 deletions(-) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 81e2adaa65..14192accf4 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -407,18 +407,16 @@ static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset) uint8_t *pa_features; size_t pa_size; - switch (env->mmu_model) { - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: + switch (POWERPC_MMU_VER(env->mmu_model)) { + case POWERPC_MMU_VER_2_06: pa_features = pa_features_206; pa_size = sizeof(pa_features_206); break; - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: + case POWERPC_MMU_VER_2_07: pa_features = pa_features_207; pa_size = sizeof(pa_features_207); break; - case POWERPC_MMU_3_00: + case POWERPC_MMU_VER_3_00: pa_features = pa_features_300; pa_size = sizeof(pa_features_300); break; diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index da7eb5a6d6..81500e5748 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -80,22 +80,22 @@ enum powerpc_mmu_t { POWERPC_MMU_2_06 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG | POWERPC_MMU_64K | POWERPC_MMU_AMR | 0x00000003, - /* Architecture 2.06 "degraded" (no 1T segments) */ - POWERPC_MMU_2_06a = POWERPC_MMU_64 | POWERPC_MMU_AMR - | 0x00000003, /* Architecture 2.07 variant */ POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG | POWERPC_MMU_64K | POWERPC_MMU_AMR | 0x00000004, - /* Architecture 2.07 "degraded" (no 1T segments) */ - POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR - | 0x00000004, /* Architecture 3.00 variant */ POWERPC_MMU_3_00 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG | POWERPC_MMU_64K | POWERPC_MMU_AMR | POWERPC_MMU_V3 | 0x00000005, };
+#define POWERPC_MMU_VER(x) ((x) & (POWERPC_MMU_64 | 0xFFFF)) +#define POWERPC_MMU_VER_64B POWERPC_MMU_VER(POWERPC_MMU_64B) +#define POWERPC_MMU_VER_2_03 POWERPC_MMU_VER(POWERPC_MMU_2_03) +#define POWERPC_MMU_VER_2_06 POWERPC_MMU_VER(POWERPC_MMU_2_06) +#define POWERPC_MMU_VER_2_07 POWERPC_MMU_VER(POWERPC_MMU_2_07) +#define POWERPC_MMU_VER_3_00 POWERPC_MMU_VER(POWERPC_MMU_3_00) /*****************************************************************************/ /* Exception model */ diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 9b51484052..1adb55cb01 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -283,8 +283,8 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu, info->flags |= KVM_PPC_1T_SEGMENTS; } - if (env->mmu_model == POWERPC_MMU_2_06 || - env->mmu_model == POWERPC_MMU_2_07) { + if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 || + POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) { info->slb_size = 32; } else { info->slb_size = 64; @@ -298,8 +298,8 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu, i++; /* 64K on MMU 2.06 and later */ - if (env->mmu_model == POWERPC_MMU_2_06 || - env->mmu_model == POWERPC_MMU_2_07) { + if (POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_06 || + POWERPC_MMU_VER(env->mmu_model) == POWERPC_MMU_VER_2_07) { info->sps[i].page_shift = 16; info->sps[i].slb_enc = 0x110; info->sps[i].enc[0].page_shift = 16; diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index d5a871fa94..14d34e512f 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -1032,8 +1032,8 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val) uint64_t lpcr = 0; /* Filter out bits */ - switch (env->mmu_model) { - case POWERPC_MMU_64B: /* 970 */ + switch (POWERPC_MMU_VER(env->mmu_model)) { + case POWERPC_MMU_VER_64B: /* 970 */ if (val & 0x40) { lpcr |= LPCR_LPES0; } @@ -1059,26 +1059,26 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val) * to dig HRMOR out of HID5 */ break; - case POWERPC_MMU_2_03: /* P5p */ + case POWERPC_MMU_VER_2_03: /* P5p */ lpcr = val & (LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 | LPCR_RMI | LPCR_HDICE); break; - case POWERPC_MMU_2_06: /* P7 */ + case POWERPC_MMU_VER_2_06: /* P7 */ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 | LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE); break; - case POWERPC_MMU_2_07: /* P8 */ + case POWERPC_MMU_VER_2_07: /* P8 */ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 | LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE); break; - case POWERPC_MMU_3_00: /* P9 */ + case POWERPC_MMU_VER_3_00: /* P9 */ lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index 18a76d2a3f..65d1c8692d 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -1266,7 +1266,7 @@ static void mmu6xx_dump_mmu(FILE *f, fprintf_function cpu_fprintf, void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) { - switch (env->mmu_model) { + switch (POWERPC_MMU_VER(env->mmu_model)) { case POWERPC_MMU_BOOKE: mmubooke_dump_mmu(f, cpu_fprintf, env); break; @@ -1278,15 +1278,13 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, 
CPUPPCState *env) mmu6xx_dump_mmu(f, cpu_fprintf, env); break; #if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: + case POWERPC_MMU_VER_64B: + case POWERPC_MMU_VER_2_03: + case POWERPC_MMU_VER_2_06: + case POWERPC_MMU_VER_2_07: dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env)); break; - case POWERPC_MMU_3_00: + case POWERPC_MMU_VER_3_00: if (ppc64_radix_guest(ppc_env_get_cpu(env))) { /* TODO - Unsupported */ } else { @@ -1425,16 +1423,14 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) CPUPPCState *env = &cpu->env; mmu_ctx_t ctx; - switch (env->mmu_model) { + switch (POWERPC_MMU_VER(env->mmu_model)) { #if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: + case POWERPC_MMU_VER_64B: + case POWERPC_MMU_VER_2_03: + case POWERPC_MMU_VER_2_06: + case POWERPC_MMU_VER_2_07: return ppc_hash64_get_phys_page_debug(cpu, addr); - case POWERPC_MMU_3_00: + case POWERPC_MMU_VER_3_00: if (ppc64_radix_guest(ppc_env_get_cpu(env))) { /* TODO - Unsupported */ } else { @@ -1924,6 +1920,12 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) { PowerPCCPU *cpu = ppc_env_get_cpu(env); +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + env->tlb_need_flush = 0; + tlb_flush(CPU(cpu)); + } else +#endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: @@ -1948,21 +1950,12 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) break; case POWERPC_MMU_32B: case POWERPC_MMU_601: -#if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: - case POWERPC_MMU_3_00: -#endif /* defined(TARGET_PPC64) */ env->tlb_need_flush = 0; tlb_flush(CPU(cpu)); break; default: /* XXX: TODO */ - cpu_abort(CPU(cpu), "Unknown MMU model %d\n", env->mmu_model); + cpu_abort(CPU(cpu), "Unknown MMU model %x\n", env->mmu_model); break; } } @@ -1971,6 +1964,16 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) addr &= TARGET_PAGE_MASK; +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + /* tlbie invalidate TLBs for all segments */ + /* XXX: given the fact that there are too many segments to invalidate, + * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, + * we just invalidate all TLBs + */ + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + } else +#endif /* defined(TARGET_PPC64) */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: @@ -1988,22 +1991,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) */ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; break; -#if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: - case POWERPC_MMU_3_00: - /* tlbie invalidate TLBs for all segments */ - /* XXX: given the fact that there are too many segments to invalidate, - * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, - * we just invalidate all TLBs - */ - env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; - break; -#endif /* defined(TARGET_PPC64) */ default: /* Should never reach here with other MMU models */ assert(0); diff --git a/target/ppc/translate.c 
b/target/ppc/translate.c index 1de554a435..b6abc60a00 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -7078,19 +7078,17 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, if (env->spr_cb[SPR_LPCR].name) cpu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); - switch (env->mmu_model) { + switch (POWERPC_MMU_VER(env->mmu_model)) { case POWERPC_MMU_32B: case POWERPC_MMU_601: case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: #if defined(TARGET_PPC64) - case POWERPC_MMU_64B: - case POWERPC_MMU_2_03: - case POWERPC_MMU_2_06: - case POWERPC_MMU_2_06a: - case POWERPC_MMU_2_07: - case POWERPC_MMU_2_07a: - case POWERPC_MMU_3_00: + case POWERPC_MMU_VER_64B: + case POWERPC_MMU_VER_2_03: + case POWERPC_MMU_VER_2_06: + case POWERPC_MMU_VER_2_07: + case POWERPC_MMU_VER_3_00: #endif if (env->spr_cb[SPR_SDR1].name) { /* SDR1 Exists */ cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " ", env->spr[SPR_SDR1]); From 992d7e976c7eca2b3129cd4bae4a0d350a8065fa Mon Sep 17 00:00:00 2001 From: Nikunj A Dadhania Date: Thu, 2 Mar 2017 19:40:29 +0530 Subject: [PATCH 17/17] target/ppc: rewrite f[n]m[add,sub] using float64_muladd Use the softfloat api for fused multiply-add. Introduce routine to set the FPSCR flags VXNAN, VXIMZ nad VMISI. Signed-off-by: Nikunj A Dadhania Signed-off-by: David Gibson --- target/ppc/fpu_helper.c | 213 +++++++++------------------------------- 1 file changed, 46 insertions(+), 167 deletions(-) diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index 58aee640c3..0535ad0814 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -743,178 +743,62 @@ uint64_t helper_frim(CPUPPCState *env, uint64_t arg) return do_fri(env, arg, float_round_down); } -/* fmadd - fmadd. */ -uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, - uint64_t arg3) +static void float64_maddsub_update_excp(CPUPPCState *env, float64 arg1, + float64 arg2, float64 arg3, + unsigned int madd_flags) { - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { + if (unlikely((float64_is_infinity(arg1) && float64_is_zero(arg2)) || + (float64_is_zero(arg1) && float64_is_infinity(arg2)))) { /* Multiplication of zero by infinity */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || - float64_is_signaling_nan(farg2.d, &env->fp_status) || - float64_is_signaling_nan(farg3.d, &env->fp_status))) { - /* sNaN operation */ - float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; + arg1 = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); + } else if (unlikely(float64_is_signaling_nan(arg1, &env->fp_status) || + float64_is_signaling_nan(arg2, &env->fp_status) || + float64_is_signaling_nan(arg3, &env->fp_status))) { + /* sNaN operation */ + float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); + } else if ((float64_is_infinity(arg1) || float64_is_infinity(arg2)) && + float64_is_infinity(arg3)) { + uint8_t aSign, bSign, cSign; - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && - float64_is_infinity(farg3.d) && - 
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); + aSign = float64_is_neg(arg1); + bSign = float64_is_neg(arg2); + cSign = float64_is_neg(arg3); + if (madd_flags & float_muladd_negate_c) { + cSign ^= 1; + } + if (aSign ^ bSign ^ cSign) { + float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } } - - return farg1.ll; } -/* fmsub - fmsub. */ -uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, - uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && - float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || - float64_is_signaling_nan(farg2.d, &env->fp_status) || - float64_is_signaling_nan(farg3.d, &env->fp_status))) { - /* sNaN operation */ - float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && - float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - } - return farg1.ll; +#define FPU_FMADD(op, madd_flags) \ +uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ + uint64_t arg2, uint64_t arg3) \ +{ \ + uint32_t flags; \ + float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \ + &env->fp_status); \ + flags = get_float_exception_flags(&env->fp_status); \ + if (flags) { \ + if (flags & float_flag_invalid) { \ + float64_maddsub_update_excp(env, arg1, arg2, arg3, \ + madd_flags); \ + } \ + float_check_status(env); \ + } \ + return ret; \ } -/* fnmadd - fnmadd. 
*/ -uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, - uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; +#define MADD_FLGS 0 +#define MSUB_FLGS float_muladd_negate_c +#define NMADD_FLGS float_muladd_negate_result +#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || - float64_is_signaling_nan(farg2.d, &env->fp_status) || - float64_is_signaling_nan(farg3.d, &env->fp_status))) { - /* sNaN operation */ - float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && - float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - if (likely(!float64_is_any_nan(farg1.d))) { - farg1.d = float64_chs(farg1.d); - } - } - return farg1.ll; -} - -/* fnmsub - fnmsub. */ -uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, - uint64_t arg3) -{ - CPU_DoubleU farg1, farg2, farg3; - - farg1.ll = arg1; - farg2.ll = arg2; - farg3.ll = arg3; - - if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || - (float64_is_zero(farg1.d) && - float64_is_infinity(farg2.d)))) { - /* Multiplication of zero by infinity */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); - } else { - if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) || - float64_is_signaling_nan(farg2.d, &env->fp_status) || - float64_is_signaling_nan(farg3.d, &env->fp_status))) { - /* sNaN operation */ - float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); - } - /* This is the way the PowerPC specification defines it */ - float128 ft0_128, ft1_128; - - ft0_128 = float64_to_float128(farg1.d, &env->fp_status); - ft1_128 = float64_to_float128(farg2.d, &env->fp_status); - ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); - if (unlikely(float128_is_infinity(ft0_128) && - float64_is_infinity(farg3.d) && - float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { - /* Magnitude subtraction of infinities */ - farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); - } else { - ft1_128 = float64_to_float128(farg3.d, &env->fp_status); - ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); - farg1.d = float128_to_float64(ft0_128, &env->fp_status); - } - if (likely(!float64_is_any_nan(farg1.d))) { - farg1.d = float64_chs(farg1.d); - } - } - return farg1.ll; -} +FPU_FMADD(fmadd, MADD_FLGS) +FPU_FMADD(fnmadd, NMADD_FLGS) +FPU_FMADD(fmsub, MSUB_FLGS) +FPU_FMADD(fnmsub, NMSUB_FLGS) /* frsp - frsp. 
*/ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) @@ -2384,11 +2268,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \ float_check_status(env); \ } -#define MADD_FLGS 0 -#define MSUB_FLGS float_muladd_negate_c -#define NMADD_FLGS float_muladd_negate_result -#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) - VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0) VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0) VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
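
For reference, the four flag sets defined in the last patch map onto the familiar forms of fused multiply-add: MADD_FLGS computes a*b+c, MSUB_FLGS negates the addend (a*b-c), and NMADD_FLGS/NMSUB_FLGS additionally negate the final result. The sketch below illustrates those semantics with host doubles and fma() from libm rather than QEMU's softfloat; madd_ref() and the stand-in flag constants are invented for illustration, only the flag-to-operation mapping comes from the patch.

    /* Illustrative only: reference semantics of the muladd flags, written
     * with host doubles and fma(3). QEMU performs the equivalent operation
     * inside softfloat's float64_muladd(). */
    #include <math.h>
    #include <stdio.h>

    #define NEGATE_C       0x1   /* stand-in for float_muladd_negate_c */
    #define NEGATE_RESULT  0x2   /* stand-in for float_muladd_negate_result */

    /* madd_ref() is a made-up helper: compute a*b+c, honouring the flags. */
    static double madd_ref(double a, double b, double c, unsigned flags)
    {
        double r = fma(a, b, (flags & NEGATE_C) ? -c : c);
        return (flags & NEGATE_RESULT) ? -r : r;
    }

    int main(void)
    {
        double a = 2.0, b = 3.0, c = 1.0;

        printf("fmadd : %g\n", madd_ref(a, b, c, 0));                        /* a*b+c = 7 */
        printf("fmsub : %g\n", madd_ref(a, b, c, NEGATE_C));                 /* a*b-c = 5 */
        printf("fnmadd: %g\n", madd_ref(a, b, c, NEGATE_RESULT));            /* -(a*b+c) = -7 */
        printf("fnmsub: %g\n", madd_ref(a, b, c, NEGATE_C | NEGATE_RESULT)); /* -(a*b-c) = -5 */
        return 0;
    }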