diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 408fc4efca..8d52472863 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -566,6 +566,65 @@ int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
     g_assert_not_reached();
 }
 
+SMMUTLBEntry *smmu_translate(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t addr,
+                             IOMMUAccessFlags flag, SMMUPTWEventInfo *info)
+{
+    uint64_t page_mask, aligned_addr;
+    SMMUTLBEntry *cached_entry = NULL;
+    SMMUTransTableInfo *tt;
+    int status;
+
+    /*
+     * Combined attributes used for TLB lookup; as only one stage is supported,
+     * it holds attributes based on the enabled stage.
+     */
+    SMMUTransTableInfo tt_combined;
+
+    if (cfg->stage == SMMU_STAGE_1) {
+        /* Select stage1 translation table. */
+        tt = select_tt(cfg, addr);
+        if (!tt) {
+            info->type = SMMU_PTW_ERR_TRANSLATION;
+            info->stage = SMMU_STAGE_1;
+            return NULL;
+        }
+        tt_combined.granule_sz = tt->granule_sz;
+        tt_combined.tsz = tt->tsz;
+
+    } else {
+        /* Stage2. */
+        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
+        tt_combined.tsz = cfg->s2cfg.tsz;
+    }
+
+    /*
+     * TLB lookup needs the granule and input size for a translation stage;
+     * as only one stage is supported right now, choose the right values
+     * from the configuration.
+     */
+    page_mask = (1ULL << tt_combined.granule_sz) - 1;
+    aligned_addr = addr & ~page_mask;
+
+    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
+    if (cached_entry) {
+        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
+            info->type = SMMU_PTW_ERR_PERMISSION;
+            info->stage = cfg->stage;
+            return NULL;
+        }
+        return cached_entry;
+    }
+
+    cached_entry = g_new0(SMMUTLBEntry, 1);
+    status = smmu_ptw(cfg, aligned_addr, flag, cached_entry, info);
+    if (status) {
+        g_free(cached_entry);
+        return NULL;
+    }
+    smmu_iotlb_insert(bs, cfg, cached_entry);
+    return cached_entry;
+}
+
 /**
  * The bus number is used for lookup when SID based invalidation occurs.
  * In that case we lazily populate the SMMUPciBus array from the bus hash
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index aab09df23e..76d9969c93 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -827,6 +827,76 @@ static void smmuv3_flush_config(SMMUDevice *sdev)
     g_hash_table_remove(bc->configs, sdev);
 }
 
+/* Do translation with TLB lookup. */
+static SMMUTranslationStatus smmuv3_do_translate(SMMUv3State *s, hwaddr addr,
+                                                 SMMUTransCfg *cfg,
+                                                 SMMUEventInfo *event,
+                                                 IOMMUAccessFlags flag,
+                                                 SMMUTLBEntry **out_entry)
+{
+    SMMUPTWEventInfo ptw_info = {};
+    SMMUState *bs = ARM_SMMU(s);
+    SMMUTLBEntry *cached_entry = NULL;
+
+    cached_entry = smmu_translate(bs, cfg, addr, flag, &ptw_info);
+    if (!cached_entry) {
+        /* All faults from PTW have the S2 field. */
+        event->u.f_walk_eabt.s2 = (ptw_info.stage == SMMU_STAGE_2);
+        switch (ptw_info.type) {
+        case SMMU_PTW_ERR_WALK_EABT:
+            event->type = SMMU_EVT_F_WALK_EABT;
+            event->u.f_walk_eabt.addr = addr;
+            event->u.f_walk_eabt.rnw = flag & 0x1;
+            event->u.f_walk_eabt.class = (ptw_info.stage == SMMU_STAGE_2) ?
+                                          SMMU_CLASS_IN : SMMU_CLASS_TT;
+            event->u.f_walk_eabt.addr2 = ptw_info.addr;
+            break;
+        case SMMU_PTW_ERR_TRANSLATION:
+            if (PTW_RECORD_FAULT(cfg)) {
+                event->type = SMMU_EVT_F_TRANSLATION;
+                event->u.f_translation.addr = addr;
+                event->u.f_translation.addr2 = ptw_info.addr;
+                event->u.f_translation.class = SMMU_CLASS_IN;
+                event->u.f_translation.rnw = flag & 0x1;
+            }
+            break;
+        case SMMU_PTW_ERR_ADDR_SIZE:
+            if (PTW_RECORD_FAULT(cfg)) {
+                event->type = SMMU_EVT_F_ADDR_SIZE;
+                event->u.f_addr_size.addr = addr;
+                event->u.f_addr_size.addr2 = ptw_info.addr;
+                event->u.f_addr_size.class = SMMU_CLASS_IN;
+                event->u.f_addr_size.rnw = flag & 0x1;
+            }
+            break;
+        case SMMU_PTW_ERR_ACCESS:
+            if (PTW_RECORD_FAULT(cfg)) {
+                event->type = SMMU_EVT_F_ACCESS;
+                event->u.f_access.addr = addr;
+                event->u.f_access.addr2 = ptw_info.addr;
+                event->u.f_access.class = SMMU_CLASS_IN;
+                event->u.f_access.rnw = flag & 0x1;
+            }
+            break;
+        case SMMU_PTW_ERR_PERMISSION:
+            if (PTW_RECORD_FAULT(cfg)) {
+                event->type = SMMU_EVT_F_PERMISSION;
+                event->u.f_permission.addr = addr;
+                event->u.f_permission.addr2 = ptw_info.addr;
+                event->u.f_permission.class = SMMU_CLASS_IN;
+                event->u.f_permission.rnw = flag & 0x1;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        return SMMU_TRANS_ERROR;
+    }
+    *out_entry = cached_entry;
+    return SMMU_TRANS_SUCCESS;
+}
+
+/* Entry point to SMMU, does everything. */
 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                       IOMMUAccessFlags flag, int iommu_idx)
 {
@@ -836,12 +906,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                            .sid = sid,
                            .inval_ste_allowed = false};
-    SMMUPTWEventInfo ptw_info = {};
     SMMUTranslationStatus status;
-    SMMUState *bs = ARM_SMMU(s);
-    uint64_t page_mask, aligned_addr;
-    SMMUTLBEntry *cached_entry = NULL;
-    SMMUTransTableInfo *tt;
     SMMUTransCfg *cfg = NULL;
     IOMMUTLBEntry entry = {
         .target_as = &address_space_memory,
@@ -850,11 +915,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         .addr_mask = ~(hwaddr)0,
         .perm = IOMMU_NONE,
     };
-    /*
-     * Combined attributes used for TLB lookup, as only one stage is supported,
-     * it will hold attributes based on the enabled stage.
-     */
-    SMMUTransTableInfo tt_combined;
+    SMMUTLBEntry *cached_entry = NULL;
 
     qemu_mutex_lock(&s->mutex);
 
@@ -883,115 +944,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         goto epilogue;
     }
 
-    if (cfg->stage == SMMU_STAGE_1) {
-        /* Select stage1 translation table. */
-        tt = select_tt(cfg, addr);
-        if (!tt) {
-            if (cfg->record_faults) {
-                event.type = SMMU_EVT_F_TRANSLATION;
-                event.u.f_translation.addr = addr;
-                event.u.f_translation.rnw = flag & 0x1;
-            }
-            status = SMMU_TRANS_ERROR;
-            goto epilogue;
-        }
-        tt_combined.granule_sz = tt->granule_sz;
-        tt_combined.tsz = tt->tsz;
-
-    } else {
-        /* Stage2. */
-        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
-        tt_combined.tsz = cfg->s2cfg.tsz;
-    }
-    /*
-     * TLB lookup looks for granule and input size for a translation stage,
-     * as only one stage is supported right now, choose the right values
-     * from the configuration.
-     */
-    page_mask = (1ULL << tt_combined.granule_sz) - 1;
-    aligned_addr = addr & ~page_mask;
-
-    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
-    if (cached_entry) {
-        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
-            status = SMMU_TRANS_ERROR;
-            /*
-             * We know that the TLB only contains either stage-1 or stage-2 as
-             * nesting is not supported. So it is sufficient to check the
-             * translation stage to know the TLB stage for now.
-             */
-            event.u.f_walk_eabt.s2 = (cfg->stage == SMMU_STAGE_2);
-            if (PTW_RECORD_FAULT(cfg)) {
-                event.type = SMMU_EVT_F_PERMISSION;
-                event.u.f_permission.addr = addr;
-                event.u.f_permission.rnw = flag & 0x1;
-            }
-        } else {
-            status = SMMU_TRANS_SUCCESS;
-        }
-        goto epilogue;
-    }
-
-    cached_entry = g_new0(SMMUTLBEntry, 1);
-
-    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
-        /* All faults from PTW has S2 field. */
-        event.u.f_walk_eabt.s2 = (ptw_info.stage == SMMU_STAGE_2);
-        g_free(cached_entry);
-        switch (ptw_info.type) {
-        case SMMU_PTW_ERR_WALK_EABT:
-            event.type = SMMU_EVT_F_WALK_EABT;
-            event.u.f_walk_eabt.addr = addr;
-            event.u.f_walk_eabt.rnw = flag & 0x1;
-            /* Stage-2 (only) is class IN while stage-1 is class TT */
-            event.u.f_walk_eabt.class = (ptw_info.stage == SMMU_STAGE_2) ?
-                                         SMMU_CLASS_IN : SMMU_CLASS_TT;
-            event.u.f_walk_eabt.addr2 = ptw_info.addr;
-            break;
-        case SMMU_PTW_ERR_TRANSLATION:
-            if (PTW_RECORD_FAULT(cfg)) {
-                event.type = SMMU_EVT_F_TRANSLATION;
-                event.u.f_translation.addr = addr;
-                event.u.f_translation.addr2 = ptw_info.addr;
-                event.u.f_translation.class = SMMU_CLASS_IN;
-                event.u.f_translation.rnw = flag & 0x1;
-            }
-            break;
-        case SMMU_PTW_ERR_ADDR_SIZE:
-            if (PTW_RECORD_FAULT(cfg)) {
-                event.type = SMMU_EVT_F_ADDR_SIZE;
-                event.u.f_addr_size.addr = addr;
-                event.u.f_addr_size.addr2 = ptw_info.addr;
-                event.u.f_translation.class = SMMU_CLASS_IN;
-                event.u.f_addr_size.rnw = flag & 0x1;
-            }
-            break;
-        case SMMU_PTW_ERR_ACCESS:
-            if (PTW_RECORD_FAULT(cfg)) {
-                event.type = SMMU_EVT_F_ACCESS;
-                event.u.f_access.addr = addr;
-                event.u.f_access.addr2 = ptw_info.addr;
-                event.u.f_translation.class = SMMU_CLASS_IN;
-                event.u.f_access.rnw = flag & 0x1;
-            }
-            break;
-        case SMMU_PTW_ERR_PERMISSION:
-            if (PTW_RECORD_FAULT(cfg)) {
-                event.type = SMMU_EVT_F_PERMISSION;
-                event.u.f_permission.addr = addr;
-                event.u.f_permission.addr2 = ptw_info.addr;
-                event.u.f_translation.class = SMMU_CLASS_IN;
-                event.u.f_permission.rnw = flag & 0x1;
-            }
-            break;
-        default:
-            g_assert_not_reached();
-        }
-        status = SMMU_TRANS_ERROR;
-    } else {
-        smmu_iotlb_insert(bs, cfg, cached_entry);
-        status = SMMU_TRANS_SUCCESS;
-    }
+    status = smmuv3_do_translate(s, addr, cfg, &event, flag, &cached_entry);
 
 epilogue:
     qemu_mutex_unlock(&s->mutex);
@@ -1002,7 +955,8 @@ epilogue:
                                 (addr & cached_entry->entry.addr_mask);
         entry.addr_mask = cached_entry->entry.addr_mask;
         trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
-                                       entry.translated_addr, entry.perm);
+                                       entry.translated_addr, entry.perm,
+                                       cfg->stage);
         break;
     case SMMU_TRANS_DISABLE:
         entry.perm = flag;
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index f1a54a02df..cc12924a84 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -37,7 +37,7 @@ smmuv3_get_ste(uint64_t addr) "STE addr: 0x%"PRIx64
 smmuv3_translate_disable(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x bypass (smmu disabled) iova:0x%"PRIx64" is_write=%d"
 smmuv3_translate_bypass(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x STE bypass iova:0x%"PRIx64" is_write=%d"
 smmuv3_translate_abort(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x abort on iova:0x%"PRIx64" is_write=%d"
-smmuv3_translate_success(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=0x%x iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x"
+smmuv3_translate_success(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm, int stage) "%s sid=0x%x iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x stage=%d"
 smmuv3_get_cd(uint64_t addr) "CD addr: 0x%"PRIx64
 smmuv3_decode_cd(uint32_t oas) "oas=%d"
 smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz, bool had) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d had:%d"
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index 260582089e..366b53be77 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -183,6 +183,14 @@ static inline uint16_t smmu_get_sid(SMMUDevice *sdev)
 int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
              SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
+
+/*
+ * smmu_translate - Look for a translation in the TLB; if there is none, do a PTW.
+ * Returns NULL on PTW error or in case of TLB permission errors.
+ */
+SMMUTLBEntry *smmu_translate(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t addr,
+                             IOMMUAccessFlags flag, SMMUPTWEventInfo *info);
+
 /**
  * select_tt - compute which translation table shall be used according to
  * the input iova and translation config and return the TT specific info