author     Peter Xu <peterx@redhat.com>            2018-08-15 17:53:28 +0800
committer  Markus Armbruster <armbru@redhat.com>   2018-08-27 15:09:20 +0200
commit     4e4abd111a2af0179a4467368d695958844bf113
tree       424dc986d07547af6e367b0b23e7164bc6d08530 /hw/i386/intel_iommu.c
parent     1376211f77bdcd84dc4acb877690f7399d8cf58a
intel-iommu: replace more vtd_err_* traces
Replace all the trace_vtd_err_*() hooks with the new error_report_once(),
since they are similar to trace_vtd_err(): dumping the first error is
mostly enough, and this way they are on by default too.
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20180815095328.32414-4-peterx@redhat.com>
[Use "%x" instead of "%" PRIx16 to print uint16_t, whitespace tidied up]
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Diffstat (limited to 'hw/i386/intel_iommu.c')
-rw-r--r--   hw/i386/intel_iommu.c | 64
1 file changed, 46 insertions, 18 deletions
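Before the patch itself, a note on the pattern it applies: every touched call site drops its dedicated trace_vtd_err_*() hook and instead prints one self-describing message through error_report_once(). The stand-alone C sketch below only illustrates the behaviour the commit message relies on; the error_report_once() macro here is a simplified stand-in for the real one declared in QEMU's include/qemu/error-report.h, and check_iova() is a hypothetical helper, not code from intel_iommu.c.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for QEMU's error_report_once(): print the message
 * the first time this particular call site fires, then stay silent.
 * This is not QEMU's actual definition; it only mimics the
 * "report the first error" behaviour described in the commit message.
 */
#define error_report_once(fmt, ...)                              \
    do {                                                         \
        static bool reported;                                    \
        if (!reported) {                                         \
            reported = true;                                     \
            fprintf(stderr, fmt "\n", ##__VA_ARGS__);            \
        }                                                        \
    } while (0)

/* Hypothetical check written in the style of the patched call sites. */
static int check_iova(uint64_t iova, uint64_t limit)
{
    if (iova >= limit) {
        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
                          __func__, iova);
        return -1;
    }
    return 0;
}

int main(void)
{
    /* Only the first failing call prints; the second stays silent. */
    check_iova(UINT64_C(1) << 48, UINT64_C(1) << 39);
    check_iova(UINT64_C(1) << 50, UINT64_C(1) << 39);
    return 0;
}

Unlike a trace point, which has to be enabled explicitly at run time, a report like this needs no extra configuration, which is the "on by default" property the commit message mentions.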
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 9c0f525408..3dfada19a6 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -705,7 +705,8 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
     uint64_t access_right_check;
 
     if (!vtd_iova_range_check(iova, ce, aw_bits)) {
-        trace_vtd_err_dmar_iova_overflow(iova);
+        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
+                          __func__, iova);
         return -VTD_FR_ADDR_BEYOND_MGAW;
     }
 
@@ -717,7 +718,8 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
         slpte = vtd_get_slpte(addr, offset);
 
         if (slpte == (uint64_t)-1) {
-            trace_vtd_err_dmar_slpte_read_error(iova, level);
+            error_report_once("%s: detected read error on DMAR slpte "
+                              "(iova=0x%" PRIx64 ")", __func__, iova);
             if (level == vtd_ce_get_level(ce)) {
                 /* Invalid programming of context-entry */
                 return -VTD_FR_CONTEXT_ENTRY_INV;
@@ -728,11 +730,17 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
         *reads = (*reads) && (slpte & VTD_SL_R);
         *writes = (*writes) && (slpte & VTD_SL_W);
         if (!(slpte & access_right_check)) {
-            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
+            error_report_once("%s: detected slpte permission error "
+                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
+                              "slpte=0x%" PRIx64 ", write=%d)", __func__,
+                              iova, level, slpte, is_write);
             return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
         }
         if (vtd_slpte_nonzero_rsvd(slpte, level)) {
-            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
+            error_report_once("%s: detected splte reserve non-zero "
+                              "iova=0x%" PRIx64 ", level=0x%" PRIx32
+                              "slpte=0x%" PRIx64 ")", __func__, iova,
+                              level, slpte);
             return -VTD_FR_PAGING_ENTRY_RSVD;
         }
 
@@ -1697,7 +1705,10 @@ static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
             /* Ok - report back to driver */
             vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
         } else {
-            trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
+            error_report_once("%s: detected improper state when disable QI "
+                              "(head=0x%x, tail=0x%x, last_type=%d)",
+                              __func__,
+                              s->iq_head, s->iq_tail, s->iq_last_desc_type);
         }
     }
 }
@@ -2094,7 +2105,9 @@ static void vtd_fetch_inv_desc(IntelIOMMUState *s)
 
     if (s->iq_tail >= s->iq_size) {
         /* Detects an invalid Tail pointer */
-        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
+        error_report_once("%s: detected invalid QI tail "
+                          "(tail=0x%x, size=0x%x)",
+                          __func__, s->iq_tail, s->iq_size);
         vtd_handle_inv_queue_error(s);
         return;
     }
@@ -2507,10 +2520,12 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                  iotlb.iova, iotlb.translated_addr,
                                  iotlb.addr_mask);
     } else {
-        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
-                                     VTD_PCI_SLOT(vtd_as->devfn),
-                                     VTD_PCI_FUNC(vtd_as->devfn),
-                                     iotlb.iova);
+        error_report_once("%s: detected translation failure "
+                          "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
+                          __func__, pci_bus_num(vtd_as->bus),
+                          VTD_PCI_SLOT(vtd_as->devfn),
+                          VTD_PCI_FUNC(vtd_as->devfn),
+                          iotlb.iova);
     }
 
     return iotlb;
@@ -2626,15 +2641,19 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                           le64_to_cpu(entry->data[0]));
 
     if (!entry->irte.present) {
-        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
-                           le64_to_cpu(entry->data[0]));
+        error_report_once("%s: detected non-present IRTE "
+                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
+                          __func__, index, le64_to_cpu(entry->data[1]),
+                          le64_to_cpu(entry->data[0]));
         return -VTD_FR_IR_ENTRY_P;
     }
 
     if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
-        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
-                           le64_to_cpu(entry->data[0]));
+        error_report_once("%s: detected non-zero reserved IRTE "
+                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
+                          __func__, index, le64_to_cpu(entry->data[1]),
+                          le64_to_cpu(entry->data[0]));
         return -VTD_FR_IR_IRTE_RSVD;
     }
 
@@ -2648,7 +2667,9 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
         case VTD_SVT_ALL:
             mask = vtd_svt_mask[entry->irte.sid_q];
             if ((source_id & mask) != (sid & mask)) {
-                trace_vtd_err_irte_sid(index, sid, source_id);
+                error_report_once("%s: invalid IRTE SID "
+                                  "(index=%u, sid=%u, source_id=%u)",
+                                  __func__, index, sid, source_id);
                 return -VTD_FR_IR_SID_ERR;
             }
             break;
@@ -2658,13 +2679,17 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
             bus_min = source_id & 0xff;
             bus = sid >> 8;
             if (bus > bus_max || bus < bus_min) {
-                trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
+                error_report_once("%s: invalid SVT_BUS "
+                                  "(index=%u, bus=%u, min=%u, max=%u)",
+                                  __func__, index, bus, bus_min, bus_max);
                 return -VTD_FR_IR_SID_ERR;
             }
             break;
 
         default:
-            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
+            error_report_once("%s: detected invalid IRTE SVT "
+                              "(index=%u, type=%d)", __func__,
+                              index, entry->irte.sid_vtype);
             /* Take this as verification failure. */
             return -VTD_FR_IR_SID_ERR;
             break;
@@ -2786,7 +2811,10 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
     if (addr.addr.sub_valid) {
         trace_vtd_ir_remap_type("MSI");
         if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
-            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
+            error_report_once("%s: invalid IR MSI "
+                              "(sid=%u, address=0x%" PRIx64
+                              ", data=0x%" PRIx32 ")",
+                              __func__, sid, origin->address, origin->data);
             return -VTD_FR_IR_REQ_RSVD;
         }
     } else {