File xsa442.patch of Package xen.31136 (Project openSUSE:Step:15-SP1)
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Tue, 13 Jun 2023 15:01:05 +0200
Subject: [PATCH] iommu/amd-vi: flush IOMMU TLB when flushing the DTE

The caching invalidation guidelines from the AMD-Vi specification
(48882--Rev 3.07-PUB--Oct 2022) appear to be misleading on some
hardware: devices will malfunction (see stale DMA mappings) if some
fields of the DTE are updated but the IOMMU TLB is not flushed. This
has been observed in practice on AMD systems.

Given the lack of guidance in the currently published specification,
this patch increases the amount of flushing done in order to prevent
device malfunction.

To fix this, issue an INVALIDATE_IOMMU_PAGES command from
amd_iommu_flush_device(), flushing the whole address space. Note this
requires callers to be adjusted to pass the DomID that was on the DTE
prior to the modification.

Some call sites deliberately pass an invalid DomID to
amd_iommu_flush_device() in order to avoid the flush: in those cases
the device had address translations disabled, so the previous DomID on
the DTE is not valid. Note the current logic relies on the entity
disabling address translations to also flush the TLB of the in-use
DomID.

Device I/O TLB flushing when ATS is enabled is not covered by the
current change, as ATS usage is not security supported.

This is XSA-442 / CVE-2023-34326

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -96,7 +96,8 @@ void amd_iommu_flush_pages(struct domain
                            unsigned int order);
 void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                            uint64_t gaddr, unsigned int order);
-void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf,
+                            domid_t domid);
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 
--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -371,12 +371,20 @@ void amd_iommu_flush_pages(struct domain
     _amd_iommu_flush_pages(d, __dfn_to_daddr(dfn), order);
 }
 
-void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf)
+void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf,
+                            domid_t domid)
 {
     ASSERT( spin_is_locked(&iommu->lock) );
 
     invalidate_dev_table_entry(iommu, bdf);
     flush_command_buffer(iommu, 0);
+
+    /* Also invalidate IOMMU TLB entries when flushing the DTE. */
+    if ( domid != DOMID_INVALID )
+    {
+        invalidate_iommu_pages(iommu, INV_IOMMU_ALL_PAGES_ADDRESS, domid, 0);
+        flush_command_buffer(iommu, 0);
+    }
 }
 
 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf)
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -396,7 +396,7 @@ static int do_completion_wait(struct dom
 
 static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
 {
-    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id;
+    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id, prev_domid;
     dev_entry_t *gdte, *mdte, *dte_base;
     struct amd_iommu *iommu = NULL;
     struct guest_iommu *g_iommu;
@@ -455,12 +455,15 @@ static int do_invalidate_dte(struct doma
     hdom_id = host_domid(d, gdom_id);
     req_id = get_dma_requestor_id(iommu->seg, mbdf);
     mdte = iommu->dev_table.buffer + (req_id * sizeof(dev_entry_t));
+    prev_domid = get_field_from_reg_u32(mdte->data[2],
+                                        IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
+                                        IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
 
     spin_lock_irqsave(&iommu->lock, flags);
 
     iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id,
                             gcr3_mfn << PAGE_SHIFT, gv, glx);
-    amd_iommu_flush_device(iommu, req_id);
+    amd_iommu_flush_device(iommu, req_id, prev_domid);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
     return 0;
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -1368,7 +1368,11 @@ static int _invalidate_all_devices(
         if ( iommu )
         {
             spin_lock_irqsave(&iommu->lock, flags);
-            amd_iommu_flush_device(iommu, req_id);
+            /*
+             * IOMMU TLB flush performed separately (see
+             * invalidate_all_domain_pages()).
+             */
+            amd_iommu_flush_device(iommu, req_id, DOMID_INVALID);
             amd_iommu_flush_intremap(iommu, req_id);
             spin_unlock_irqrestore(&iommu->lock, flags);
         }
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -204,10 +204,15 @@ static int __must_check amd_iommu_setup_
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             iommu_dte_set_iotlb((u32 *)dte, dte_i);
 
-        amd_iommu_flush_device(iommu, req_id);
+        /* DTE didn't have DMA translations enabled, do not flush the TLB. */
+        amd_iommu_flush_device(iommu, req_id, DOMID_INVALID);
     }
     else if ( amd_iommu_get_root_page_table(dte) != page_to_maddr(root_pg) )
     {
+        domid_t prev_domid =
+            get_field_from_reg_u32(dte[2], IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
+                                   IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
+
         /*
          * Strictly speaking if the device is the only one with this requestor
          * ID, it could be allowed to be re-assigned regardless of unity map
@@ -252,7 +257,7 @@ static int __must_check amd_iommu_setup_
                     dte[3], IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK,
                     IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT) == dte_i);
 
-        amd_iommu_flush_device(iommu, req_id);
+        amd_iommu_flush_device(iommu, req_id, prev_domid);
     }
 
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -366,7 +371,7 @@ void amd_iommu_disable_domain_device(str
                                      struct amd_iommu *iommu,
                                      u8 devfn, struct pci_dev *pdev)
 {
-    void *dte;
+    uint32_t *dte;
     unsigned long flags;
     int req_id;
     u8 bus = pdev->bus;
@@ -376,22 +381,23 @@ void amd_iommu_disable_domain_device(str
     dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
     spin_lock_irqsave(&iommu->lock, flags);
-    if ( is_translation_valid((u32 *)dte) )
+    if ( is_translation_valid(dte) )
     {
-        disable_translation((u32 *)dte);
+        domid_t prev_domid =
+            get_field_from_reg_u32(dte[2], IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
+                                   IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
+
+        disable_translation(dte);
 
         if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
-            iommu_dte_set_iotlb((u32 *)dte, 0);
+            iommu_dte_set_iotlb(dte, 0);
 
-        amd_iommu_flush_device(iommu, req_id);
+        amd_iommu_flush_device(iommu, req_id, prev_domid);
 
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                         "domain = %d, paging mode = %d\n",
-                        req_id,
-                        get_field_from_reg_u32(((uint32_t *)dte)[2],
-                                               IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
-                                               IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT),
+                        req_id, prev_domid,
                         dom_iommu(domain)->arch.paging_mode);
     }
     spin_unlock_irqrestore(&iommu->lock, flags);
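
For readers tracking the change without the surrounding Xen sources, below is a minimal, self-contained sketch of the invariant the patch establishes: callers must latch the DomID from the DTE before rewriting the entry, and the flush then invalidates both the DTE and the IOMMU TLB for that previous DomID. All names here (flush_device(), struct dte, invalidate_iommu_pages_all(), and the field layout) are hypothetical stand-ins for illustration, not Xen's actual interfaces.

/*
 * Sketch of the post-patch flush ordering. Hypothetical stand-ins
 * throughout; only the ordering and the DOMID_INVALID convention
 * mirror the patch.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t domid_t;

#define DOMID_INVALID ((domid_t)0x7ff4) /* sentinel: no prior translations */

struct dte {
    int valid;       /* DTE has DMA translations enabled */
    domid_t domid;   /* DomID field of the device table entry */
};

/* Stand-ins for the real IOMMU command-queue operations. */
static void invalidate_dev_table_entry(uint16_t bdf)
{
    printf("INVALIDATE_DEVTAB_ENTRY bdf=%#x\n", (unsigned int)bdf);
}

static void invalidate_iommu_pages_all(domid_t domid)
{
    printf("INVALIDATE_IOMMU_PAGES (all addresses) domid=%u\n",
           (unsigned int)domid);
}

static void flush_command_buffer(void)
{
    printf("COMPLETION_WAIT\n");
}

/*
 * Post-patch semantics: invalidate the DTE, then also invalidate the
 * IOMMU TLB for the previous DomID, unless the caller signalled that
 * no translations were active.
 */
static void flush_device(uint16_t bdf, domid_t prev_domid)
{
    invalidate_dev_table_entry(bdf);
    flush_command_buffer();

    if ( prev_domid != DOMID_INVALID )
    {
        invalidate_iommu_pages_all(prev_domid);
        flush_command_buffer();
    }
}

int main(void)
{
    struct dte dte = { .valid = 1, .domid = 1 };
    uint16_t bdf = 0x0800; /* device 01:00.0 */

    /* Latch the DomID *before* rewriting the DTE ... */
    domid_t prev = dte.valid ? dte.domid : DOMID_INVALID;

    dte.domid = 2; /* ... e.g. when re-assigning the device */
    flush_device(bdf, prev);

    return 0;
}

The DOMID_INVALID path models the call sites above that legitimately skip the TLB flush: when the DTE had translations disabled, the DomID field carries no meaning, and the entity that disabled translations is relied upon to have flushed the TLB already.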