File 5d9ee2a8-AMD-IOMMU-alloc-1-devtab-per-PCI-seg.patch of Package xen.25150 (project openSUSE:Leap:15.4:ARM)
# Commit fd345376a5421827dc2d1beaa8e361b8a7f54540
# Date 2019-10-10 09:50:00 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
AMD/IOMMU: allocate one device table per PCI segment

Having a single device table for all segments can't possibly be right.
(Even worse, the symbol wasn't static despite being used in just one
source file.) Attach the device tables to their respective IVRS mapping
ones.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -39,7 +39,6 @@ unsigned int __read_mostly ivrs_bdf_entr
 u8 __read_mostly ivhd_type;
 static struct radix_tree_root ivrs_maps;
 LIST_HEAD_READ_MOSTLY(amd_iommu_head);
-struct table_struct device_table;
 bool_t iommuv2_enabled;
 
 static bool iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
@@ -993,6 +992,12 @@ static void disable_iommu(struct amd_iom
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+static unsigned int __init dt_alloc_size(void)
+{
+    return PAGE_SIZE << get_order_from_bytes(ivrs_bdf_entries *
+                                             IOMMU_DEV_TABLE_ENTRY_SIZE);
+}
+
 static void __init deallocate_buffer(void *buf, uint32_t sz)
 {
     int order = 0;
@@ -1003,12 +1008,6 @@ static void __init deallocate_buffer(voi
     }
 }
 
-static void __init deallocate_device_table(struct table_struct *table)
-{
-    deallocate_buffer(table->buffer, table->alloc_size);
-    table->buffer = NULL;
-}
-
 static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
 {
     deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
@@ -1094,8 +1093,30 @@ static void * __init allocate_ppr_log(st
                            IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
 }
 
+/*
+ * Within ivrs_mappings[] we allocate an extra array element to store
+ * - segment number,
+ * - device table.
+ */
+#define IVRS_MAPPINGS_SEG(m) (m)[ivrs_bdf_entries].dte_requestor_id
+#define IVRS_MAPPINGS_DEVTAB(m) (m)[ivrs_bdf_entries].intremap_table
+
+/* Gets passed to radix_tree_destroy(), so its param needs to be void *. */
+static void __init free_ivrs_mapping_callback(void *ptr)
+{
+    const struct ivrs_mappings *ivrs_mappings = ptr;
+
+    if ( IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
+        deallocate_buffer(IVRS_MAPPINGS_DEVTAB(ivrs_mappings),
+                          dt_alloc_size());
+
+    xfree(ptr);
+}
+
 static int __init amd_iommu_init_one(struct amd_iommu *iommu, bool intr)
 {
+    const struct ivrs_mappings *ivrs_mappings;
+
     pci_hide_device(iommu->seg, PCI_BUS(iommu->bdf), PCI_DEVFN2(iommu->bdf));
 
     if ( allocate_cmd_buffer(iommu) == NULL )
@@ -1110,13 +1131,15 @@ static int __init amd_iommu_init_one(str
     if ( intr && !set_iommu_interrupt_handler(iommu) )
         goto error_out;
 
-    /* To make sure that device_table.buffer has been successfully allocated */
-    if ( device_table.buffer == NULL )
+    /* Make sure that the device table has been successfully allocated. */
+    ivrs_mappings = get_ivrs_mappings(iommu->seg);
+    if ( !IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
         goto error_out;
 
-    iommu->dev_table.alloc_size = device_table.alloc_size;
-    iommu->dev_table.entries = device_table.entries;
-    iommu->dev_table.buffer = device_table.buffer;
+    iommu->dev_table.alloc_size = dt_alloc_size();
+    iommu->dev_table.entries = iommu->dev_table.alloc_size /
+                               IOMMU_DEV_TABLE_ENTRY_SIZE;
+    iommu->dev_table.buffer = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
 
     enable_iommu(iommu);
     printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
@@ -1163,11 +1186,8 @@ static void __init amd_iommu_init_cleanu
         xfree(iommu);
     }
 
-    /* free device table */
-    deallocate_device_table(&device_table);
-
-    /* free ivrs_mappings[] */
-    radix_tree_destroy(&ivrs_maps, xfree);
+    /* Free ivrs_mappings[] and their device tables. */
+    radix_tree_destroy(&ivrs_maps, free_ivrs_mapping_callback);
 
     iommu_enabled = 0;
     iommu_hwdom_passthrough = false;
@@ -1175,12 +1195,6 @@ static void __init amd_iommu_init_cleanu
     iommuv2_enabled = 0;
 }
 
-/*
- * We allocate an extra array element to store the segment number
- * (and in the future perhaps other global information).
- */
-#define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id
-
 struct ivrs_mappings *get_ivrs_mappings(u16 seg)
 {
     return radix_tree_lookup(&ivrs_maps, seg);
@@ -1268,24 +1282,18 @@ static int __init alloc_ivrs_mappings(u1
 static int __init amd_iommu_setup_device_table(
     u16 seg, struct ivrs_mappings *ivrs_mappings)
 {
+    void *dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
     unsigned int bdf;
 
     BUG_ON( (ivrs_bdf_entries == 0) );
 
-    if ( !device_table.buffer )
+    if ( !dt )
     {
         /* allocate 'device table' on a 4K boundary */
-        device_table.alloc_size = PAGE_SIZE <<
-                                  get_order_from_bytes(
-                                  PAGE_ALIGN(ivrs_bdf_entries *
-                                  IOMMU_DEV_TABLE_ENTRY_SIZE));
-        device_table.entries = device_table.alloc_size /
-                               IOMMU_DEV_TABLE_ENTRY_SIZE;
-
-        device_table.buffer = allocate_buffer(device_table.alloc_size,
-                                              "Device Table");
+        dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
+            allocate_buffer(dt_alloc_size(), "Device Table");
     }
-    if ( !device_table.buffer )
+    if ( !dt )
        return -ENOMEM;
 
     /* Add device table entries */
@@ -1297,7 +1305,7 @@ static int __init amd_iommu_setup_device
         const struct pci_dev *pdev = NULL;
 
         /* add device table entry */
-        dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
+        dte = dt + bdf * IOMMU_DEV_TABLE_ENTRY_SIZE;
         iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]);
 
         if ( iommu_intremap &&
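
Note on sizing (not part of the patch): the new dt_alloc_size() helper rounds the per-segment device table up to a whole number of pages, and amd_iommu_setup_device_table() attaches that buffer to the spare ivrs_mappings[] slot at index ivrs_bdf_entries, so each PCI segment's table is reached through the same radix-tree lookup already used for the mappings themselves. Below is a minimal standalone sketch of the size computation only, assuming 4 KiB pages, 32-byte device table entries, and the full 16-bit BDF space (65536 entries); the get_order_from_bytes() stand-in is a simplified re-implementation for illustration, not the Xen original.

#include <stdio.h>

/* Assumed constants mirroring the values used by the patched code:
 * 4 KiB pages, 32-byte device table entries, and an ivrs_bdf_entries
 * value covering the full 16-bit bus/dev/fn space. */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1u << PAGE_SHIFT)
#define IOMMU_DEV_TABLE_ENTRY_SIZE 32
static unsigned int ivrs_bdf_entries = 0x10000;

/* Stand-in for Xen's get_order_from_bytes(): the smallest order such
 * that (PAGE_SIZE << order) covers the requested number of bytes. */
static unsigned int get_order_from_bytes(unsigned long bytes)
{
    unsigned int order = 0;

    while ( (PAGE_SIZE << order) < bytes )
        order++;
    return order;
}

/* Same expression as the patch's dt_alloc_size(). */
static unsigned int dt_alloc_size(void)
{
    return PAGE_SIZE << get_order_from_bytes((unsigned long)ivrs_bdf_entries *
                                             IOMMU_DEV_TABLE_ENTRY_SIZE);
}

int main(void)
{
    /* 65536 entries * 32 bytes = 2 MiB, already a power-of-two number
     * of pages, so the rounding loses nothing. */
    printf("device table size per segment: %u bytes\n", dt_alloc_size());
    return 0;
}

Under these assumptions each segment's device table comes out at exactly 2 MiB (65536 * 32 bytes), and freeing it later only needs the same dt_alloc_size() value, which is why free_ivrs_mapping_callback() can recompute the size instead of storing it per table.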