File 612634dc-x86-p2m-guard-identity-mappings.patch of Package xen.31136 (Project openSUSE:Leap:15.5:Update)
# Commit 753cb68e653002e89fdcd1c80e52905fdbfb78cb
# Date 2021-08-25 14:17:32 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/p2m: guard (in particular) identity mapping entries

Such entries, created by set_identity_p2m_entry(), should only be
destroyed by clear_identity_p2m_entry(). However, similarly, entries
created by set_mmio_p2m_entry() should only be torn down by
clear_mmio_p2m_entry(), so the logic gets based upon p2m_mmio_direct as
the entry type (separation between "ordinary" and 1:1 mappings would
require a further indicator to tell apart the two).

As to the guest_remove_page() change, commit 48dfb297a20a ("x86/PVH:
allow guest_remove_page to remove p2m_mmio_direct pages"), which
introduced the call to clear_mmio_p2m_entry(), claimed this was done for
hwdom only without this actually having been the case. However, this
code shouldn't be there in the first place, as MMIO entries shouldn't be
dropped this way. Avoid triggering the warning again that 48dfb297a20a
silenced by an adjustment to xenmem_add_to_physmap_one() instead.

Note that guest_physmap_mark_populate_on_demand() gets tightened beyond
the immediate purpose of this change.

Note also that I didn't inspect code which isn't security supported,
e.g. sharing, paging, or altp2m.

This is CVE-2021-28694 / part of XSA-378.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4809,7 +4809,9 @@ int xenmem_add_to_physmap_one(
 
     /* Remove previously mapped page if it was present. */
     prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
-    if ( mfn_valid(_mfn(prev_mfn)) )
+    if ( p2mt == p2m_mmio_direct )
+        rc = -EPERM;
+    else if ( mfn_valid(_mfn(prev_mfn)) )
     {
         if ( is_xen_heap_mfn(prev_mfn) )
             /* Xen heap frames are simply unhooked from this phys slot. */
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -795,7 +795,8 @@ p2m_remove_page(struct p2m_domain *p2m,
                            &cur_order, NULL);
 
         if ( p2m_is_valid(t) &&
-             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
+             (!mfn_valid(_mfn(mfn)) || t == p2m_mmio_direct ||
+              mfn + i != mfn_x(mfn_return)) )
             return -EILSEQ;
         i += (1UL << cur_order) -
              ((gfn_l + i) & ((1UL << cur_order) - 1));
@@ -873,7 +874,7 @@ guest_physmap_add_entry(struct domain *d
     if ( p2m_is_foreign(t) )
         return -EINVAL;
 
-    if ( !mfn_valid(mfn) )
+    if ( !mfn_valid(mfn) || t == p2m_mmio_direct )
     {
         ASSERT_UNREACHABLE();
         return -EINVAL;
@@ -919,7 +920,7 @@ guest_physmap_add_entry(struct domain *d
         }
         if ( p2m_is_special(ot) )
         {
-            /* Don't permit unmapping grant/foreign this way. */
+            /* Don't permit unmapping grant/foreign/direct-MMIO this way. */
             domain_crash(d);
             p2m_unlock(p2m);
 
@@ -1375,8 +1376,8 @@ int set_identity_p2m_entry(struct domain
  * order+1 for caller to retry with order (guaranteed smaller than
  * the order value passed in)
  */
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn,
-                         unsigned int order)
+static int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l,
+                                mfn_t mfn, unsigned int order)
 {
     int rc = -EINVAL;
     gfn_t gfn = _gfn(gfn_l);
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1297,17 +1297,17 @@ guest_physmap_mark_populate_on_demand(st
 
         p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, &cur_order, NULL);
         n = 1UL << min(order, cur_order);
-        if ( p2m_is_ram(ot) )
+        if ( ot == p2m_populate_on_demand )
+        {
+            /* Count how many PoD entries we'll be replacing if successful */
+            pod_count += n;
+        }
+        else if ( ot != p2m_invalid && ot != p2m_mmio_dm )
         {
             P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
             rc = -EBUSY;
             goto out;
         }
-        else if ( ot == p2m_populate_on_demand )
-        {
-            /* Count how man PoD entries we'll be replacing if successful */
-            pod_count += n;
-        }
     }
 
     /* Now, actually do the two-way mapping */
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -336,7 +336,7 @@ int guest_remove_page(struct domain *d,
     }
     if ( p2mt == p2m_mmio_direct )
     {
-        rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
+        rc = -EPERM;
         goto out_put_gfn;
     }
 #else
@@ -1724,6 +1724,15 @@ int check_get_page_from_gfn(struct domai
         return -EAGAIN;
     }
 #endif
+#ifdef CONFIG_X86
+    if ( p2mt == p2m_mmio_direct )
+    {
+        if ( page )
+            put_page(page);
+
+        return -EPERM;
+    }
+#endif
 
     if ( !page )
         return -EINVAL;
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -143,7 +143,8 @@ typedef unsigned int p2m_query_t;
 
 /* Types established/cleaned up via special accessors. */
 #define P2M_SPECIAL_TYPES (P2M_GRANT_TYPES | \
-                           p2m_to_mask(p2m_map_foreign))
+                           p2m_to_mask(p2m_map_foreign) | \
+                           p2m_to_mask(p2m_mmio_direct))
 
 /* Valid types not necessarily associated with a (valid) MFN. */
 #define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES \
@@ -640,8 +641,6 @@ int set_foreign_p2m_entry(struct domain
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                        unsigned int order, p2m_access_t access);
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
-                         unsigned int order);
 
 /* Set identity addresses in the p2m table (for pass-through) */
 int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
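
Editorial note: the decisive change is the P2M_SPECIAL_TYPES hunk at the end. Once p2m_to_mask(p2m_mmio_direct) is part of that mask, p2m_is_special() reports true for direct-MMIO (and hence identity) entries, and the generic teardown paths patched above reject them with -EPERM or domain_crash() instead of silently dropping them. Below is a minimal standalone sketch of that bitmask test, compilable outside Xen; the enum values and the two-argument p2m_is_special() helper here are simplified assumptions for illustration, not Xen's actual definitions.

/* Standalone illustration of the P2M_SPECIAL_TYPES bitmask test.
 * Macro names mirror xen/include/asm-x86/p2m.h, but the enum values
 * and the helper signature are hypothetical simplifications. */
#include <stdio.h>

typedef enum {
    p2m_ram_rw,          /* ordinary guest RAM */
    p2m_grant_map_rw,    /* grant-table mapping */
    p2m_map_foreign,     /* foreign-domain mapping */
    p2m_mmio_direct,     /* direct-MMIO / identity mapping */
} p2m_type_t;

#define p2m_to_mask(t) (1UL << (t))

/* Before the patch: only grant and foreign entries are "special". */
#define P2M_SPECIAL_TYPES_OLD (p2m_to_mask(p2m_grant_map_rw) | \
                               p2m_to_mask(p2m_map_foreign))

/* After the patch: direct-MMIO entries are "special" too, so the
 * generic removal paths must leave them alone. */
#define P2M_SPECIAL_TYPES_NEW (P2M_SPECIAL_TYPES_OLD | \
                               p2m_to_mask(p2m_mmio_direct))

static int p2m_is_special(p2m_type_t t, unsigned long mask)
{
    return (p2m_to_mask(t) & mask) != 0;
}

int main(void)
{
    p2m_type_t t = p2m_mmio_direct;

    /* Prints 0 (removable via generic paths) for the pre-patch mask,
     * 1 (guarded, rejected with -EPERM) for the post-patch mask. */
    printf("pre-patch special:  %d\n", p2m_is_special(t, P2M_SPECIAL_TYPES_OLD));
    printf("post-patch special: %d\n", p2m_is_special(t, P2M_SPECIAL_TYPES_NEW));
    return 0;
}

An entry of a "special" type can then only be removed by the dedicated accessor that created it, e.g. clear_identity_p2m_entry() for identity mappings, which is exactly the invariant the commit message states.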