SUSE:SLE-12-SP5:Update
xen.19021
File 5b9784f2-x86-HVM-split-page-straddling-accesses.patch of Package xen.19021
References: bsc#1094508

# Commit 3bdec530a5f50b212aa5fd05d97e7349e8bdba82
# Date 2018-09-11 11:03:46 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: split page straddling emulated accesses in more cases

Assuming consecutive linear addresses map to all RAM or all MMIO is not
correct. Nor is assuming that a page straddling MMIO access will access
the same emulating component for both parts of the access.

If a guest RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access
straddles a page boundary, issue accesses separately for both parts.

The extra call to known_gla() from hvmemul_write() is just to preserve
original behavior; for consistency the check also gets added to
hvmemul_rmw() (albeit I continue to be unsure whether we wouldn't better
drop both).

Note that the correctness of this depends on the MMIO caching used
elsewhere in the emulation code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Olaf Hering <olaf@aepfle.de>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -769,7 +769,87 @@ static bool_t known_gla(unsigned long ad
     else if ( !vio->mmio_access.read_access )
         return 0;
 
-    return vio->mmio_gva == (addr & PAGE_MASK);
+    return (vio->mmio_gva == (addr & PAGE_MASK) &&
+            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
+}
+
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    int rc = hvm_copy_from_guest_virt(p_data, addr, bytes, pfec);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMCOPY_okay:
+        return X86EMUL_OKAY;
+
+    case HVMCOPY_bad_gva_to_gfn:
+        return X86EMUL_EXCEPTION;
+
+    case HVMCOPY_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt,
+                                            known_gla(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMCOPY_gfn_paged_out:
+    case HVMCOPY_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
+static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
+                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    int rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMCOPY_okay:
+        return X86EMUL_OKAY;
+
+    case HVMCOPY_bad_gva_to_gfn:
+        return X86EMUL_EXCEPTION;
+
+    case HVMCOPY_bad_gfn_to_mfn:
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                             hvmemul_ctxt,
+                                             known_gla(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMCOPY_gfn_paged_out:
+    case HVMCOPY_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
 }
 
 static int __hvmemul_read(
@@ -791,34 +871,12 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( known_gla(addr, bytes, pfec) )
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
     if ( (seg != x86_seg_none) &&
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
         pfec |= PFEC_user_mode;
 
-    rc = hvm_copy_from_guest_virt(p_data, addr, bytes, pfec);
-
-    switch ( rc )
-    {
-    case HVMCOPY_okay:
-        break;
-    case HVMCOPY_bad_gva_to_gfn:
-        return X86EMUL_EXCEPTION;
-    case HVMCOPY_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMCOPY_gfn_paged_out:
-    case HVMCOPY_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(
@@ -904,31 +962,11 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( known_gla(addr, bytes, pfec) )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
     if ( (seg != x86_seg_none) &&
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
         pfec |= PFEC_user_mode;
 
-    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
-
-    switch ( rc )
-    {
-    case HVMCOPY_okay:
-        break;
-    case HVMCOPY_bad_gva_to_gfn:
-        return X86EMUL_EXCEPTION;
-    case HVMCOPY_bad_gfn_to_mfn:
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMCOPY_gfn_paged_out:
-    case HVMCOPY_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+    return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_write_discard(
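Note on the split arithmetic: the sketch below is a minimal, self-contained illustration (it is not part of the patch and not Xen code) of how linear_read()/linear_write() decide whether an access fits within one page and, if not, split it at the page boundary into part1 = PAGE_SIZE - offset bytes followed by the remaining bytes - part1 bytes on the next page. PAGE_SHIFT, PAGE_SIZE, PAGE_MASK, split_access() and the sample addresses are local to this example only.

/*
 * Standalone sketch of the page-boundary split used by the patch above.
 * PAGE_* values are defined locally here purely for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void split_access(unsigned long addr, unsigned int bytes)
{
    unsigned int offset = addr & ~PAGE_MASK;   /* offset within the page */
    unsigned int part1;

    if ( offset + bytes <= PAGE_SIZE )
    {
        /* The whole access fits in one page: a single access suffices. */
        printf("single: addr=%#lx bytes=%u\n", addr, bytes);
        return;
    }

    /* Straddles the page boundary: issue the two parts separately. */
    part1 = PAGE_SIZE - offset;
    printf("part1:  addr=%#lx bytes=%u\n", addr, part1);
    printf("part2:  addr=%#lx bytes=%u\n", addr + part1, bytes - part1);
}

int main(void)
{
    split_access(0x10ffc, 4);  /* ends exactly at the page boundary */
    split_access(0x10ffe, 4);  /* straddles: 2 bytes + 2 bytes */
    return 0;
}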