File 5b3cab8f-3-VMX-load-only-guest-MSR-entries.patch of Package xen.12882 (Project SUSE:SLE-12-SP2:Update)
# Commit 1ac46b55632626aeb935726e1b0a71605ef6763a
# Date 2018-07-04 12:12:15 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/vmx: Support load-only guest MSR list entries

Currently, the VMX_MSR_GUEST type maintains completely symmetric guest load
and save lists, by pointing VM_EXIT_MSR_STORE_ADDR and VM_ENTRY_MSR_LOAD_ADDR
at the same page, and setting VM_EXIT_MSR_STORE_COUNT and
VM_ENTRY_MSR_LOAD_COUNT to the same value.

However, for MSRs which we won't let the guest have direct access to, having
hardware save the current value on VMExit is unnecessary overhead.

To avoid this overhead, we must make the load and save lists asymmetric.  By
making the entry load count greater than the exit store count, we can
maintain two adjacent lists of MSRs, the first of which is saved and
restored, and the second of which is only restored on VMEntry.

For simplicity:
 * Both adjacent lists are still sorted by MSR index.
 * It is undefined behaviour to insert the same MSR into both lists.
 * The total size of both lists is still limited to 256 entries (one 4k page).

Split the current msr_count field into msr_{load,save}_count, introduce a new
VMX_MSR_GUEST_LOADONLY type, and update vmx_{add,find}_msr() to calculate
which sublist to search, based on type.  VMX_MSR_HOST has no logical sublist,
whereas VMX_MSR_GUEST has a sublist between 0 and the save count, while
VMX_MSR_GUEST_LOADONLY has a sublist between the save count and the load
count.

One subtle point is that inserting an MSR into the load-save list involves
moving the entire load-only list, and updating both counts.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1362,7 +1362,7 @@ struct vmx_msr_entry *vmx_find_msr(const
 {
     const struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
     struct vmx_msr_entry *start = NULL, *ent, *end;
-    unsigned int total;
+    unsigned int substart, subend, total;
 
     ASSERT(v == current || !vcpu_runnable(v));
 
@@ -1370,12 +1370,23 @@ struct vmx_msr_entry *vmx_find_msr(const
     {
     case VMX_MSR_HOST:
         start    = vmx->host_msr_area;
-        total    = vmx->host_msr_count;
+        substart = 0;
+        subend   = vmx->host_msr_count;
+        total    = subend;
         break;
 
     case VMX_MSR_GUEST:
         start    = vmx->msr_area;
-        total    = vmx->msr_count;
+        substart = 0;
+        subend   = vmx->msr_save_count;
+        total    = vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        start    = vmx->msr_area;
+        substart = vmx->msr_save_count;
+        subend   = vmx->msr_load_count;
+        total    = subend;
         break;
 
     default:
@@ -1386,7 +1397,7 @@ struct vmx_msr_entry *vmx_find_msr(const
         return NULL;
 
     end = start + total;
-    ent = locate_msr_entry(start, end, msr);
+    ent = locate_msr_entry(start + substart, start + subend, msr);
 
     return ((ent < end) && (ent->index == msr)) ? ent : NULL;
 }
@@ -1395,7 +1406,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
 {
     struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
     struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
-    unsigned int total;
+    unsigned int substart, subend, total;
     int rc;
 
     ASSERT(v == current || !vcpu_runnable(v));
 
@@ -1404,12 +1415,23 @@ int vmx_add_msr(struct vcpu *v, uint32_t
     {
     case VMX_MSR_HOST:
         ptr      = &vmx->host_msr_area;
-        total    = vmx->host_msr_count;
+        substart = 0;
+        subend   = vmx->host_msr_count;
+        total    = subend;
         break;
 
     case VMX_MSR_GUEST:
         ptr      = &vmx->msr_area;
-        total    = vmx->msr_count;
+        substart = 0;
+        subend   = vmx->msr_save_count;
+        total    = vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        ptr      = &vmx->msr_area;
+        substart = vmx->msr_save_count;
+        subend   = vmx->msr_load_count;
+        total    = subend;
         break;
 
     default:
@@ -1439,6 +1461,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
             break;
 
         case VMX_MSR_GUEST:
+        case VMX_MSR_GUEST_LOADONLY:
             __vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
             __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
             break;
@@ -1447,7 +1470,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
 
     start = *ptr;
     end   = start + total;
-    ent   = locate_msr_entry(start, end, msr);
+    ent   = locate_msr_entry(start + substart, start + subend, msr);
 
     if ( (ent < end) && (ent->index == msr) )
     {
@@ -1474,9 +1497,12 @@ int vmx_add_msr(struct vcpu *v, uint32_t
         break;
 
     case VMX_MSR_GUEST:
+        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
+
+        /* Fallthrough */
+    case VMX_MSR_GUEST_LOADONLY:
         ent->data = 0;
-        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_count);
-        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);
+        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);
         break;
     }
 
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -202,7 +202,8 @@ struct arch_vmx_struct {
      */
     struct vmx_msr_entry *msr_area;
     struct vmx_msr_entry *host_msr_area;
-    unsigned int         msr_count;
+    unsigned int         msr_load_count;
+    unsigned int         msr_save_count;
     unsigned int         host_msr_count;
 
     unsigned long        eoi_exitmap_changed;
@@ -590,8 +591,18 @@ extern const unsigned int vmx_introspect
 enum vmx_msr_list_type {
     VMX_MSR_HOST,           /* MSRs loaded on VMExit.                   */
     VMX_MSR_GUEST,          /* MSRs saved on VMExit, loaded on VMEntry. */
+    VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only.             */
 };
 
+/**
+ * Add an MSR to an MSR list (inserting space for the entry if necessary).
+ *
+ * It is undefined behaviour to try and insert the same MSR into both the
+ * GUEST and GUEST_LOADONLY list.
+ *
+ * May fail if unable to allocate memory for the list, or the total number of
+ * entries exceeds the memory allocated.
+ */
 int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
 
 static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr)
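
The hunks above hinge on the sublist arithmetic described in the commit message: the guest MSR page holds the save-and-restore entries in [0, msr_save_count) and the load-only entries in [msr_save_count, msr_load_count), with VM_EXIT_MSR_STORE_COUNT set to the former bound and VM_ENTRY_MSR_LOAD_COUNT to the latter. The standalone C sketch below models only that layout and the substart/subend selection; it is an illustration, not Xen code. The names msr_page, sublist and find_entry, and the example MSR indices, are invented for the sketch; the real logic lives in vmx_find_msr(), vmx_add_msr() and locate_msr_entry() as patched above.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a VMX MSR-area entry (index plus 64-bit data). */
struct msr_entry {
    uint32_t index;
    uint64_t data;
};

/* Only the two guest-side list types matter for the sublist split. */
enum msr_list_type { MSR_GUEST, MSR_GUEST_LOADONLY };

/*
 * Model of the guest MSR page after the patch: one array, where entries
 * [0, save_count) are saved on VMExit and loaded on VMEntry, and entries
 * [save_count, load_count) are loaded on VMEntry only.
 */
struct msr_page {
    struct msr_entry ent[256];   /* one 4k page worth of entries     */
    unsigned int save_count;     /* mirrors VM_EXIT_MSR_STORE_COUNT  */
    unsigned int load_count;     /* mirrors VM_ENTRY_MSR_LOAD_COUNT  */
};

/* Compute the sublist bounds for a type, as vmx_{add,find}_msr() now do. */
static void sublist(const struct msr_page *p, enum msr_list_type type,
                    unsigned int *substart, unsigned int *subend)
{
    if ( type == MSR_GUEST )          /* saved and restored entries */
    {
        *substart = 0;
        *subend   = p->save_count;
    }
    else                              /* restored-only entries */
    {
        *substart = p->save_count;
        *subend   = p->load_count;
    }
}

/* Linear stand-in for locate_msr_entry(): search one sublist only. */
static const struct msr_entry *find_entry(const struct msr_page *p,
                                          enum msr_list_type type,
                                          uint32_t msr)
{
    unsigned int substart, subend, i;

    sublist(p, type, &substart, &subend);

    for ( i = substart; i < subend; i++ )
        if ( p->ent[i].index == msr )
            return &p->ent[i];

    return NULL;
}

int main(void)
{
    /* Two saved-and-restored MSRs followed by one load-only MSR; the
     * MSR indices are arbitrary values chosen for the illustration. */
    struct msr_page p = {
        .ent        = { { 0x186, 0 }, { 0x187, 0 }, { 0x122, 0 } },
        .save_count = 2,
        .load_count = 3,
    };

    printf("0x122 via GUEST_LOADONLY: %s\n",
           find_entry(&p, MSR_GUEST_LOADONLY, 0x122) ? "found" : "absent");
    printf("0x122 via GUEST:          %s\n",
           find_entry(&p, MSR_GUEST, 0x122) ? "found" : "absent");
    return 0;
}

Running the sketch reports that MSR 0x122 is reachable only through the load-only sublist, which is why vmx_find_msr() and vmx_add_msr() must be told which list type to operate on, and why inserting into the load-save sublist has to shift the load-only entries and bump both counts.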