File 53aac342-x86-HVM-consolidate-and-sanitize-CR4-guest-reserved-bit-determination.patch of Package xen.8425 (Project SUSE:SLE-12-SP3:GA)
# Commit dab11417da4e21f43625f4ebbb68158f07003d04
# Date 2014-06-25 14:40:34 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/HVM: consolidate and sanitize CR4 guest reserved bit determination

First of all, this is needed by just a single source file, so it gets
moved there instead of getting fed to the compiler for most other
source files too.

With that it becomes sensible for this to no longer be a macro,
allowing elimination of the mostly redundant helpers
hvm_vcpu_has_{smep,smap}().

And finally, following the model SMEP and SMAP already used, tie the
determination of reserved bits to the features the guest is shown
rather than the host's.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -836,6 +836,73 @@ static bool_t hvm_efer_valid(struct doma
             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
 }
 
+/* These reserved bits in lower 32 remain 0 after any load of CR0 */
+#define HVM_CR0_GUEST_RESERVED_BITS             \
+    (~((unsigned long)                          \
+       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
+        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
+        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
+        X86_CR0_CD | X86_CR0_PG)))
+
+/* These bits in CR4 cannot be set by the guest. */
+static unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
+                                                 bool_t restore)
+{
+    unsigned int leaf1_ecx = 0, leaf1_edx = 0;
+    unsigned int leaf7_0_ebx = 0, leaf7_0_ecx = 0;
+
+    if ( likely(!restore) )
+    {
+        unsigned int level;
+
+        ASSERT(v == current);
+        hvm_cpuid(0, &level, NULL, NULL, NULL);
+        if ( level >= 1 )
+            hvm_cpuid(1, NULL, NULL, &leaf1_ecx, &leaf1_edx);
+        if ( level >= 7 )
+            hvm_cpuid(7, NULL, &leaf7_0_ebx, &leaf7_0_ecx, NULL);
+    }
+    else
+    {
+        leaf1_edx = boot_cpu_data.x86_capability[X86_FEATURE_VME / 32];
+        leaf1_ecx = boot_cpu_data.x86_capability[X86_FEATURE_PCID / 32];
+        leaf7_0_ebx = boot_cpu_data.x86_capability[X86_FEATURE_FSGSBASE / 32];
+    }
+
+    return ~(unsigned long)
+            ((leaf1_edx & cpufeat_mask(X86_FEATURE_VME) ?
+              X86_CR4_VME | X86_CR4_PVI : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_TSC) ?
+              X86_CR4_TSD : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_DE) ?
+              X86_CR4_DE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PSE) ?
+              X86_CR4_PSE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PAE) ?
+              X86_CR4_PAE : 0) |
+             (leaf1_edx & (cpufeat_mask(X86_FEATURE_MCE) |
+                           cpufeat_mask(X86_FEATURE_MCA)) ?
+              X86_CR4_MCE : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_PGE) ?
+              X86_CR4_PGE : 0) |
+             X86_CR4_PCE |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_FXSR) ?
+              X86_CR4_OSFXSR : 0) |
+             (leaf1_edx & cpufeat_mask(X86_FEATURE_XMM) ?
+              X86_CR4_OSXMMEXCPT : 0) |
+             ((restore || nestedhvm_enabled(v->domain)) &&
+              (leaf1_ecx & cpufeat_mask(X86_FEATURE_VMXE)) ?
+              X86_CR4_VMXE : 0) |
+             (leaf7_0_ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) ?
+              X86_CR4_FSGSBASE : 0) |
+             (leaf1_ecx & cpufeat_mask(X86_FEATURE_PCID) ?
+              X86_CR4_PCIDE : 0) |
+             (leaf1_ecx & cpufeat_mask(X86_FEATURE_XSAVE) ?
+              X86_CR4_OSXSAVE : 0) |
+             (leaf7_0_ebx & cpufeat_mask(X86_FEATURE_SMEP) ?
+              X86_CR4_SMEP : 0));
+}
+
 static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     int vcpuid;
@@ -866,7 +933,7 @@ static int hvm_load_cpu_ctxt(struct doma
         return -EINVAL;
     }
 
-    if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS(v, 1) )
+    if ( ctxt.cr4 & hvm_cr4_guest_reserved_bits(v, 1) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
                d->domain_id, ctxt.cr4);
@@ -1999,7 +2066,7 @@ int hvm_set_cr4(unsigned long value)
     struct vcpu *v = current;
     unsigned long old_cr;
 
-    if ( value & HVM_CR4_GUEST_RESERVED_BITS(v, 0) )
+    if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
     {
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Guest attempts to set reserved bit in CR4: %lx",
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -350,51 +350,10 @@ static inline int hvm_event_pending(stru
     return hvm_funcs.event_pending(v);
 }
 
-static inline bool_t hvm_vcpu_has_smep(void)
-{
-    unsigned int eax, ebx;
-
-    hvm_cpuid(0, &eax, NULL, NULL, NULL);
-
-    if ( eax < 7 )
-        return 0;
-
-    hvm_cpuid(7, NULL, &ebx, NULL, NULL);
-    return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
-}
-
-/* These reserved bits in lower 32 remain 0 after any load of CR0 */
-#define HVM_CR0_GUEST_RESERVED_BITS             \
-    (~((unsigned long)                          \
-       (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM |  \
-        X86_CR0_TS | X86_CR0_ET | X86_CR0_NE |  \
-        X86_CR0_WP | X86_CR0_AM | X86_CR0_NW |  \
-        X86_CR0_CD | X86_CR0_PG)))
-
 /* These bits in CR4 are owned by the host. */
 #define HVM_CR4_HOST_MASK (mmu_cr4_features & \
     (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
 
-/* These bits in CR4 cannot be set by the guest. */
-#define HVM_CR4_GUEST_RESERVED_BITS(v, restore) ({      \
-    const struct vcpu *_v = (v);                        \
-    bool_t _restore = !!(restore);                      \
-    ASSERT((_restore) || _v == current);                \
-    (~((unsigned long)                                  \
-       (X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |       \
-        X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
-        X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
-        X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
-        (((_restore) ? cpu_has_smep :                   \
-          hvm_vcpu_has_smep()) ?                        \
-         X86_CR4_SMEP : 0) |                            \
-        (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
-        ((nestedhvm_enabled(_v->domain) && cpu_has_vmx) \
-          ? X86_CR4_VMXE : 0) |                         \
-        (cpu_has_pcid ? X86_CR4_PCIDE : 0) |            \
-        (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))));       \
-})
-
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK ((1U << TRAP_debug) |           \
                        (1U << TRAP_invalid_op) |      \
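
Aside (not part of the patch): a minimal standalone sketch of the technique the
commit message describes -- compute the set of CR4 bits a guest may set from the
features the guest is shown, and treat the complement as reserved. The CR4_*
constants, the guest_features structure and cr4_guest_reserved_bits() below are
illustrative stand-ins, not Xen's real X86_CR4_* / cpufeat_mask() definitions.

#include <stdio.h>

/* Hypothetical CR4 bit positions (values mirror the x86 layout). */
#define CR4_VME   (1UL << 0)
#define CR4_TSD   (1UL << 2)
#define CR4_PSE   (1UL << 4)
#define CR4_PAE   (1UL << 5)
#define CR4_SMEP  (1UL << 20)

/* Feature flags as advertised to the guest (stand-in for CPUID leaves). */
struct guest_features {
    int vme, tsc, pse, pae, smep;
};

/* The allowed mask grows with the advertised features; everything else
 * is reserved, mirroring what hvm_cr4_guest_reserved_bits() does. */
static unsigned long cr4_guest_reserved_bits(const struct guest_features *f)
{
    unsigned long allowed = 0;

    if ( f->vme )  allowed |= CR4_VME;
    if ( f->tsc )  allowed |= CR4_TSD;
    if ( f->pse )  allowed |= CR4_PSE;
    if ( f->pae )  allowed |= CR4_PAE;
    if ( f->smep ) allowed |= CR4_SMEP;

    return ~allowed;
}

int main(void)
{
    /* A guest shown VME/TSC/PSE/PAE but not SMEP: setting CR4.SMEP
     * touches a reserved bit and would be rejected. */
    struct guest_features f = { .vme = 1, .tsc = 1, .pse = 1, .pae = 1 };
    unsigned long cr4 = CR4_PAE | CR4_SMEP;

    if ( cr4 & cr4_guest_reserved_bits(&f) )
        printf("CR4 %#lx sets reserved bits -> reject\n", cr4);

    return 0;
}

Basing the mask on guest-visible features rather than the host's (as the old
macro did with cpu_has_* checks) keeps a guest from setting CR4 bits for
features it was never shown, which is the sanitization the patch performs.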