File 5a6b36cd-9-x86-issue-speculation-barrier.patch of Package xen.7316 (Project SUSE:SLE-12-SP3:GA)
# Commit a2ed643ed783020f885035432e9c0919756921d1
# Date 2018-01-26 14:10:21 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/ctxt: Issue a speculation barrier between vcpu contexts

Issuing an IBPB command flushes the Branch Target Buffer, so that any poison
left by one vcpu won't remain when beginning to execute the next.

The cost of IBPB is substantial, and skipped on transition to idle, as Xen's
idle code is robust already.  All transitions into vcpu context are fully
serialising in practice (and under consideration for being retroactively
declared architecturally serialising), so a cunning attacker cannot use SP1 to
try and skip the flush.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -238,7 +238,7 @@ enough. Setting this to a high value may
 the NMI watchdog is also enabled.
 
 ### bti (x86)
-> `= List of [ thunk=retpoline|lfence|jmp, ibrs=<bool>, rsb_{vmexit,native}=<bool> ]`
+> `= List of [ thunk=retpoline|lfence|jmp, ibrs=<bool>, ibpb=<bool>, rsb_{vmexit,native}=<bool> ]`
 
 Branch Target Injection controls.  By default, Xen will pick the most
 appropriate BTI mitigations based on compiled in support, loaded microcode,
@@ -257,6 +257,9 @@ On hardware supporting IBRS, the `ibrs=`
 prevent Xen using the feature itself.  If Xen is not using IBRS itself,
 functionality is still set up so IBRS can be virtualised for guests.
 
+On hardware supporting IBPB, the `ibpb=` option can be used to prevent Xen
+from issuing Branch Prediction Barriers on vcpu context switches.
+
 The `rsb_vmexit=` and `rsb_native=` options can be used to fine tune when the
 RSB gets overwritten.  There are individual controls for an entry from HVM
 context, and an entry from a native (PV or Xen) context.
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -62,6 +62,7 @@
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
 #include <asm/psr.h>
+#include <asm/spec_ctrl.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
@@ -1666,6 +1667,34 @@ void context_switch(struct vcpu *prev, s
             set_cpuid_faulting(is_pv_vcpu(next) &&
                                !is_control_domain(next->domain) &&
                                !is_hardware_domain(next->domain));
+
+        if ( opt_ibpb && !is_idle_domain(next->domain) )
+        {
+            static DEFINE_PER_CPU(unsigned int, last);
+            unsigned int *last_id = &this_cpu(last);
+
+            /*
+             * Squash the domid and vcpu id together for comparison
+             * efficiency.  We could in principle stash and compare the struct
+             * vcpu pointer, but this risks a false alias if a domain has died
+             * and the same 4k page gets reused for a new vcpu.
+             */
+            unsigned int next_id = (((unsigned int)next->domain->domain_id <<
+                                     16) | (uint16_t)next->vcpu_id);
+            BUILD_BUG_ON(MAX_VIRT_CPUS > 0xffff);
+
+            /*
+             * When scheduling from a vcpu, to idle, and back to the same vcpu
+             * (which might be common in a lightly loaded system, or when
+             * using vcpu pinning), there is no need to issue IBPB, as we are
+             * returning to the same security context.
+             */
+            if ( *last_id != next_id )
+            {
+                wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+                *last_id = next_id;
+            }
+        }
     }
 
     if (is_hvm_vcpu(next) && (prev != next) )
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -37,6 +37,7 @@ static enum ind_thunk {
 static int8_t __initdata opt_ibrs = -1;
 static bool_t __initdata opt_rsb_native = 1;
 static bool_t __initdata opt_rsb_vmexit = 1;
+bool_t __read_mostly opt_ibpb = 1;
 uint8_t __read_mostly default_bti_ist_info;
 
 static int __init parse_bti(const char *s)
@@ -64,6 +65,8 @@ static int __init parse_bti(const char *
         }
         else if ( (val = parse_boolean("ibrs", s, ss)) >= 0 )
             opt_ibrs = val;
+        else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
+            opt_ibpb = val;
         else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
             opt_rsb_native = val;
         else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
@@ -105,13 +108,14 @@ static void __init print_details(enum in
 #endif
 
     printk(XENLOG_INFO
-           "BTI mitigations: Thunk %s, Others:%s%s%s\n",
+           "BTI mitigations: Thunk %s, Others:%s%s%s%s\n",
            thunk == THUNK_NONE      ? "N/A" :
            thunk == THUNK_RETPOLINE ? "RETPOLINE" :
            thunk == THUNK_LFENCE    ? "LFENCE" :
            thunk == THUNK_JMP       ? "JMP" : "?",
            boot_cpu_has(X86_FEATURE_XEN_IBRS_SET)    ? " IBRS+" :
            boot_cpu_has(X86_FEATURE_XEN_IBRS_CLEAR)  ? " IBRS-"      : "",
+           opt_ibpb                                  ? " IBPB"       : "",
            boot_cpu_has(X86_FEATURE_RSB_NATIVE)      ? " RSB_NATIVE" : "",
            boot_cpu_has(X86_FEATURE_RSB_VMEXIT)      ? " RSB_VMEXIT" : "");
 }
@@ -278,6 +282,10 @@ void __init init_speculation_mitigations
     if ( opt_rsb_vmexit )
         __set_bit(X86_FEATURE_RSB_VMEXIT, boot_cpu_data.x86_capability);
 
+    /* Check we have hardware IBPB support before using it... */
+    if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
+        opt_ibpb = 0;
+
     /* (Re)init BSP state now that default_bti_ist_info has been calculated. */
     init_shadow_spec_ctrl_state();
 
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -24,6 +24,7 @@
 
 void init_speculation_mitigations(void);
 
+extern bool_t opt_ibpb;
 extern uint8_t default_bti_ist_info;
 
 static inline void init_shadow_spec_ctrl_state(void)
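The elision logic added to context_switch() above can be read in isolation: pack the (domid, vcpu id) pair into one word per CPU and only write MSR_PRED_CMD when that packed id changes. The standalone C sketch below models this behaviour outside of Xen; it is illustrative only, with a hypothetical issue_ibpb() counter standing in for wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB) and a single global variable in place of Xen's per-CPU state.

/*
 * Illustrative sketch (not Xen source): models the patch's "skip a
 * redundant IBPB" logic.  issue_ibpb(), ibpb_count and the main() driver
 * are hypothetical stand-ins; in Xen the barrier is the MSR write
 * wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB) and `last` is a per-CPU variable.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int last_id;    /* per-CPU in Xen; a single "CPU" here */
static unsigned int ibpb_count; /* barriers actually issued */

static void issue_ibpb(void)    /* stand-in for the MSR write */
{
    ibpb_count++;
}

/* Squash domid and vcpu id into one word, as the patch does. */
static unsigned int pack_id(uint16_t domid, uint16_t vcpu_id)
{
    return ((unsigned int)domid << 16) | vcpu_id;
}

/* Called on every switch into vcpu context; idle transitions are skipped. */
static void context_switch_to(uint16_t domid, uint16_t vcpu_id)
{
    unsigned int next_id = pack_id(domid, vcpu_id);

    if ( last_id != next_id )   /* new security context => flush the BTB */
    {
        issue_ibpb();
        last_id = next_id;
    }
}

int main(void)
{
    context_switch_to(1, 0);    /* dom1 vcpu0: barrier issued */
    /* ... switch to idle and back: no call, nothing to flush ... */
    context_switch_to(1, 0);    /* same vcpu again: barrier elided */
    context_switch_to(2, 3);    /* dom2 vcpu3: barrier issued */

    printf("IBPB issued %u time(s)\n", ibpb_count);  /* prints 2 */
    return 0;
}

Where the barrier is unwanted, the docs hunk above documents the knob this patch adds: passing ibpb=0 inside the bti= list on the Xen command line (for example bti=ibpb=0) prevents Xen from issuing the barrier on vcpu context switches.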