File 57360bc1-x86-suppress-SMEP-and-SMAP-while-running-32-bit-PV-guest-code.patch of Package xen.5682
References: bsc#949889

# Commit ea3e8edfdbabfb17f0d39ed128716ec464f348b8
# Date 2016-05-13 18:15:45 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86: suppress SMEP and SMAP while running 32-bit PV guest code

Since such guests' kernel code runs in ring 1, their memory accesses, at the
paging layer, are supervisor mode ones, and hence subject to SMAP/SMEP checks.
Such guests cannot be expected to be aware of those two features though (and
so far we also don't expose the respective feature flags), and hence may
suffer page faults they cannot deal with.

While the placement of the re-enabling slightly weakens the intended
protection, it was selected such that 64-bit paths would remain unaffected
where possible. At the expense of a further performance hit the re-enabling
could be put right next to the CLACs.

Note that this introduces a number of extra TLB flushes - CR4.SMEP
transitioning from 0 to 1 always causes a flush, and it transitioning from
1 to 0 may also do so.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

# Commit ad4aa3619f436e3ed79eea8498ac18aa8d5e6b83
# Date 2016-05-16 13:11:05 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/compat: Cleanup and further debugging of SMAP/SMEP fixup

* Abstract (X86_CR4_SMEP | X86_CR4_SMAP) behind XEN_CR4_PV32_BITS to avoid
  opencoding the individual bits which are fixed up behind a 32bit PV
  guest's back.
* Show cr4_pv32_mask in the BUG register dump

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>

# Commit e5e73163ec40b409151f2170d8e406a72b515ff2
# Date 2016-05-17 16:41:35 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: refine debugging of SMEP/SMAP fix

Instead of just latching cr4_pv32_mask into %rdx, correct the found wrong
value in %cr4 (to avoid triggering another BUG).

Also there is one more place for XEN_CR4_PV32_BITS to be used.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

# Commit 9e28baf22ec98a64f68757eff39df72173d5f1bb
# Date 2016-05-17 16:42:15 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: make SMEP/SMAP suppression tolerate NMI/MCE at the "wrong" time

There is one instruction boundary where any kind of interruption would break
the assumptions cr4_pv32_restore's debug mode checking makes on the
correlation between the CR4 register value and its in-memory cache. Correct
this (see the code comment) even in non-debug mode, or else a subsequent
cr4_pv32_restore would also be misguided into thinking the features are
enabled when they really aren't.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -66,6 +66,8 @@ invbool_param("smep", disable_smep);
 static bool_t __initdata disable_smap;
 invbool_param("smap", disable_smap);
 
+unsigned long __read_mostly cr4_pv32_mask;
+
 /* Boot dom0 in pvh mode */
 static bool_t __initdata opt_dom0pvh;
 boolean_param("dom0pvh", opt_dom0pvh);
@@ -1285,6 +1287,8 @@ void __init noreturn __start_xen(unsigne
     if ( cpu_has_smap )
         set_in_cr4(X86_CR4_SMAP);
 
+    cr4_pv32_mask = mmu_cr4_features & XEN_CR4_PV32_BITS;
+
     if ( cpu_has_fsgsbase )
         set_in_cr4(X86_CR4_FSGSBASE);
 
@@ -1403,7 +1407,10 @@ void __init noreturn __start_xen(unsigne
      * copy_from_user().
      */
     if ( cpu_has_smap )
+    {
+        cr4_pv32_mask &= ~X86_CR4_SMAP;
         write_cr4(read_cr4() & ~X86_CR4_SMAP);
+    }
 
     /*
      * We're going to setup domain0 using the module(s) that we stashed safely
@@ -1416,7 +1423,10 @@ void __init noreturn __start_xen(unsigne
         panic("Could not set up DOM0 guest OS");
 
     if ( cpu_has_smap )
+    {
         write_cr4(read_cr4() | X86_CR4_SMAP);
+        cr4_pv32_mask |= X86_CR4_SMAP;
+    }
 
     /* Scrub RAM that is still free and so may go to an unprivileged domain. */
     scrub_heap_pages();
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -135,6 +135,7 @@ void __dummy__(void)
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
     OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+    OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -16,14 +16,16 @@ ENTRY(compat_hypercall)
         ASM_CLAC
         pushq $0
         SAVE_VOLATILE type=TRAP_syscall compat=1
+        CR4_PV32_RESTORE
 
         cmpb  $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
         movl  $HYPERCALL_VECTOR,%edi
         call  check_for_unexpected_msi
-        LOAD_C_CLOBBERED
+        LOAD_C_CLOBBERED compat=1 ax=0
 UNLIKELY_END(msi_check)
 
+        movl  UREGS_rax(%rsp),%eax
         GET_CURRENT(%rbx)
 
         cmpl  $NR_hypercalls,%eax
@@ -33,7 +35,6 @@ UNLIKELY_END(msi_check)
         pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
         pushq UREGS_rbp+5*8(%rsp)
         leaq  compat_hypercall_args_table(%rip),%r10
-        movl  %eax,%eax
         movl  $6,%ecx
         subb  (%r10,%rax,1),%cl
         movq  %rsp,%rdi
@@ -48,7 +49,6 @@ UNLIKELY_END(msi_check)
 #define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
 #else
         /* Relocate argument registers and zero-extend to 64 bits. */
-        movl  %eax,%eax              /* Hypercall #  */
         xchgl %ecx,%esi              /* Arg 2, Arg 4 */
         movl  %edx,%edx              /* Arg 3        */
         movl  %edi,%r8d              /* Arg 5        */
@@ -174,6 +174,46 @@ compat_bad_hypercall:
 
 /* %rbx: struct vcpu, interrupts disabled */
 ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
+.Lcr4_orig:
+        ASM_NOP8 /* testb $3,UREGS_cs(%rsp) */
+        ASM_NOP2 /* jpe   .Lcr4_alt_end */
+        ASM_NOP8 /* mov   CPUINFO_cr4...(%rsp), %rax */
+        ASM_NOP6 /* and   $..., %rax */
+        ASM_NOP8 /* mov   %rax, CPUINFO_cr4...(%rsp) */
+        ASM_NOP3 /* mov   %rax, %cr4 */
+        ASM_NOP8 /* cmp   %rax, CPUINFO_cr4...(%rsp) */
+        ASM_NOP2 /* jne   1b */
+.Lcr4_orig_end:
+        .pushsection .altinstr_replacement, "ax"
+.Lcr4_alt:
+        testb $3,UREGS_cs(%rsp)
+        jpe   .Lcr4_alt_end
+        mov   CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp), %rax
+        and   $~XEN_CR4_PV32_BITS, %rax
+1:
+        mov   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
+        mov   %rax, %cr4
+        /*
+         * An NMI or MCE may have occurred between the previous two
+         * instructions, leaving register and cache in a state where the
+         * next exit from the guest would trigger the BUG in
+         * cr4_pv32_restore. If this happened, the cached value is no
+         * longer what we just set it to, which we can utilize to correct
+         * that state. Note that we do not have to fear this loop to cause
+         * a live lock: If NMIs/MCEs occurred at that high a rate, we'd be
+         * live locked anyway.
+         */
+        cmp   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
+        jne   1b
+.Lcr4_alt_end:
+        .section .altinstructions, "a"
+        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_SMEP, \
+                             (.Lcr4_orig_end - .Lcr4_orig), \
+                             (.Lcr4_alt_end - .Lcr4_alt)
+        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_SMAP, \
+                             (.Lcr4_orig_end - .Lcr4_orig), \
+                             (.Lcr4_alt_end - .Lcr4_alt)
+        .popsection
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
@@ -210,6 +250,38 @@ compat_failsafe_callback:
 _ASM_PRE_EXTABLE(.Lft0, .Lfx0)
 _ASM_EXTABLE(.Ldf0, compat_failsafe_callback)
 
+/* This mustn't modify registers other than %rax. */
+ENTRY(cr4_pv32_restore)
+        push  %rdx
+        GET_CPUINFO_FIELD(cr4, %rdx)
+        mov   (%rdx), %rax
+        test  $XEN_CR4_PV32_BITS, %eax
+        jnz   0f
+        or    cr4_pv32_mask(%rip), %rax
+        mov   %rax, %cr4
+        mov   %rax, (%rdx)
+        pop   %rdx
+        ret
+0:
+#ifndef NDEBUG
+        /* Check that _all_ of the bits intended to be set actually are. */
+        mov   %cr4, %rax
+        and   cr4_pv32_mask(%rip), %eax
+        cmp   cr4_pv32_mask(%rip), %eax
+        je    1f
+        /* Cause cr4_pv32_mask to be visible in the BUG register dump. */
+        mov   cr4_pv32_mask(%rip), %rdx
+        /* Avoid coming back here while handling the #UD we cause below. */
+        mov   %cr4, %rcx
+        or    %rdx, %rcx
+        mov   %rcx, %cr4
+        ud2
+1:
+#endif
+        pop   %rdx
+        xor   %eax, %eax
+        ret
+
 /* %rdx: trap_bounce, %rbx: struct vcpu */
 ENTRY(compat_post_handle_exception)
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
@@ -220,6 +292,7 @@ ENTRY(compat_post_handle_exception)
         jmp   compat_test_all_events
 
 ENTRY(compat_syscall)
+        CR4_PV32_RESTORE
         cmpb  $0,VCPU_syscall32_disables_events(%rbx)
         movzwl VCPU_syscall32_sel(%rbx),%esi
         movq  VCPU_syscall32_addr(%rbx),%rax
@@ -244,6 +317,7 @@ UNLIKELY_END(compat_syscall_gpf)
         jmp   .Lcompat_bounce_exception
 
 ENTRY(compat_sysenter)
+        CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
         cmpb  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
         movzwl VCPU_sysenter_sel(%rbx),%eax
@@ -257,6 +331,7 @@ ENTRY(compat_sysenter)
         jmp   compat_test_all_events
 
 ENTRY(compat_int80_direct_trap)
+        CR4_PV32_RESTORE
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -482,6 +482,7 @@ ENTRY(dom_crash_sync_extable)
 
 ENTRY(common_interrupt)
         SAVE_ALL CLAC
+        CR4_PV32_RESTORE
         movq %rsp,%rdi
         callq do_IRQ
         jmp ret_from_intr
@@ -502,13 +503,67 @@ ENTRY(page_fault)
 GLOBAL(handle_exception)
         SAVE_ALL CLAC
 handle_exception_saved:
+        GET_CURRENT(%rbx)
         testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
         jz    exception_with_ints_disabled
+
+.Lcr4_pv32_orig:
+        jmp   .Lcr4_pv32_done
+        .skip (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt) - (. - .Lcr4_pv32_orig), 0xcc
+        .pushsection .altinstr_replacement, "ax"
+.Lcr4_pv32_alt:
+        mov   VCPU_domain(%rbx),%rax
+.Lcr4_pv32_alt_end:
+        .section .altinstructions, "a"
+        altinstruction_entry .Lcr4_pv32_orig, .Lcr4_pv32_alt, \
+                             X86_FEATURE_SMEP, \
+                             (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt), \
+                             (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt)
+        altinstruction_entry .Lcr4_pv32_orig, .Lcr4_pv32_alt, \
+                             X86_FEATURE_SMAP, \
+                             (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt), \
+                             (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt)
+        .popsection
+
+        testb $3,UREGS_cs(%rsp)
+        jz    .Lcr4_pv32_done
+        cmpb  $0,DOMAIN_is_32bit_pv(%rax)
+        je    .Lcr4_pv32_done
+        call  cr4_pv32_restore
+        /*
+         * An NMI or #MC may occur between clearing CR4.SMEP / CR4.SMAP in
+         * compat_restore_all_guest and it actually returning to guest
+         * context, in which case the guest would run with the two features
+         * enabled. The only bad that can happen from this is a kernel mode
+         * #PF which the guest doesn't expect. Rather than trying to make
+         * the NMI/#MC exit path honor the intended CR4 setting, simply
+         * check whether the wrong CR4 was in use when the #PF occurred,
+         * and exit back to the guest (which will in turn clear the two CR4
+         * bits) to re-execute the instruction. If we get back here, the
+         * CR4 bits should then be found clear (unless another NMI/#MC
+         * occurred at exactly the right time), and we'll continue
+         * processing the exception as normal.
+         */
+        test  %rax,%rax
+        jnz   .Lcr4_pv32_done
+        /*
+         * The below effectively is
+         * if ( regs->entry_vector == TRAP_page_fault &&
+         *      (regs->error_code & PFEC_page_present) &&
+         *      !(regs->error_code & ~(PFEC_write_access|PFEC_insn_fetch)) )
+         *     goto compat_test_all_events;
+         */
+        mov   $PFEC_page_present,%al
+        cmpb  $TRAP_page_fault,UREGS_entry_vector(%rsp)
+        jne   .Lcr4_pv32_done
+        xor   UREGS_error_code(%rsp),%eax
+        test  $~(PFEC_write_access|PFEC_insn_fetch),%eax
+        jz    compat_test_all_events
+.Lcr4_pv32_done:
         sti
 1:      movq  %rsp,%rdi
         movzbl UREGS_entry_vector(%rsp),%eax
         leaq  exception_table(%rip),%rdx
-        GET_CURRENT(%rbx)
         PERFC_INCR(exceptions, %rax, %rbx)
         callq *(%rdx,%rax,8)
         testb $3,UREGS_cs(%rsp)
@@ -637,6 +692,7 @@ ENTRY(nmi)
         movl  $TRAP_nmi,4(%rsp)
 handle_ist_exception:
         SAVE_ALL CLAC
+        CR4_PV32_RESTORE
         testb $3,UREGS_cs(%rsp)
         jz    1f
         /* Interrupted guest context. Copy the context to stack bottom. */
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -178,6 +178,16 @@ void ret_from_intr(void);
 
 #define ASM_STAC ASM_AC(STAC)
 #define ASM_CLAC ASM_AC(CLAC)
+
+#define CR4_PV32_RESTORE                                           \
+        667: ASM_NOP5;                                             \
+        .pushsection .altinstr_replacement, "ax";                  \
+        668: call cr4_pv32_restore;                                \
+        .section .altinstructions, "a";                            \
+        altinstruction_entry 667b, 668b, X86_FEATURE_SMEP, 5, 5;   \
+        altinstruction_entry 667b, 668b, X86_FEATURE_SMAP, 5, 5;   \
+        .popsection
+
 #else
 static always_inline void clac(void)
 {
@@ -277,14 +287,18 @@ static always_inline void stac(void)
  *
  * For the way it is used in RESTORE_ALL, this macro must preserve EFLAGS.ZF.
  */
-.macro LOAD_C_CLOBBERED compat=0
+.macro LOAD_C_CLOBBERED compat=0 ax=1
 .if !\compat
         movq  UREGS_r11(%rsp),%r11
         movq  UREGS_r10(%rsp),%r10
         movq  UREGS_r9(%rsp),%r9
         movq  UREGS_r8(%rsp),%r8
-.endif
+.if \ax
         movq  UREGS_rax(%rsp),%rax
+.endif
+.elseif \ax
+        movl  UREGS_rax(%rsp),%eax
+.endif
         movq  UREGS_rcx(%rsp),%rcx
         movq  UREGS_rdx(%rsp),%rdx
         movq  UREGS_rsi(%rsp),%rsi
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -134,16 +134,18 @@
 #define TF_kernel_mode         (1<<_TF_kernel_mode)
 
 /* #PF error code values. */
-#define PFEC_page_present   (1U<<0)
-#define PFEC_write_access   (1U<<1)
-#define PFEC_user_mode      (1U<<2)
-#define PFEC_reserved_bit   (1U<<3)
-#define PFEC_insn_fetch     (1U<<4)
+#define PFEC_page_present   (_AC(1,U) << 0)
+#define PFEC_write_access   (_AC(1,U) << 1)
+#define PFEC_user_mode      (_AC(1,U) << 2)
+#define PFEC_reserved_bit   (_AC(1,U) << 3)
+#define PFEC_insn_fetch     (_AC(1,U) << 4)
 #define PFEC_page_paged     (1U<<5)
 #define PFEC_page_shared    (1U<<6)
 
 #define XEN_MINIMAL_CR4 (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)
 
+#define XEN_CR4_PV32_BITS (X86_CR4_SMEP | X86_CR4_SMAP)
+
 #define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|   \
                           X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF|   \
                           X86_EFLAGS_TF)
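The patch above is reproduced verbatim. As a reading aid only, the following is a minimal C sketch (not part of the patch) of the CR4 handling the new assembly implements: the global cr4_pv32_mask plus a per-CPU cached CR4 value are used to re-enable SMEP/SMAP on entry from 32-bit PV guest context (cr4_pv32_restore) and to suppress them again on exit (the alternative in compat_restore_all_guest), skipping the costly CR4 write and TLB flush when the bits are already in the desired state. The names cached_cr4 and hw_write_cr4() are hypothetical stand-ins for the real cpu_info->cr4 cache and the raw CR4 write.

/*
 * Illustrative sketch only -- not part of the patch.  Models the logic of
 * the assembly added above; cached_cr4 and hw_write_cr4() are hypothetical
 * stand-ins for cpu_info->cr4 and "mov %rax, %cr4".
 */
#define X86_CR4_SMEP        (1UL << 20)
#define X86_CR4_SMAP        (1UL << 21)
#define XEN_CR4_PV32_BITS   (X86_CR4_SMEP | X86_CR4_SMAP)

unsigned long cr4_pv32_mask;        /* set at boot from mmu_cr4_features */
static unsigned long cached_cr4;    /* stand-in for the per-CPU cpu_info->cr4 */

static void hw_write_cr4(unsigned long val)
{
    (void)val;                      /* real code: mov %rax, %cr4 */
}

/* Entry path (CR4_PV32_RESTORE sites): put SMEP/SMAP back if they were dropped. */
void cr4_pv32_restore_sketch(void)
{
    unsigned long val = cached_cr4;

    if ( val & XEN_CR4_PV32_BITS )
        return;                     /* already enabled: avoid the TLB flush */
    val |= cr4_pv32_mask;           /* only the bits actually enabled at boot */
    hw_write_cr4(val);              /* asm: or mask,%rax; mov %rax,%cr4 */
    cached_cr4 = val;               /* asm: mov %rax,(%rdx) */
}

/* Exit path (compat_restore_all_guest): drop SMEP/SMAP for ring-1 guest kernel code. */
void cr4_pv32_suppress_sketch(void)
{
    unsigned long val = cached_cr4 & ~XEN_CR4_PV32_BITS;

    /*
     * Cache first, then write CR4.  If an NMI/#MC lands between the two
     * stores and its entry path re-enables the bits (updating both CR4 and
     * the cache), the final compare fails and the sequence is retried --
     * this is the "cmp ... jne 1b" loop in the patched
     * compat_restore_all_guest.
     */
    do {
        cached_cr4 = val;
        hw_write_cr4(val);
    } while ( cached_cr4 != val );
}

The ordering difference between the two paths (CR4 before the cache on entry, cache before CR4 plus a re-check on exit) reflects what the fourth commit above relies on to tolerate an NMI/#MC arriving between the two stores.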