File 5aec744a-6-x86-xpti-cr3-valid-flag.patch of Package xen.8389
From 065a499f78d5b644fa586e3e66f88949821e4f8c Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 26 Apr 2018 13:33:15 +0200
Subject: [PATCH] xen/x86: use flag byte for decision whether xen_cr3 is valid

Today cpu_info->xen_cr3 is either 0 to indicate %cr3 doesn't need to
be switched on entry to Xen, or negative for keeping the value while
indicating not to restore %cr3, or positive in case %cr3 is to be
restored.

Switch to use a flag byte instead of a negative xen_cr3 value in order
to allow %cr3 values with the high bit set in case we want to keep TLB
entries when using the PCID feature.

This reduces the number of branches in interrupt handling and results
in better performance (e.g. parallel make of the Xen hypervisor on my
system was using about 3% less system time).

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain.c              |  1 +
 xen/arch/x86/mm.c                  |  3 +-
 xen/arch/x86/smpboot.c             |  2 ++
 xen/arch/x86/x86_64/asm-offsets.c  |  1 +
 xen/arch/x86/x86_64/compat/entry.S |  5 ++-
 xen/arch/x86/x86_64/entry.S        | 63 ++++++++++++++++----------------------
 xen/include/asm-x86/current.h      | 12 +++++---
 7 files changed, 43 insertions(+), 44 deletions(-)

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1688,6 +1688,7 @@ void context_switch(struct vcpu *prev, s
 
     ASSERT(local_irq_is_enabled());
 
+    get_cpu_info()->use_pv_cr3 = false;
     get_cpu_info()->xen_cr3 = 0;
 
     cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -523,7 +523,8 @@ void write_ptbase(struct vcpu *v)
     }
     else
     {
-        /* Make sure to clear xen_cr3 before pv_cr3. */
+        /* Make sure to clear use_pv_cr3 and xen_cr3 before pv_cr3. */
+        cpu_info->use_pv_cr3 = false;
         cpu_info->xen_cr3 = 0;
         /* switch_cr3_cr4() serializes. */
         switch_cr3_cr4(v->arch.cr3, new_cr4);
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -330,6 +330,7 @@ void start_secondary(void *unused)
      */
     spin_debug_disable();
 
+    get_cpu_info()->use_pv_cr3 = false;
     get_cpu_info()->xen_cr3 = 0;
     get_cpu_info()->pv_cr3 = 0;
 
@@ -1135,6 +1136,7 @@ void __init smp_prepare_boot_cpu(void)
     per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
 #endif
 
+    get_cpu_info()->use_pv_cr3 = false;
     get_cpu_info()->xen_cr3 = 0;
     get_cpu_info()->pv_cr3 = 0;
 }
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -145,6 +145,7 @@ void __dummy__(void)
     OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
     OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
     OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
+    OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -224,10 +224,9 @@ ENTRY(cstar_enter)
         GET_STACK_END(bx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lcstar_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lcstar_cr3_okay:
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -189,6 +189,7 @@ restore_all_guest:
         rep movsq
 .Lrag_copy_done:
         mov   %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
+        movb  $1, STACK_CPUINFO_FIELD(use_pv_cr3)(%rdx)
         mov   %rax, %cr3
 .Lrag_keep_cr3:
 
@@ -237,14 +238,9 @@ restore_all_xen:
          * case we return to late PV exit code (from an NMI or #MC).
          */
         GET_STACK_END(bx)
-        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rdx
+        cmpb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+UNLIKELY_START(ne, exit_cr3)
         mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
-        test  %rdx, %rdx
-        /*
-         * Ideally the condition would be "nsz", but such doesn't exist,
-         * so "g" will have to do.
-         */
-UNLIKELY_START(g, exit_cr3)
         mov   %rax, %cr3
 UNLIKELY_END(exit_cr3)
 
@@ -286,10 +282,9 @@ ENTRY(lstar_enter)
         GET_STACK_END(bx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Llstar_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Llstar_cr3_okay:
@@ -323,10 +318,9 @@ GLOBAL(sysenter_eflags_saved)
         /* PUSHF above has saved EFLAGS.IF clear (the caller had it set). */
         orl   $X86_EFLAGS_IF, UREGS_eflags(%rsp)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lsyse_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lsyse_cr3_okay:
@@ -373,10 +367,9 @@ ENTRY(int80_direct_trap)
         GET_STACK_END(bx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lint80_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lint80_cr3_okay:
@@ -563,24 +556,24 @@ ENTRY(common_interrupt)
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %bl
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lintr_cr3_okay
-        jns   .Lintr_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.Lintr_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         mov   %rcx, %cr3
         xor   %ecx, %ecx
         mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         testb $3, UREGS_cs(%rsp)
         cmovnz %rcx, %r15
+        cmovnz %rcx, %rbx
 .Lintr_cr3_okay:
 
         CR4_PV32_RESTORE
         movq %rsp,%rdi
         callq do_IRQ
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp ret_from_intr
 
 ENTRY(page_fault)
@@ -595,18 +588,17 @@ GLOBAL(handle_exception)
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %r13b
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lxcpt_cr3_okay
-        jns   .Lxcpt_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.Lxcpt_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         mov   %rcx, %cr3
         xor   %ecx, %ecx
         mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         testb $3, UREGS_cs(%rsp)
         cmovnz %rcx, %r15
+        cmovnz %rcx, %r13
 .Lxcpt_cr3_okay:
 
 handle_exception_saved:
@@ -675,6 +667,7 @@ handle_exception_saved:
         mov   (%rdx, %rax, 8), %rdx
         INDIRECT_CALL %rdx
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %r13b, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
         movq  VCPU_domain(%rbx),%rax
@@ -702,6 +695,7 @@ exception_with_ints_disabled:
 1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
         movq  %rax,UREGS_kernel_sizeof(%rsp)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %r13b, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp   restore_all_xen           # return to fixup code
 
 /* No special register assumptions. */
@@ -787,12 +781,9 @@ ENTRY(double_fault)
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rbx
-        neg   %rbx
+        test  %rbx, %rbx
         jz    .Ldblf_cr3_okay
-        jns   .Ldblf_cr3_load
-        mov   %rbx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rbx
-.Ldblf_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         mov   %rbx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
 .Ldblf_cr3_okay:
@@ -822,13 +813,11 @@ handle_ist_exception:
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %bl
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .List_cr3_okay
-        jns   .List_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.List_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
 .List_cr3_okay:
@@ -841,6 +830,7 @@ handle_ist_exception:
          * and copy the context to stack bottom.
          */
         xor   %r15, %r15
+        xor   %ebx, %ebx
         GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
         movq  %rsp,%rsi
         movl  $UREGS_kernel_sizeof/8,%ecx
@@ -852,6 +842,7 @@ handle_ist_exception:
         mov   (%rdx, %rax, 8), %rdx
         INDIRECT_CALL %rdx
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         cmpb  $TRAP_nmi,UREGS_entry_vector(%rsp)
         jne   ret_from_intr
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -47,10 +47,7 @@ struct cpu_info {
      * context is being entered. A value of zero indicates no setting of CR3
      * is to be performed.
      * The former is the value to restore when re-entering Xen, if any. IOW
-     * its value being zero means there's nothing to restore. However, its
-     * value can also be negative, indicating to the exit-to-Xen code that
-     * restoring is not necessary, but allowing any nested entry code paths
-     * to still know the value to put back into CR3.
+     * its value being zero means there's nothing to restore.
      */
     unsigned long xen_cr3;
     unsigned long pv_cr3;
@@ -68,6 +65,13 @@ struct cpu_info {
      */
     bool root_pgt_changed;
 
+    /*
+     * use_pv_cr3 is set in case the value of pv_cr3 is to be written into
+     * CR3 when returning from an interrupt. The main use is when returning
+     * from a NMI or MCE to hypervisor code where pv_cr3 was active.
+     */
+    bool use_pv_cr3;
+
     unsigned long __pad;    /* get_stack_bottom() must be 16-byte aligned */
 };
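Note (not part of the patch): below is a minimal C sketch of the encoding change the commit message describes. The names cpu_info_sketch, enter_xen, exit_to_xen and hw_cr3 are hypothetical stand-ins for illustration; the real fields are xen_cr3, pv_cr3 and use_pv_cr3 in struct cpu_info, changed in xen/include/asm-x86/current.h above, and the real logic lives in the entry.S assembly paths.

/*
 * Illustrative sketch only -- not taken from the patch. It models the new
 * scheme: xen_cr3 is either 0 ("nothing to load on entry to Xen") or the
 * value to load, and the separate use_pv_cr3 flag tells the exit-to-Xen
 * path whether pv_cr3 must be written back. With no sign trick needed,
 * the full 64-bit range of xen_cr3 stays usable (e.g. for a PCID
 * no-flush bit in the high bit, as the commit message mentions).
 */
#include <stdbool.h>
#include <stdint.h>

struct cpu_info_sketch {      /* hypothetical stand-in for struct cpu_info */
    uint64_t xen_cr3;         /* CR3 to load when entering Xen, 0 if none  */
    uint64_t pv_cr3;          /* CR3 to load when exiting to the PV guest  */
    bool use_pv_cr3;          /* exit-to-Xen path must restore pv_cr3      */
};

/* Entry to Xen: a plain test for zero replaces the old neg/jns games. */
void enter_xen(struct cpu_info_sketch *ci, uint64_t *hw_cr3)
{
    if ( ci->xen_cr3 )
    {
        ci->use_pv_cr3 = false;   /* nested exits must not restore pv_cr3 */
        *hw_cr3 = ci->xen_cr3;    /* stands in for "mov %rcx, %cr3"       */
        ci->xen_cr3 = 0;
    }
}

/* Exit back to Xen context (e.g. after an NMI/#MC): one flag decides. */
void exit_to_xen(const struct cpu_info_sketch *ci, uint64_t *hw_cr3)
{
    if ( ci->use_pv_cr3 )
        *hw_cr3 = ci->pv_cr3;
}

Compared with the old tri-state xen_cr3 encoding (0 / negative "keep but do not restore" / positive "restore"), the flag byte drops the neg/jns branch pairs from every entry path and leaves xen_cr3 free to carry values with the high bit set, which is what enables the later PCID work the commit message refers to.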