File xsa452-1.patch of Package xen.35285 (Project SUSE:SLE-15-SP5:GA)

From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/entry: Introduce EFRAME_* constants

restore_all_guest() does a lot of manipulation of the stack after popping the
GPRs, and uses raw %rsp displacements to do so.  Also, almost all entrypaths
use raw %rsp displacements prior to pushing GPRs.

Provide better mnemonics, to aid readability and reduce the chance of errors
when editing.

No functional change.  The resulting binary is identical.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 37541208f119a9c552c6c6c3246ea61be0d44035)

--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -49,6 +49,23 @@ void __dummy__(void)
     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
     BLANK();
 
+    /*
+     * EFRAME_* is for the entry/exit logic where %rsp is pointing at
+     * UREGS_error_code and GPRs are still/already guest values.
+     */
+#define OFFSET_EF(sym, mem)                                             \
+    DEFINE(sym, offsetof(struct cpu_user_regs, mem) -                   \
+                offsetof(struct cpu_user_regs, error_code))
+
+    OFFSET_EF(EFRAME_entry_vector,    entry_vector);
+    OFFSET_EF(EFRAME_rip,             rip);
+    OFFSET_EF(EFRAME_cs,              cs);
+    OFFSET_EF(EFRAME_eflags,          eflags);
+    OFFSET_EF(EFRAME_rsp,             rsp);
+    BLANK();
+
+#undef OFFSET_EF
+
     OFFSET(VCPU_processor, struct vcpu, processor);
     OFFSET(VCPU_domain, struct vcpu, domain);
     OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -14,7 +14,7 @@ ENTRY(entry_int82)
         ASM_CLAC
         pushq $0
-        movl  $HYPERCALL_VECTOR, 4(%rsp)
+        movl  $HYPERCALL_VECTOR, EFRAME_entry_vector(%rsp)
         SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
@@ -206,7 +206,7 @@ ENTRY(cstar_enter)
         pushq $FLAT_USER_CS32
         pushq %rcx
         pushq $0
-        movl  $TRAP_syscall, 4(%rsp)
+        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
         SAVE_ALL
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -178,23 +178,23 @@ restore_all_guest:
         SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
 
         RESTORE_ALL
-        testw $TRAP_syscall,4(%rsp)
+        testw $TRAP_syscall, EFRAME_entry_vector(%rsp)
         jz    iret_exit_to_guest
 
-        movq  24(%rsp),%r11           # RFLAGS
+        mov   EFRAME_eflags(%rsp), %r11
         andq  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
         orq   $X86_EFLAGS_IF,%r11
 
         /* Don't use SYSRET path if the return address is not canonical. */
-        movq  8(%rsp),%rcx
+        mov   EFRAME_rip(%rsp), %rcx
         sarq  $47,%rcx
         incl  %ecx
         cmpl  $1,%ecx
-        movq  8(%rsp),%rcx            # RIP
+        mov   EFRAME_rip(%rsp), %rcx
         ja    iret_exit_to_guest
 
-        cmpw  $FLAT_USER_CS32,16(%rsp)# CS
-        movq  32(%rsp),%rsp           # RSP
+        cmpw  $FLAT_USER_CS32, EFRAME_cs(%rsp)
+        mov   EFRAME_rsp(%rsp), %rsp
         je    1f
         sysretq
 1:      sysretl
@@ -202,8 +202,8 @@ restore_all_guest:
         ALIGN
 /* No special register assumptions. */
 iret_exit_to_guest:
-        andl  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
-        orl   $X86_EFLAGS_IF,24(%rsp)
+        andl  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), EFRAME_eflags(%rsp)
+        orl   $X86_EFLAGS_IF, EFRAME_eflags(%rsp)
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
@@ -232,7 +232,7 @@ ENTRY(lstar_enter)
         pushq $FLAT_KERNEL_CS64
         pushq %rcx
         pushq $0
-        movl  $TRAP_syscall, 4(%rsp)
+        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
         SAVE_ALL
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
@@ -266,7 +266,7 @@ GLOBAL(sysenter_eflags_saved)
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
         pushq $0
-        movl  $TRAP_syscall, 4(%rsp)
+        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
         SAVE_ALL
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
@@ -317,7 +317,7 @@ UNLIKELY_END(sysenter_gpf)
 ENTRY(int80_direct_trap)
         ASM_CLAC
         pushq $0
-        movl  $0x80, 4(%rsp)
+        movl  $0x80, EFRAME_entry_vector(%rsp)
         SAVE_ALL
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
@@ -612,7 +612,7 @@ ENTRY(common_interrupt)
         jmp ret_from_intr
 
 ENTRY(page_fault)
-        movl  $TRAP_page_fault,4(%rsp)
+        movl  $TRAP_page_fault, EFRAME_entry_vector(%rsp)
 /* No special register assumptions. */
 GLOBAL(handle_exception)
         SAVE_ALL CLAC
@@ -741,71 +741,71 @@ FATAL_exception_with_ints_disabled:
 
 ENTRY(divide_error)
         pushq $0
-        movl  $TRAP_divide_error,4(%rsp)
+        movl  $TRAP_divide_error, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(coprocessor_error)
         pushq $0
-        movl  $TRAP_copro_error,4(%rsp)
+        movl  $TRAP_copro_error, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(simd_coprocessor_error)
         pushq $0
-        movl  $TRAP_simd_error,4(%rsp)
+        movl  $TRAP_simd_error, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(device_not_available)
         pushq $0
-        movl  $TRAP_no_device,4(%rsp)
+        movl  $TRAP_no_device, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(debug)
         pushq $0
-        movl  $TRAP_debug,4(%rsp)
+        movl  $TRAP_debug, EFRAME_entry_vector(%rsp)
         jmp   handle_ist_exception
 
 ENTRY(int3)
         pushq $0
-        movl  $TRAP_int3,4(%rsp)
+        movl  $TRAP_int3, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(overflow)
         pushq $0
-        movl  $TRAP_overflow,4(%rsp)
+        movl  $TRAP_overflow, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(bounds)
         pushq $0
-        movl  $TRAP_bounds,4(%rsp)
+        movl  $TRAP_bounds, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(invalid_op)
         pushq $0
-        movl  $TRAP_invalid_op,4(%rsp)
+        movl  $TRAP_invalid_op, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(invalid_TSS)
-        movl  $TRAP_invalid_tss,4(%rsp)
+        movl  $TRAP_invalid_tss, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(segment_not_present)
-        movl  $TRAP_no_segment,4(%rsp)
+        movl  $TRAP_no_segment, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(stack_segment)
-        movl  $TRAP_stack_error,4(%rsp)
+        movl  $TRAP_stack_error, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(general_protection)
-        movl  $TRAP_gp_fault,4(%rsp)
+        movl  $TRAP_gp_fault, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(alignment_check)
-        movl  $TRAP_alignment_check,4(%rsp)
+        movl  $TRAP_alignment_check, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
 ENTRY(double_fault)
-        movl  $TRAP_double_fault,4(%rsp)
+        movl  $TRAP_double_fault, EFRAME_entry_vector(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
         SAVE_ALL STAC
@@ -829,7 +829,7 @@ ENTRY(double_fault)
 
         .pushsection .init.text, "ax", @progbits
 ENTRY(early_page_fault)
-        movl  $TRAP_page_fault,4(%rsp)
+        movl  $TRAP_page_fault, EFRAME_entry_vector(%rsp)
         SAVE_ALL
         movq  %rsp,%rdi
         call  do_early_page_fault
@@ -838,7 +838,7 @@ ENTRY(early_page_fault)
 
 ENTRY(nmi)
         pushq $0
-        movl  $TRAP_nmi,4(%rsp)
+        movl  $TRAP_nmi, EFRAME_entry_vector(%rsp)
 handle_ist_exception:
         SAVE_ALL CLAC
@@ -915,7 +915,7 @@ handle_ist_exception:
 
 ENTRY(machine_check)
         pushq $0
-        movl  $TRAP_machine_check,4(%rsp)
+        movl  $TRAP_machine_check, EFRAME_entry_vector(%rsp)
         jmp   handle_ist_exception
 
 /* No op trap handler.  Required for kexec crash path. */
@@ -947,7 +947,7 @@ autogen_stubs: /* Automatically generate
 
         ALIGN
 1:      pushq $0
-        movb  $vec,4(%rsp)
+        movb  $vec, EFRAME_entry_vector(%rsp)
         jmp   common_interrupt
 
         entrypoint 1b
@@ -958,7 +958,7 @@ autogen_stubs: /* Automatically generate
 1:      test  $8,%spl        /* 64bit exception frames are 16 byte aligned, but the word */
         jz    2f             /* size is 8 bytes.  Check whether the processor gave us an */
         pushq $0             /* error code, and insert an empty one if not.              */
-2:      movb  $vec,4(%rsp)
+2:      movb  $vec, EFRAME_entry_vector(%rsp)
         jmp   handle_exception
 
         entrypoint 1b
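
Note on the mechanism: the OFFSET_EF macro added to asm-offsets.c emits, at build time, each frame field's displacement relative to error_code, since that is where %rsp sits on these entry/exit paths. The stand-alone C sketch below illustrates the same offsetof() arithmetic; the struct layout in it is a hypothetical stand-in, not Xen's real struct cpu_user_regs, and it is only meant to show why EFRAME_entry_vector works out to the raw 4(%rsp) displacement the patch replaces.

/* Illustrative sketch only: a minimal stand-in layout, not Xen's real
 * struct cpu_user_regs.  It shows how an OFFSET_EF-style macro derives
 * displacements relative to error_code, the slot %rsp points at. */
#include <stddef.h>
#include <stdio.h>

struct fake_user_regs {
    unsigned long  r15, r14, rax;      /* remaining GPRs elided           */
    unsigned int   error_code;         /* %rsp points here on entry/exit  */
    unsigned int   entry_vector;
    unsigned long  rip;
    unsigned short cs, _pad[3];
    unsigned long  eflags;
    unsigned long  rsp;
};

/* Offset of 'mem' as seen from a stack pointer parked on error_code. */
#define OFFSET_EF(mem)                                  \
    (offsetof(struct fake_user_regs, mem) -             \
     offsetof(struct fake_user_regs, error_code))

int main(void)
{
    /* With this stand-in layout the values come out as 4, 8, 16, 24 and 32,
     * matching the raw %rsp displacements the patch replaces. */
    printf("EFRAME_entry_vector = %zu\n", OFFSET_EF(entry_vector));
    printf("EFRAME_rip          = %zu\n", OFFSET_EF(rip));
    printf("EFRAME_cs           = %zu\n", OFFSET_EF(cs));
    printf("EFRAME_eflags       = %zu\n", OFFSET_EF(eflags));
    printf("EFRAME_rsp          = %zu\n", OFFSET_EF(rsp));
    return 0;
}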