File 5a5e2cff-x86-Meltdown-band-aid.patch of Package xen.8005
# Commit 5784de3e2067ed73efc2fe42e62831e8ae7f46c4
# Date 2018-01-16 17:49:03 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86: Meltdown band-aid against malicious 64-bit PV guests

This is a very simplistic change limiting the amount of memory a running
64-bit PV guest has mapped (and hence available for attacking): Only the
mappings of stack, IDT, and TSS are being cloned from the direct map into
per-CPU page tables. Guest controlled parts of the page tables are being
copied into those per-CPU page tables upon entry into the guest. Cross-vCPU
synchronization of top level page table entry changes is being effected by
forcing other active vCPU-s of the guest into the hypervisor.

The change to context_switch() isn't strictly necessary, but there's no
reason to keep switching page tables once a PV guest is being scheduled
out.

This isn't providing full isolation yet, but it should be covering all
pieces of information exposure of which would otherwise require an XSA.

There is certainly much room for improvement, especially of performance,
here - first and foremost suppressing all the negative effects on AMD
systems. But in the interest of backportability (including to really old
hypervisors, which may not even have alternative patching) any such is
being left out here.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1379,6 +1379,9 @@ static void paravirt_ctxt_switch_to(stru
     set_int80_direct_trap(v);
     switch_kernel_stack(v);
 
+    this_cpu(root_pgt)[root_table_offset(PERDOMAIN_VIRT_START)] =
+        l4e_from_page(v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
+
     cr4 = pv_guest_cr4_to_real_cr4(v);
     if ( unlikely(cr4 != read_cr4()) )
         write_cr4(cr4);
@@ -1517,6 +1520,8 @@ void context_switch(struct vcpu *prev, s
 
     ASSERT(local_irq_is_enabled());
 
+    get_cpu_info()->xen_cr3 = 0;
+
     cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
     /* Allow at most one CPU at a time to be dirty. */
     ASSERT(cpumask_weight(&dirty_mask) <= 1);
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -35,8 +35,6 @@
 
 __asm__(".section .text");
 
-BUILD_COMMON_IRQ()
-
 #define IRQ_NAME(nr) VEC##nr##_interrupt
 
 #define BI(nr)                                           \
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3635,6 +3635,7 @@ long do_mmu_update(
     struct vcpu *curr = current, *v = curr;
     struct domain *d = v->domain, *pt_owner = d, *pg_owner;
     struct domain_mmap_cache mapcache;
+    bool_t sync_guest = 0;
     uint32_t xsm_needed = 0;
     uint32_t xsm_checked = 0;
     int rc = put_old_guest_table(curr);
@@ -3820,6 +3821,8 @@ long do_mmu_update(
             case PGT_l4_page_table:
                 rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
                                   cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
+                if ( !rc )
+                    sync_guest = 1;
                 break;
             case PGT_writable_page:
                 perfc_incr(writable_mmu_updates);
@@ -3917,6 +3920,20 @@ long do_mmu_update(
 
     domain_mmap_cache_destroy(&mapcache);
 
+    if ( sync_guest )
+    {
+        /*
+         * Force other vCPU-s of the affected guest to pick up L4 entry
+         * changes (if any). Issue a flush IPI with empty operation mask to
+         * facilitate this (including ourselves waiting for the IPI to
+         * actually have arrived). Utilize the fact that FLUSH_VA_VALID is
+         * meaningless without FLUSH_CACHE, but will allow to pass the no-op
+         * check in flush_area_mask().
+         */
+        flush_area_mask(pt_owner->domain_dirty_cpumask,
+                        ZERO_BLOCK_PTR, FLUSH_VA_VALID);
+    }
+
     perfc_add(num_page_updates, i);
 
  out:
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -348,6 +348,9 @@ void start_secondary(void *unused)
      */
     spin_debug_disable();
 
+    get_cpu_info()->xen_cr3 = 0;
+    get_cpu_info()->pv_cr3 = __pa(this_cpu(root_pgt));
+
     percpu_traps_init();
 
     cpu_init();
@@ -635,6 +638,187 @@ void cpu_exit_clear(unsigned int cpu)
     set_cpu_state(CPU_STATE_DEAD);
 }
 
+static int clone_mapping(const void *ptr, root_pgentry_t *rpt)
+{
+    unsigned long linear = (unsigned long)ptr, pfn;
+    unsigned int flags;
+    l3_pgentry_t *pl3e = l4e_to_l3e(idle_pg_table[root_table_offset(linear)]) +
+                         l3_table_offset(linear);
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+
+    if ( linear < DIRECTMAP_VIRT_START )
+        return 0;
+
+    flags = l3e_get_flags(*pl3e);
+    ASSERT(flags & _PAGE_PRESENT);
+    if ( flags & _PAGE_PSE )
+    {
+        pfn = (l3e_get_pfn(*pl3e) & ~((1UL << (2 * PAGETABLE_ORDER)) - 1)) |
+              (PFN_DOWN(linear) & ((1UL << (2 * PAGETABLE_ORDER)) - 1));
+        flags &= ~_PAGE_PSE;
+    }
+    else
+    {
+        pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(linear);
+        flags = l2e_get_flags(*pl2e);
+        ASSERT(flags & _PAGE_PRESENT);
+        if ( flags & _PAGE_PSE )
+        {
+            pfn = (l2e_get_pfn(*pl2e) & ~((1UL << PAGETABLE_ORDER) - 1)) |
+                  (PFN_DOWN(linear) & ((1UL << PAGETABLE_ORDER) - 1));
+            flags &= ~_PAGE_PSE;
+        }
+        else
+        {
+            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(linear);
+            flags = l1e_get_flags(*pl1e);
+            if ( !(flags & _PAGE_PRESENT) )
+                return 0;
+            pfn = l1e_get_pfn(*pl1e);
+        }
+    }
+
+    if ( !(root_get_flags(rpt[root_table_offset(linear)]) & _PAGE_PRESENT) )
+    {
+        pl3e = alloc_xen_pagetable();
+        if ( !pl3e )
+            return -ENOMEM;
+        clear_page(pl3e);
+        l4e_write(&rpt[root_table_offset(linear)],
+                  l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
+    }
+    else
+        pl3e = l4e_to_l3e(rpt[root_table_offset(linear)]);
+
+    pl3e += l3_table_offset(linear);
+
+    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+    {
+        pl2e = alloc_xen_pagetable();
+        if ( !pl2e )
+            return -ENOMEM;
+        clear_page(pl2e);
+        l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
+    }
+    else
+    {
+        ASSERT(!(l3e_get_flags(*pl3e) & _PAGE_PSE));
+        pl2e = l3e_to_l2e(*pl3e);
+    }
+
+    pl2e += l2_table_offset(linear);
+
+    if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
+    {
+        pl1e = alloc_xen_pagetable();
+        if ( !pl1e )
+            return -ENOMEM;
+        clear_page(pl1e);
+        l2e_write(pl2e, l2e_from_paddr(__pa(pl1e), __PAGE_HYPERVISOR));
+    }
+    else
+    {
+        ASSERT(!(l2e_get_flags(*pl2e) & _PAGE_PSE));
+        pl1e = l2e_to_l1e(*pl2e);
+    }
+
+    pl1e += l1_table_offset(linear);
+
+    if ( l1e_get_flags(*pl1e) & _PAGE_PRESENT )
+    {
+        ASSERT(l1e_get_pfn(*pl1e) == pfn);
+        ASSERT(l1e_get_flags(*pl1e) == flags);
+    }
+    else
+        l1e_write(pl1e, l1e_from_pfn(pfn, flags));
+
+    return 0;
+}
+
+DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
+
+static int setup_cpu_root_pgt(unsigned int cpu)
+{
+    root_pgentry_t *rpt = alloc_xen_pagetable();
+    unsigned int off;
+    int rc;
+
+    if ( !rpt )
+        return -ENOMEM;
+
+    clear_page(rpt);
+    per_cpu(root_pgt, cpu) = rpt;
+
+    rpt[root_table_offset(RO_MPT_VIRT_START)] =
+        idle_pg_table[root_table_offset(RO_MPT_VIRT_START)];
+    /* SH_LINEAR_PT inserted together with guest mappings. */
+    /* PERDOMAIN inserted during context switch. */
+    rpt[root_table_offset(XEN_VIRT_START)] =
+        idle_pg_table[root_table_offset(XEN_VIRT_START)];
+
+    /* Install direct map page table entries for stack, IDT, and TSS. */
+    for ( off = rc = 0; !rc && off < STACK_SIZE; off += PAGE_SIZE )
+        rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt);
+
+    if ( !rc )
+        rc = clone_mapping(idt_tables[cpu], rpt);
+    if ( !rc )
+        rc = clone_mapping(&per_cpu(init_tss, cpu), rpt);
+
+    return rc;
+}
+
+static void cleanup_cpu_root_pgt(unsigned int cpu)
+{
+    root_pgentry_t *rpt = per_cpu(root_pgt, cpu);
+    unsigned int r;
+
+    if ( !rpt )
+        return;
+
+    per_cpu(root_pgt, cpu) = NULL;
+
+    for ( r = root_table_offset(DIRECTMAP_VIRT_START);
+          r < root_table_offset(HYPERVISOR_VIRT_END); ++r )
+    {
+        l3_pgentry_t *l3t;
+        unsigned int i3;
+
+        if ( !(root_get_flags(rpt[r]) & _PAGE_PRESENT) )
+            continue;
+
+        l3t = l4e_to_l3e(rpt[r]);
+
+        for ( i3 = 0; i3 < L3_PAGETABLE_ENTRIES; ++i3 )
+        {
+            l2_pgentry_t *l2t;
+            unsigned int i2;
+
+            if ( !(l3e_get_flags(l3t[i3]) & _PAGE_PRESENT) )
+                continue;
+
+            ASSERT(!(l3e_get_flags(l3t[i3]) & _PAGE_PSE));
+            l2t = l3e_to_l2e(l3t[i3]);
+
+            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; ++i2 )
+            {
+                if ( !(l2e_get_flags(l2t[i2]) & _PAGE_PRESENT) )
+                    continue;
+
+                ASSERT(!(l2e_get_flags(l2t[i2]) & _PAGE_PSE));
+                free_xen_pagetable(l2e_to_l1e(l2t[i2]));
+            }
+
+            free_xen_pagetable(l2t);
+        }
+
+        free_xen_pagetable(l3t);
+    }
+
+    free_xen_pagetable(rpt);
+}
+
 static void cpu_smpboot_free(unsigned int cpu)
 {
     unsigned int order;
@@ -642,6 +826,8 @@ static void cpu_smpboot_free(unsigned in
     free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
     free_cpumask_var(per_cpu(cpu_core_mask, cpu));
 
+    cleanup_cpu_root_pgt(cpu);
+
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
     free_xenheap_pages(per_cpu(gdt_table, cpu), order);
 
@@ -694,6 +880,9 @@ static int cpu_smpboot_alloc(unsigned in
     set_ist(&idt_tables[cpu][TRAP_nmi],           IST_NONE);
     set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
 
+    if ( setup_cpu_root_pgt(cpu) )
+        goto oom;
+
     if ( zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
          zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
         return 0;
@@ -731,6 +920,8 @@ static struct notifier_block cpu_smpboot
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+    int rc;
+
     register_cpu_notifier(&cpu_smpboot_nfb);
 
     mtrr_aps_sync_begin();
@@ -744,6 +935,11 @@ void __init smp_prepare_cpus(unsigned in
 
     stack_base[0] = stack_start;
 
+    rc = setup_cpu_root_pgt(0);
+    if ( rc )
+        panic("Error %d setting up PV root page table\n", rc);
+    get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+
     if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
          !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
         panic("No memory for boot CPU sibling/core maps");
@@ -801,6 +997,8 @@ void __init smp_prepare_boot_cpu(void)
 {
     cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
     cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
+
+    get_cpu_info()->xen_cr3 = 0;
 }
 
 static void
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -134,6 +134,8 @@ void __dummy__(void)
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
     OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+    OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
+    OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -37,6 +37,32 @@ switch_to_kernel:
 /* %rbx: struct vcpu, interrupts disabled */
 restore_all_guest:
         ASSERT_INTERRUPTS_DISABLED
+
+        /* Copy guest mappings and switch to per-CPU root page table. */
+        mov   %cr3, %r9
+        GET_STACK_BASE(%rdx)
+        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rdx), %rdi
+        movabs $PADDR_MASK & PAGE_MASK, %rsi
+        movabs $DIRECTMAP_VIRT_START, %rcx
+        mov   %rdi, %rax
+        and   %rsi, %rdi
+        and   %r9, %rsi
+        add   %rcx, %rdi
+        add   %rcx, %rsi
+        mov   $ROOT_PAGETABLE_FIRST_XEN_SLOT, %ecx
+        mov   root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rsi), %r8
+        mov   %r8, root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rdi)
+        rep movsq
+        mov   $ROOT_PAGETABLE_ENTRIES - \
               ROOT_PAGETABLE_LAST_XEN_SLOT - 1, %ecx
+        sub   $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
                ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rsi
+        sub   $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
                ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
+        rep movsq
+        mov   %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
+        write_cr3 rax, rdi, rsi
+
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
@@ -101,6 +127,22 @@ failsafe_callback:
 
         ALIGN
 /* No special register assumptions. */
 restore_all_xen:
+        /*
+         * Check whether we need to switch to the per-CPU page tables, in
+         * case we return to late PV exit code (from an NMI or #MC).
+         */
+        GET_STACK_BASE(%rax)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rax), %rdx
+        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rax), %rax
+        test  %rdx, %rdx
+        /*
+         * Ideally the condition would be "nsz", but such doesn't exist,
+         * so "g" will have to do.
+         */
+UNLIKELY_START(g, exit_cr3)
+        write_cr3 rax, rdi, rsi
+UNLIKELY_END(exit_cr3)
+
         RESTORE_ALL adj=8
         iretq
@@ -128,7 +170,18 @@ ENTRY(syscall_enter)
         movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before saving */
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
-        GET_CURRENT(%rbx)
+
+        GET_STACK_BASE(%rbx)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
+        neg   %rcx
+        jz    .Llstar_cr3_okay
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+        neg   %rcx
+        write_cr3 rcx, r11, r12
+        movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+.Llstar_cr3_okay:
+
+        __GET_CURRENT(%rbx)
         movq  VCPU_domain(%rbx),%rcx
         testb $1,DOMAIN_is_32bit_pv(%rcx)
         jnz   compat_syscall
@@ -279,7 +332,18 @@ GLOBAL(sysenter_eflags_saved)
         pushq $0
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
-        GET_CURRENT(%rbx)
+
+        GET_STACK_BASE(%rbx)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
+        neg   %rcx
+        jz    .Lsyse_cr3_okay
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+        neg   %rcx
+        write_cr3 rcx, rdi, rsi
+        movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+.Lsyse_cr3_okay:
+
+        __GET_CURRENT(%rbx)
         cmpb  $0,VCPU_sysenter_disables_events(%rbx)
         movq  VCPU_sysenter_addr(%rbx),%rax
         setne %cl
@@ -314,6 +378,16 @@ ENTRY(int80_direct_trap)
         movl  $0x80, 4(%rsp)
         SAVE_ALL
 
+        GET_STACK_BASE(%rbx)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
+        neg   %rcx
+        jz    .Lint80_cr3_okay
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+        neg   %rcx
+        write_cr3 rcx, rdi, rsi
+        movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+.Lint80_cr3_okay:
+
         cmpb  $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
         movl  $0x80,%edi
@@ -321,7 +395,7 @@ UNLIKELY_START(ne, msi_check)
         LOAD_C_CLOBBERED
 UNLIKELY_END(msi_check)
 
-        GET_CURRENT(%rbx)
+        __GET_CURRENT(%rbx)
 
         /* Check that the callback is non-null. */
         leaq  VCPU_int80_bounce(%rbx),%rdx
@@ -466,6 +540,30 @@ ENTRY(dom_crash_sync_extable)
         xorl  %edi,%edi
         jmp   asm_domain_crash_synchronous /* Does not return */
 
+ENTRY(common_interrupt)
+        SAVE_ALL
+
+        GET_STACK_BASE(%r14)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   %rcx, %r15
+        neg   %rcx
+        jz    .Lintr_cr3_okay
+        jns   .Lintr_cr3_load
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        neg   %rcx
+.Lintr_cr3_load:
+        write_cr3 rcx, rdi, rsi
+        xor   %ecx, %ecx
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        testb $3, UREGS_cs(%rsp)
+        cmovnz %rcx, %r15
+.Lintr_cr3_okay:
+
+        movq  %rsp,%rdi
+        callq do_IRQ
+        mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        jmp   ret_from_intr
+
 /* No special register assumptions. */
 ENTRY(ret_from_intr)
         GET_CURRENT(%rbx)
@@ -481,6 +579,23 @@ ENTRY(page_fault)
 /* No special register assumptions. */
 GLOBAL(handle_exception)
         SAVE_ALL
+
+        GET_STACK_BASE(%r14)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   %rcx, %r15
+        neg   %rcx
+        jz    .Lxcpt_cr3_okay
+        jns   .Lxcpt_cr3_load
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        neg   %rcx
+.Lxcpt_cr3_load:
+        write_cr3 rcx, rdi, rsi
+        xor   %ecx, %ecx
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        testb $3, UREGS_cs(%rsp)
+        cmovnz %rcx, %r15
+.Lxcpt_cr3_okay:
+
 handle_exception_saved:
         testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
         jz    exception_with_ints_disabled
@@ -491,6 +606,7 @@ handle_exception_saved:
         GET_CURRENT(%rbx)
         PERFC_INCR(exceptions, %rax, %rbx)
         callq *(%rdx,%rax,8)
+        mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
         leaq  VCPU_trap_bounce(%rbx),%rdx
@@ -523,6 +639,7 @@ exception_with_ints_disabled:
         rep;  movsq                     # make room for ec/ev
 1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
         movq  %rax,UREGS_kernel_sizeof(%rsp)
+        mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         jmp   restore_all_xen           # return to fixup code
 
 /* No special register assumptions. */
@@ -610,6 +727,17 @@ ENTRY(spurious_interrupt_bug)
 ENTRY(double_fault)
         movl  $TRAP_double_fault,4(%rsp)
         SAVE_ALL
+
+        GET_STACK_BASE(%rbx)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rbx
+        test  %rbx, %rbx
+        jz    .Ldblf_cr3_okay
+        jns   .Ldblf_cr3_load
+        neg   %rbx
+.Ldblf_cr3_load:
+        write_cr3 rbx, rdi, rsi
+.Ldblf_cr3_okay:
+
         movq  %rsp,%rdi
         call  do_double_fault
         ud2
@@ -627,9 +755,27 @@ ENTRY(nmi)
         movl  $TRAP_nmi,4(%rsp)
 handle_ist_exception:
         SAVE_ALL
+
+        GET_STACK_BASE(%r14)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   %rcx, %r15
+        neg   %rcx
+        jz    .List_cr3_okay
+        jns   .List_cr3_load
+        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        neg   %rcx
+.List_cr3_load:
+        write_cr3 rcx, rdi, rsi
+        movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+.List_cr3_okay:
+
         testb $3,UREGS_cs(%rsp)
         jz    1f
-        /* Interrupted guest context. Copy the context to stack bottom. */
+        /*
+         * Interrupted guest context. Clear the restore value for xen_cr3
+         * and copy the context to stack bottom.
+         */
+        xor   %r15, %r15
         GET_CPUINFO_FIELD(guest_cpu_user_regs,%rdi)
         movq  %rsp,%rsi
         movl  $UREGS_kernel_sizeof/8,%ecx
@@ -639,6 +785,7 @@ handle_ist_exception:
         movzbl UREGS_entry_vector(%rsp),%eax
         leaq  exception_table(%rip),%rdx
         callq *(%rdx,%rax,8)
+        mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         cmpb  $TRAP_nmi,UREGS_entry_vector(%rsp)
         jne   ret_from_intr
 
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -69,6 +69,15 @@ void ret_from_intr(void);
         GET_STACK_BASE(reg);                      \
         __GET_CURRENT(reg)
 
+.macro write_cr3 val:req, tmp1:req, tmp2:req
+        mov   %cr4, %\tmp1
+        mov   %\tmp1, %\tmp2
+        and   $~X86_CR4_PGE, %\tmp1
+        mov   %\tmp1, %cr4
+        mov   %\val, %cr3
+        mov   %\tmp2, %cr4
+.endm
+
 #ifndef NDEBUG
 #define ASSERT_NOT_IN_ATOMIC                                             \
     sti;       /* sometimes called with interrupts disabled: safe to enable */ \
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -19,6 +19,18 @@ struct cpu_info {
     unsigned int processor_id;
     struct vcpu *current_vcpu;
     unsigned long per_cpu_offset;
+    /*
+     * Of the two following fields the latter is being set to the CR3 value
+     * to be used on the given pCPU for loading whenever 64-bit PV guest
+     * context is being entered. The value never changes once set.
+     * The former is the value to restore when re-entering Xen, if any. IOW
+     * its value being zero means there's nothing to restore. However, its
+     * value can also be negative, indicating to the exit-to-Xen code that
+     * restoring is not necessary, but allowing any nested entry code paths
+     * to still know the value to put back into CR3.
+     */
+    unsigned long xen_cr3;
+    unsigned long pv_cr3;
     /* get_stack_bottom() must be 16-byte aligned */
     unsigned long __pad_for_stack_bottom;
 };
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -449,6 +449,7 @@ extern idt_entry_t idt_table[];
 extern idt_entry_t *idt_tables[];
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU(root_pgentry_t *, root_pgt);
 
 extern void init_int80_direct_trap(struct vcpu *v);
 
--- a/xen/include/asm-x86/x86_64/asm_defns.h
+++ b/xen/include/asm-x86/x86_64/asm_defns.h
@@ -141,15 +141,6 @@
 #define REX64_PREFIX "rex64/"
 #endif
 
-#define BUILD_COMMON_IRQ()                      \
-__asm__(                                        \
-    "\n" __ALIGN_STR"\n"                        \
-    "common_interrupt:\n\t"                     \
-    STR(SAVE_ALL) "\n\t"                        \
-    "movq %rsp,%rdi\n\t"                        \
-    "callq " STR(do_IRQ) "\n\t"                 \
-    "jmp ret_from_intr\n");
-
 #define BUILD_IRQ(nr)                           \
     "pushq $0\n\t"                              \
     "movl $"#nr",4(%rsp)\n\t"                   \
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -25,8 +25,8 @@
 /* These are architectural limits. Current CPUs support only 40-bit phys. */
 #define PADDR_BITS              52
 #define VADDR_BITS              48
-#define PADDR_MASK              ((1UL << PADDR_BITS)-1)
-#define VADDR_MASK              ((1UL << VADDR_BITS)-1)
+#define PADDR_MASK              ((_AC(1,UL) << PADDR_BITS) - 1)
+#define VADDR_MASK              ((_AC(1,UL) << VADDR_BITS) - 1)
 
 #define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
 
@@ -138,6 +138,7 @@ typedef l4_pgentry_t root_pgentry_t;
       : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
          ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
 
+#define root_table_offset         l4_table_offset
 #define root_get_pfn              l4e_get_pfn
 #define root_get_flags            l4e_get_flags
 #define root_get_intpte           l4e_get_intpte
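For readers following the entry stubs above, here is a minimal C sketch of the xen_cr3/pv_cr3 convention the patch adds to struct cpu_info, as described in the xen/include/asm-x86/current.h comment: zero means nothing to restore, a positive value is the Xen CR3 to reload on entry from the guest, and the negated value is parked in the field so nested entry paths still know it. The struct and function names below are hypothetical stand-ins for illustration only, not Xen code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the two fields the patch adds to struct cpu_info. */
struct model_cpu_info {
    uint64_t xen_cr3;   /* 0: nothing to restore; negative (as signed): value
                         * known but no CR3 reload needed; positive: Xen CR3
                         * to switch back to on entry from the guest. */
    uint64_t pv_cr3;    /* per-CPU root page table used while the 64-bit PV
                         * guest runs; set once at CPU bring-up. */
};

/*
 * Models the "neg; jz okay; park negated value; neg; write_cr3; clear"
 * sequence used by the lstar/sysenter/int80 stubs in the patch.
 */
static void model_enter_xen(struct model_cpu_info *ci, uint64_t *hw_cr3)
{
    int64_t neg = -(int64_t)ci->xen_cr3;   /* neg %rcx */

    if ( neg == 0 )                        /* jz .L*_cr3_okay */
        return;                            /* already on Xen page tables */

    ci->xen_cr3 = (uint64_t)neg;           /* negative: nested NMI/#MC sees
                                            * "known, no reload needed" */
    *hw_cr3 = (uint64_t)-neg;              /* write_cr3 back to Xen's CR3 */
    ci->xen_cr3 = 0;                       /* nothing left to restore */
}

int main(void)
{
    /* Pretend a guest was running: CR3 = pv_cr3, xen_cr3 holds Xen's CR3. */
    struct model_cpu_info ci = { .xen_cr3 = 0x123000, .pv_cr3 = 0x456000 };
    uint64_t hw_cr3 = ci.pv_cr3;

    model_enter_xen(&ci, &hw_cr3);
    printf("CR3 after entry: %#llx, xen_cr3: %llu\n",
           (unsigned long long)hw_cr3, (unsigned long long)ci.xen_cr3);
    return 0;
}
```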