File 5b508775-1-x86-distinguish-CPU-offlining-and-removal.patch of Package xen.10697
# Commit 2e6c8f182c9c50129b1c7a620242861e6ad6a9fb
# Date 2018-07-19 13:43:33 +0100
# Author Jan Beulich <JBeulich@suse.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86: distinguish CPU offlining from CPU removal

In order to be able to service #MC on offlined CPUs, the GDT, IDT,
stack, and per-CPU data (which includes the TSS) need to be kept
allocated. They should only be freed upon CPU removal (which we
currently don't support, so some code is becoming effectively dead for
the moment).

Note that for now park_offline_cpus doesn't get set to true anywhere -
this is going to be the subject of a subsequent patch.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -680,12 +680,15 @@ static void cpu_bank_free(unsigned int c
     mcabanks_free(poll);
     mcabanks_free(clr);
+
+    per_cpu(poll_bankmask, cpu) = NULL;
+    per_cpu(mce_clear_banks, cpu) = NULL;
 }
 
 static int cpu_bank_alloc(unsigned int cpu)
 {
-    struct mca_banks *poll = mcabanks_alloc();
-    struct mca_banks *clr = mcabanks_alloc();
+    struct mca_banks *poll = per_cpu(poll_bankmask, cpu) ?: mcabanks_alloc();
+    struct mca_banks *clr = per_cpu(mce_clear_banks, cpu) ?: mcabanks_alloc();
 
     if ( !poll || !clr )
     {
@@ -712,7 +715,12 @@ static int cpu_callback(
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        cpu_bank_free(cpu);
+        if ( !park_offline_cpus )
+            cpu_bank_free(cpu);
+        break;
+    case CPU_REMOVE:
+        if ( park_offline_cpus )
+            cpu_bank_free(cpu);
         break;
     default:
         break;

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -151,10 +151,11 @@ static void play_dead(void)
     local_irq_disable();
 
     /*
-     * NOTE: After cpu_exit_clear, per-cpu variables are no longer accessible,
-     * as they may be freed at any time. In this case, heap corruption or
-     * #PF can occur (when heap debugging is enabled). For example, even
-     * printk() can involve tasklet scheduling, which touches per-cpu vars.
+     * NOTE: After cpu_exit_clear, per-cpu variables may no longer be
+     * accessible, as they may be freed at any time if offline CPUs don't get
+     * parked. In this case, heap corruption or #PF can occur (when heap
+     * debugging is enabled). For example, even printk() can involve tasklet
+     * scheduling, which touches per-cpu vars.
      *
      * Consider very carefully when adding code to *dead_idle. Most hypervisor
      * subsystems are unsafe to call.
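The mce.c hunk above shows, in miniature, the pattern this patch applies throughout: CPU_UP_PREPARE-time allocators re-use whatever a parked CPU left behind, the CPU_DEAD path frees only when offline CPUs are not parked, and the new CPU_REMOVE event frees when they are. A minimal sketch of that notifier shape, assuming hypothetical state_of()/alloc_state()/free_state() helpers (these are not Xen APIs):

/* Park-aware hotplug callback, sketched with made-up helpers. */
static int cpu_callback_sketch(unsigned long action, unsigned int cpu)
{
    switch ( action )
    {
    case CPU_UP_PREPARE:
        /* Re-use an allocation surviving from a parked CPU, if any. */
        if ( state_of(cpu) == NULL )
            state_of(cpu) = alloc_state(cpu);
        return state_of(cpu) ? 0 : -ENOMEM;

    case CPU_UP_CANCELED:
    case CPU_DEAD:
        if ( !park_offline_cpus )   /* plain offline: free immediately */
            free_state(cpu);
        break;                      /* parked: keep state for #MC handling */

    case CPU_REMOVE:
        if ( park_offline_cpus )    /* the CPU really goes away now */
            free_state(cpu);
        break;
    }
    return 0;
}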
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -198,18 +198,21 @@ static int update_clusterinfo(
         if ( !cluster_cpus_spare )
             cluster_cpus_spare = xzalloc(cpumask_t);
         if ( !cluster_cpus_spare ||
-             !alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
+             !cond_alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
             err = -ENOMEM;
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
+    case CPU_REMOVE:
+        if ( park_offline_cpus == (action != CPU_REMOVE) )
+            break;
         if ( per_cpu(cluster_cpus, cpu) )
         {
             cpumask_clear_cpu(cpu, per_cpu(cluster_cpus, cpu));
             if ( cpumask_empty(per_cpu(cluster_cpus, cpu)) )
-                xfree(per_cpu(cluster_cpus, cpu));
+                XFREE(per_cpu(cluster_cpus, cpu));
         }
-        free_cpumask_var(per_cpu(scratch_mask, cpu));
+        FREE_CPUMASK_VAR(per_cpu(scratch_mask, cpu));
         break;
     }

--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -27,7 +27,7 @@ static int init_percpu_area(unsigned int
 {
     char *p;
     if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
-        return -EBUSY;
+        return 0;
     if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
         return -ENOMEM;
     memset(p, 0, __per_cpu_data_end - __per_cpu_start);
@@ -70,9 +70,12 @@ static int cpu_percpu_callback(
         break;
 
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        free_percpu_area(cpu);
+        if ( !park_offline_cpus )
+            free_percpu_area(cpu);
         break;
-    default:
+    case CPU_REMOVE:
+        if ( park_offline_cpus )
+            free_percpu_area(cpu);
         break;
     }
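The folded guard in update_clusterinfo() packs both of those checks into one comparison. Spelled out as a sketch (skip_free() is an illustrative helper, not part of the patch):

/*
 * park_offline_cpus == (action != CPU_REMOVE) reads as "skip this free":
 *
 *   park_offline_cpus | action            | outcome
 *   ------------------+-------------------+---------------------------------
 *   false             | CANCELED or DEAD  | free now (nothing gets parked)
 *   false             | REMOVE            | skip - already freed at CPU_DEAD
 *   true              | CANCELED or DEAD  | skip - CPU stays parked
 *   true              | REMOVE            | free on actual removal
 */
static bool_t skip_free(unsigned long action)
{
    return park_offline_cpus == (action != CPU_REMOVE);
}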
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -60,6 +60,8 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
 
+bool_t __read_mostly park_offline_cpus;
+
 struct cpuinfo_x86 cpu_data[NR_CPUS];
 
 u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
@@ -844,29 +846,45 @@ static void cleanup_cpu_root_pgt(unsigne
     free_xen_pagetable(rpt);
 }
 
-static void cpu_smpboot_free(unsigned int cpu)
+/*
+ * The 'remove' boolean controls whether a CPU is just getting offlined (and
+ * parked), or outright removed / offlined without parking. Parked CPUs need
+ * things like their stack, GDT, IDT, TSS, and per-CPU data still available.
+ * A few other items, in particular CPU masks, are also retained, as it's
+ * difficult to prove that they're entirely unreferenced from parked CPUs.
+ */
+static void cpu_smpboot_free(unsigned int cpu, bool_t remove)
 {
     unsigned int order;
 
-    free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
-    free_cpumask_var(per_cpu(cpu_core_mask, cpu));
+    if ( remove )
+    {
+        cpu_data[cpu].phys_proc_id = BAD_APICID;
+        cpu_data[cpu].cpu_core_id = BAD_APICID;
+        cpu_data[cpu].compute_unit_id = BAD_APICID;
+
+        FREE_CPUMASK_VAR(per_cpu(cpu_sibling_mask, cpu));
+        FREE_CPUMASK_VAR(per_cpu(cpu_core_mask, cpu));
+    }
 
     cleanup_cpu_root_pgt(cpu);
 
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
-    free_xenheap_pages(per_cpu(gdt_table, cpu), order);
+    if ( remove )
+        FREE_XENHEAP_PAGES(per_cpu(gdt_table, cpu), order);
 
     free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
 
-    order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
-    free_xenheap_pages(idt_tables[cpu], order);
-    idt_tables[cpu] = NULL;
-
-    if ( stack_base[cpu] != NULL )
+    if ( remove )
     {
-        memguard_unguard_stack(stack_base[cpu]);
-        free_xenheap_pages(stack_base[cpu], STACK_ORDER);
-        stack_base[cpu] = NULL;
+        order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+        FREE_XENHEAP_PAGES(idt_tables[cpu], order);
+
+        if ( stack_base[cpu] )
+        {
+            memguard_unguard_stack(stack_base[cpu]);
+            FREE_XENHEAP_PAGES(stack_base[cpu], STACK_ORDER);
+        }
     }
 }
 
@@ -875,16 +893,18 @@ static int cpu_smpboot_alloc(unsigned in
     unsigned int order;
     struct desc_struct *gdt;
 
-    stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, 0);
+    if ( stack_base[cpu] == NULL )
+        stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, 0);
     if ( stack_base[cpu] == NULL )
         goto oom;
     memguard_guard_stack(stack_base[cpu]);
 
     order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
-    per_cpu(gdt_table, cpu) = gdt =
-        alloc_xenheap_pages(order, MEMF_node(cpu_to_node(cpu)));
+    gdt = per_cpu(gdt_table, cpu)
+          ?: alloc_xenheap_pages(order, MEMF_node(cpu_to_node(cpu)));
     if ( gdt == NULL )
         goto oom;
+    per_cpu(gdt_table, cpu) = gdt;
     memcpy(gdt, boot_cpu_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     BUILD_BUG_ON(NR_CPUS > 0x10000);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -897,7 +917,9 @@ static int cpu_smpboot_alloc(unsigned in
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 
     order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
-    idt_tables[cpu] = alloc_xenheap_pages(order, MEMF_node(cpu_to_node(cpu)));
+    if ( idt_tables[cpu] == NULL )
+        idt_tables[cpu] = alloc_xenheap_pages(order,
+                                              MEMF_node(cpu_to_node(cpu)));
     if ( idt_tables[cpu] == NULL )
         goto oom;
     memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
@@ -909,12 +931,12 @@ static int cpu_smpboot_alloc(unsigned in
     if ( setup_cpu_root_pgt(cpu) )
         goto oom;
 
-    if ( zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
-         zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
+    if ( cond_zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
+         cond_zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
         return 0;
 
 oom:
-    cpu_smpboot_free(cpu);
+    cpu_smpboot_free(cpu, 1);
     return -ENOMEM;
 }
 
@@ -931,9 +953,10 @@ static int cpu_smpboot_callback(
         break;
 
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        cpu_smpboot_free(cpu);
+        cpu_smpboot_free(cpu, !park_offline_cpus);
         break;
-    default:
+    case CPU_REMOVE:
+        cpu_smpboot_free(cpu, 1);
         break;
     }
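cpu_smpboot_alloc() is thereby made idempotent: each allocation first looks for an object surviving from a parked CPU. The "x ?: y" form it uses is GNU C's conditional with omitted middle operand, evaluating x once and yielding it when non-NULL, else y. A sketch of the idiom (buf_of() stands in for an lvalue macro in the style of per_cpu(); alloc_buf() is likewise hypothetical):

static int alloc_for_cpu(unsigned int cpu)
{
    /* Pick up a leftover allocation from a parked CPU, else allocate. */
    void *buf = buf_of(cpu) ?: alloc_buf(cpu);

    if ( buf == NULL )
        return -ENOMEM;
    buf_of(cpu) = buf;   /* store back only on success, as with gdt_table */
    return 0;
}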
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -27,6 +27,8 @@ extern void smp_alloc_memory(void);
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 
+extern bool_t park_offline_cpus;
+
 void smp_send_nmi_allbutself(void);
 
 void send_IPI_mask(const cpumask_t *, int vector);

--- a/xen/include/xen/cpu.h
+++ b/xen/include/xen/cpu.h
@@ -47,6 +47,8 @@ void register_cpu_notifier(struct notifi
 #define CPU_DYING        (0x0007 | NOTIFY_REVERSE)
 /* CPU_DEAD: CPU is dead. */
 #define CPU_DEAD         (0x0008 | NOTIFY_REVERSE)
+/* CPU_REMOVE: CPU was removed. */
+#define CPU_REMOVE       (0x0009 | NOTIFY_REVERSE)
 
 /* Perform CPU hotplug. May return -EAGAIN. */
 int cpu_down(unsigned int cpu);

--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -321,16 +321,35 @@ static inline bool_t alloc_cpumask_var(c
     return *mask != NULL;
 }
 
+static inline bool_t cond_alloc_cpumask_var(cpumask_var_t *mask)
+{
+    if (*mask == NULL)
+        *mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
+    return *mask != NULL;
+}
+
 static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
 {
     *(void **)mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
     return *mask != NULL;
 }
 
+static inline bool_t cond_zalloc_cpumask_var(cpumask_var_t *mask)
+{
+    if (*mask == NULL)
+        *mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
+    else
+        cpumask_clear(*mask);
+    return *mask != NULL;
+}
+
 static inline void free_cpumask_var(cpumask_var_t mask)
 {
     xfree(mask);
 }
+
+/* Free an allocated mask, and zero the pointer to it. */
+#define FREE_CPUMASK_VAR(m) XFREE(m)
 
 #else
 typedef cpumask_t cpumask_var_t[1];
@@ -338,16 +357,20 @@ static inline bool_t alloc_cpumask_var(c
 {
     return 1;
 }
+#define cond_alloc_cpumask_var alloc_cpumask_var
 
 static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
 {
     cpumask_clear(*mask);
     return 1;
 }
+#define cond_zalloc_cpumask_var zalloc_cpumask_var
 
 static inline void free_cpumask_var(cpumask_var_t mask)
 {
 }
+
+#define FREE_CPUMASK_VAR(m) free_cpumask_var(m)
 #endif
 
 #if NR_CPUS > 1

--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,14 @@ void *alloc_xenheap_pages(unsigned int o
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
+
+/* Free an allocation, and zero the pointer to it. */
+#define FREE_XENHEAP_PAGES(p, o) do { \
+    free_xenheap_pages(p, o);         \
+    (p) = NULL;                       \
+} while ( 0 )
+#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
+
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,

--- a/xen/include/xen/xmalloc.h
+++ b/xen/include/xen/xmalloc.h
@@ -26,6 +26,12 @@
 /* Free any of the above. */
 extern void xfree(void *);
 
+/* Free an allocation, and zero the pointer to it. */
+#define XFREE(p) do { \
+    xfree(p);         \
+    (p) = NULL;       \
+} while ( 0 )
+
 /* Underlying functions */
 extern void *_xmalloc(unsigned long size, unsigned long align);
 extern void *_xzalloc(unsigned long size, unsigned long align);
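The common thread of the new XFREE(), FREE_XENHEAP_PAGES() and FREE_CPUMASK_VAR() wrappers is that freeing also resets the owning pointer. That is what lets the cond_*alloc helpers treat NULL as "must allocate", and it makes a repeated free harmless, since xfree(NULL) is already a no-op. A usage sketch, with an illustrative local pointer:

struct mca_banks *banks = mcabanks_alloc();

XFREE(banks);   /* frees the banks and sets banks = NULL */
XFREE(banks);   /* banks is NULL, so this degrades to a no-op */

/*
 * Without the pointer reset, a conditional re-allocation such as
 *     banks = banks ?: mcabanks_alloc();
 * would hand back a dangling pointer instead of allocating afresh.
 */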