File xsa435-0-22.patch of Package xen.31135 in Project SUSE:SLE-15-SP5:GA
From 6bc33366795d14a21a3244d0f3b63f7dccea87ef Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 29 Mar 2023 07:39:44 +0100
Subject: x86: Merge the system {cpuid,msr} policy objects

Right now, they're the same underlying type, containing disjoint information.

Introduce a new cpu-policy.{h,c} to be the new location for all policy
handling logic.  Place the combined objects in __ro_after_init, which is new
since the original logic was written.

As we're trying to phase out the use of struct old_cpu_policy entirely, rework
update_domain_cpu_policy() to not pointer-chase through system_policies[].

This in turn allows system_policies[] in sysctl.c to become static and reduced
in scope to XEN_SYSCTL_get_cpu_policy.

No practical change.  This undoes the transient doubling of storage space from
earlier patches.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -16,6 +16,7 @@ obj-y += bitops.o
 obj-bin-y += bzimage.init.o
 obj-bin-y += clear_page.o
 obj-bin-y += copy_page.o
+obj-y += cpu-policy.o
 obj-y += cpuid.o
 obj-$(CONFIG_PV) += compat.o x86_64/compat.o
 obj-$(CONFIG_KEXEC) += crash.o
--- /dev/null
+++ b/xen/arch/x86/cpu-policy.c
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <xen/cache.h>
+#include <xen/kernel.h>
+
+#include <xen/lib/x86/cpu-policy.h>
+
+#include <asm/cpu-policy.h>
+
+struct cpu_policy __read_mostly raw_cpu_policy;
+struct cpu_policy __read_mostly host_cpu_policy;
+struct cpu_policy __read_mostly pv_max_cpu_policy;
+struct cpu_policy __read_mostly hvm_max_cpu_policy;
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -2,6 +2,8 @@
 #include <xen/string.h>
 #include <xen/delay.h>
 #include <xen/smp.h>
+
+#include <asm/cpu-policy.h>
 #include <asm/current.h>
 #include <asm/debugreg.h>
 #include <asm/processor.h>
@@ -133,7 +135,7 @@ bool __init probe_cpuid_faulting(void)
 		return false;

 	if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
-		raw_msr_policy.platform_info.cpuid_faulting =
+		raw_cpu_policy.platform_info.cpuid_faulting =
 			val & MSR_PLATFORM_INFO_CPUID_FAULTING;

 	if (rc ||
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -2,6 +2,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <xen/nospec.h>
+#include <asm/cpu-policy.h>
 #include <asm/cpuid.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/nestedhvm.h>
@@ -133,11 +134,6 @@ static void zero_leaves(struct cpuid_lea
     memset(&l[first], 0, sizeof(*l) * (last - first + 1));
 }

-struct cpuid_policy __read_mostly raw_cpuid_policy,
-    __read_mostly host_cpuid_policy,
-    __read_mostly pv_max_cpuid_policy,
-    __read_mostly hvm_max_cpuid_policy;
-
 static void sanitise_featureset(uint32_t *fs)
 {
     /* for_each_set_bit() uses unsigned longs.  Extend with zeroes. */
@@ -329,7 +325,7 @@ static void recalculate_misc(struct cpui

 static void __init calculate_raw_policy(void)
 {
-    struct cpuid_policy *p = &raw_cpuid_policy;
+    struct cpuid_policy *p = &raw_cpu_policy;

     x86_cpuid_policy_fill_native(p);

@@ -339,10 +335,10 @@ static void __init calculate_raw_policy(

 static void __init calculate_host_policy(void)
 {
-    struct cpuid_policy *p = &host_cpuid_policy;
+    struct cpuid_policy *p = &host_cpu_policy;
     unsigned int max_extd_leaf;

-    *p = raw_cpuid_policy;
+    *p = raw_cpu_policy;

     p->basic.max_leaf = min_t(uint32_t, p->basic.max_leaf,
                               ARRAY_SIZE(p->basic.raw) - 1);
@@ -403,17 +399,17 @@ static void __init guest_common_feature_
      * of IBRS by using the AMD feature bit.  An administrator may wish for
      * performance reasons to offer IBPB without IBRS.
      */
-    if ( host_cpuid_policy.feat.ibrsb )
+    if ( host_cpu_policy.feat.ibrsb )
         __set_bit(X86_FEATURE_IBPB, fs);
 }

 static void __init calculate_pv_max_policy(void)
 {
-    struct cpuid_policy *p = &pv_max_cpuid_policy;
+    struct cpuid_policy *p = &pv_max_cpu_policy;
     uint32_t pv_featureset[FSCAPINTS];
     unsigned int i;

-    *p = host_cpuid_policy;
+    *p = host_cpu_policy;
     cpuid_policy_to_featureset(p, pv_featureset);

     for ( i = 0; i < ARRAY_SIZE(pv_featureset); ++i )
@@ -437,7 +433,7 @@ static void __init calculate_pv_max_poli

 static void __init calculate_hvm_max_policy(void)
 {
-    struct cpuid_policy *p = &hvm_max_cpuid_policy;
+    struct cpuid_policy *p = &hvm_max_cpu_policy;
     uint32_t hvm_featureset[FSCAPINTS];
     unsigned int i;
     const uint32_t *hvm_featuremask;
@@ -445,7 +441,7 @@ static void __init calculate_hvm_max_pol
     if ( !hvm_enabled )
         return;

-    *p = host_cpuid_policy;
+    *p = host_cpu_policy;
     cpuid_policy_to_featureset(p, hvm_featureset);

     hvm_featuremask = hvm_hap_supported() ?
@@ -466,7 +462,7 @@ static void __init calculate_hvm_max_pol
      * HVM guests are able if running in protected mode.
      */
     if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
-         raw_cpuid_policy.basic.sep )
+         raw_cpu_policy.basic.sep )
         __set_bit(X86_FEATURE_SEP, hvm_featureset);

     /*
@@ -530,7 +526,7 @@ void recalculate_cpuid_policy(struct dom
 {
     struct cpuid_policy *p = d->arch.cpuid;
     const struct cpuid_policy *max =
-        is_pv_domain(d) ? &pv_max_cpuid_policy : &hvm_max_cpuid_policy;
+        is_pv_domain(d) ? &pv_max_cpu_policy : &hvm_max_cpu_policy;
     uint32_t fs[FSCAPINTS], max_fs[FSCAPINTS];
     unsigned int i;

@@ -621,7 +617,7 @@ void recalculate_cpuid_policy(struct dom
     /* Fold host's FDP_EXCP_ONLY and NO_FPU_SEL into guest's view. */
     fs[FEATURESET_7b0] &= ~(cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
                             cpufeat_mask(X86_FEATURE_NO_FPU_SEL));
-    fs[FEATURESET_7b0] |= (host_cpuid_policy.feat._7b0 &
+    fs[FEATURESET_7b0] |= (host_cpu_policy.feat._7b0 &
                            (cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
                             cpufeat_mask(X86_FEATURE_NO_FPU_SEL)));

@@ -672,8 +668,8 @@ void recalculate_cpuid_policy(struct dom
 int init_domain_cpuid_policy(struct domain *d)
 {
     struct cpuid_policy *p =
-        xmemdup(is_pv_domain(d) ? &pv_max_cpuid_policy
-                                : &hvm_max_cpuid_policy);
+        xmemdup(is_pv_domain(d) ? &pv_max_cpu_policy
+                                : &hvm_max_cpu_policy);

     if ( !p )
         return -ENOMEM;
@@ -970,7 +966,7 @@ void guest_cpuid(const struct vcpu *v, u
         if ( is_pv_domain(d) && is_hardware_domain(d) &&
              guest_kernel_mode(v, regs) && cpu_has_monitor &&
              regs->entry_vector == TRAP_gp_fault )
-            *res = raw_cpuid_policy.basic.raw[5];
+            *res = raw_cpu_policy.basic.raw[5];
         break;

     case 0x7:
@@ -1102,14 +1098,14 @@ static void __init __maybe_unused build_
     /* Find some more clever allocation scheme if this trips. */
     BUILD_BUG_ON(sizeof(struct cpuid_policy) > PAGE_SIZE);

-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.basic) !=
-                 sizeof(raw_cpuid_policy.basic.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.feat) !=
-                 sizeof(raw_cpuid_policy.feat.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.xstate) !=
-                 sizeof(raw_cpuid_policy.xstate.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.extd) !=
-                 sizeof(raw_cpuid_policy.extd.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.basic) !=
+                 sizeof(raw_cpu_policy.basic.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.feat) !=
+                 sizeof(raw_cpu_policy.feat.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.xstate) !=
+                 sizeof(raw_cpu_policy.xstate.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.extd) !=
+                 sizeof(raw_cpu_policy.extd.raw));
 }

 /*
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -34,7 +34,7 @@
 #include <asm/xstate.h>
 #include <asm/debugger.h>
 #include <asm/psr.h>
-#include <asm/cpuid.h>
+#include <asm/cpu-policy.h>

 static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
 {
@@ -217,12 +217,19 @@ static int update_domain_cpu_policy(stru
                                     xen_domctl_cpu_policy_t *xdpc)
 {
     struct old_cpu_policy new = {};
-    const struct old_cpu_policy *sys = is_pv_domain(d)
-        ? &system_policies[XEN_SYSCTL_cpu_policy_pv_max]
-        : &system_policies[XEN_SYSCTL_cpu_policy_hvm_max];
+    struct cpu_policy *sys = is_pv_domain(d)
+        ? (IS_ENABLED(CONFIG_PV)  ? &pv_max_cpu_policy  : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpu_policy : NULL);
+    struct old_cpu_policy old_sys = { sys, sys };
     struct cpu_policy_errors err = INIT_CPU_POLICY_ERRORS;
     int ret = -ENOMEM;

+    if ( !sys )
+    {
+        ASSERT_UNREACHABLE();
+        return -EOPNOTSUPP;
+    }
+
     /* Start by copying the domain's existing policies. */
     if ( !(new.cpuid = xmemdup(d->arch.cpuid)) ||
          !(new.msr   = xmemdup(d->arch.msr)) )
@@ -240,7 +247,7 @@ static int update_domain_cpu_policy(stru
     x86_cpuid_policy_clear_out_of_range_leaves(new.cpuid);

     /* Audit the combined dataset. */
-    ret = x86_cpu_policies_are_compatible(sys, &new, &err);
+    ret = x86_cpu_policies_are_compatible(&old_sys, &new, &err);
     if ( ret )
         goto out;

--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -24,23 +24,19 @@
 #include <xen/nospec.h>
 #include <xen/sched.h>

+#include <asm/cpu-policy.h>
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/setup.h>

 DEFINE_PER_CPU(uint32_t, tsc_aux);

-struct msr_policy __read_mostly     raw_msr_policy,
-                  __read_mostly    host_msr_policy,
-                  __read_mostly hvm_max_msr_policy,
-                  __read_mostly  pv_max_msr_policy;
-
 struct vcpu_msrs __read_mostly hvm_max_vcpu_msrs,
                  __read_mostly  pv_max_vcpu_msrs;

 static void __init calculate_raw_policy(void)
 {
-    struct msr_policy *mp = &raw_msr_policy;
+    struct msr_policy *mp = &raw_cpu_policy;

     /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
     /* Was already added by probe_cpuid_faulting() */
@@ -51,9 +47,9 @@ static void __init calculate_raw_policy(

 static void __init calculate_host_policy(void)
 {
-    struct msr_policy *mp = &host_msr_policy;
+    struct msr_policy *mp = &host_cpu_policy;

-    *mp = raw_msr_policy;
+    *mp = raw_cpu_policy;

     /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
     /* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
@@ -71,12 +67,12 @@ static void __init calculate_host_policy

 static void __init calculate_hvm_max_policy(void)
 {
-    struct msr_policy *mp = &hvm_max_msr_policy;
+    struct msr_policy *mp = &hvm_max_cpu_policy;

     if ( !hvm_enabled )
         return;

-    *mp = host_msr_policy;
+    *mp = host_cpu_policy;

     /* It's always possible to emulate CPUID faulting for HVM guests */
     mp->platform_info.cpuid_faulting = true;
@@ -86,9 +82,9 @@ static void __init calculate_hvm_max_pol

 static void __init calculate_pv_max_policy(void)
 {
-    struct msr_policy *mp = &pv_max_msr_policy;
+    struct msr_policy *mp = &pv_max_cpu_policy;

-    *mp = host_msr_policy;
+    *mp = host_cpu_policy;

     mp->arch_caps.raw = 0; /* Not supported yet. */
 }
@@ -104,8 +100,8 @@ void __init init_guest_msr_policy(void)
 int init_domain_msr_policy(struct domain *d)
 {
     struct msr_policy *mp =
-        xmemdup(is_pv_domain(d) ? &pv_max_msr_policy
-                                : &hvm_max_msr_policy);
+        xmemdup(is_pv_domain(d) ? &pv_max_cpu_policy
+                                : &hvm_max_cpu_policy);

     if ( !mp )
         return -ENOMEM;
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -31,34 +31,7 @@
 #include <xen/cpu.h>
 #include <xsm/xsm.h>
 #include <asm/psr.h>
-#include <asm/cpuid.h>
-
-const struct old_cpu_policy system_policies[] = {
-    [ XEN_SYSCTL_cpu_policy_raw ] = {
-        &raw_cpuid_policy,
-        &raw_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_host ] = {
-        &host_cpuid_policy,
-        &host_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_pv_max ] = {
-        &pv_max_cpuid_policy,
-        &pv_max_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_hvm_max ] = {
-        &hvm_max_cpuid_policy,
-        &hvm_max_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_pv_default ] = {
-        &pv_max_cpuid_policy,
-        &pv_max_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_hvm_default ] = {
-        &hvm_max_cpuid_policy,
-        &hvm_max_msr_policy,
-    },
-};
+#include <asm/cpu-policy.h>

 struct l3_cache_info {
     int ret;
@@ -359,15 +332,15 @@ long arch_do_sysctl(

     case XEN_SYSCTL_get_cpu_featureset:
     {
-        static const struct cpuid_policy *const policy_table[] = {
-            [XEN_SYSCTL_cpu_featureset_raw]     = &raw_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_host]    = &host_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_pv]      = &pv_max_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_hvm]     = &hvm_max_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_pv_max]  = &pv_max_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpuid_policy,
+        static const struct cpu_policy *const policy_table[6] = {
+            [XEN_SYSCTL_cpu_featureset_raw]     = &raw_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_host]    = &host_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_pv]      = &pv_max_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_pv_max]  = &pv_max_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_hvm]     = &hvm_max_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpu_policy,
         };
-        const struct cpuid_policy *p = NULL;
+        const struct cpu_policy *p = NULL;
         uint32_t featureset[FSCAPINTS];
         unsigned int nr;

@@ -415,7 +388,15 @@ long arch_do_sysctl(

     case XEN_SYSCTL_get_cpu_policy:
     {
-        const struct old_cpu_policy *policy;
+        static const struct cpu_policy *const system_policies[6] = {
+            [XEN_SYSCTL_cpu_policy_raw]         = &raw_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_host]        = &host_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_pv_max]      = &pv_max_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_pv_default]  = &pv_max_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_hvm_max]     = &hvm_max_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_hvm_default] = &hvm_max_cpu_policy,
+        };
+        const struct cpu_policy *policy;

         /* Reserved field set, or bad policy index? */
         if ( sysctl->u.cpu_policy._rsvd ||
@@ -424,7 +405,7 @@ long arch_do_sysctl(
             ret = -EINVAL;
             break;
         }
-        policy = &system_policies[
+        policy = system_policies[
             array_index_nospec(sysctl->u.cpu_policy.index,
                                ARRAY_SIZE(system_policies))];

@@ -432,7 +413,7 @@ long arch_do_sysctl(
         if ( guest_handle_is_null(sysctl->u.cpu_policy.leaves) )
             sysctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
         else if ( (ret = x86_cpuid_copy_to_buffer(
-                       policy->cpuid,
+                       policy,
                        sysctl->u.cpu_policy.leaves,
                        &sysctl->u.cpu_policy.nr_leaves)) )
             break;
@@ -448,7 +429,7 @@ long arch_do_sysctl(
         if ( guest_handle_is_null(sysctl->u.cpu_policy.msrs) )
             sysctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES;
         else if ( (ret = x86_msr_copy_to_buffer(
-                       policy->msr,
+                       policy,
                        sysctl->u.cpu_policy.msrs,
                        &sysctl->u.cpu_policy.nr_msrs)) )
             break;
--- /dev/null
+++ b/xen/include/asm-x86/cpu-policy.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef X86_CPU_POLICY_H
+#define X86_CPU_POLICY_H
+
+struct cpu_policy;
+
+extern struct cpu_policy raw_cpu_policy;
+extern struct cpu_policy host_cpu_policy;
+extern struct cpu_policy pv_max_cpu_policy;
+extern struct cpu_policy hvm_max_cpu_policy;
+
+#endif /* X86_CPU_POLICY_H */
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -46,11 +46,6 @@ DECLARE_PER_CPU(struct cpuidmasks, cpuid
 /* Default masking MSR values, calculated at boot. */
 extern struct cpuidmasks cpuidmask_defaults;

-extern struct cpuid_policy raw_cpuid_policy, host_cpuid_policy,
-    pv_max_cpuid_policy, hvm_max_cpuid_policy;
-
-extern const struct old_cpu_policy system_policies[];
-
 /* Check that all previously present features are still available. */
 bool recheck_cpu_features(unsigned int cpu);

--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -270,11 +270,6 @@ static inline void wrmsr_tsc_aux(uint32_

 uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);

-extern struct msr_policy     raw_msr_policy,
-                            host_msr_policy,
-                         hvm_max_msr_policy,
-                          pv_max_msr_policy;
-
 /* Container object for per-vCPU MSRs */
 struct vcpu_msrs
 {