File xsa456-0o.patch of Package xen.36362
# Commit 97c5b8b657e41a6645de9d40713b881234417b49
# Date 2024-04-09 16:37:30 +0100
# Author Roger Pau Monne <roger.pau@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/vmx: Add support for virtualize SPEC_CTRL

The feature is defined in the tertiary exec control, and is available
starting from Sapphire Rapids and Alder Lake CPUs.

When enabled, two extra VMCS fields are used: SPEC_CTRL mask and shadow.
Bits set in the mask are not allowed to be toggled by the guest (either set
or cleared) and the value in the shadow field is the value the guest
expects to be in the SPEC_CTRL register.

By using it the hypervisor can force the value of SPEC_CTRL bits behind the
guest's back without having to trap all accesses to SPEC_CTRL; note that no
bits are forced into the guest as part of this patch.  It also allows
getting rid of SPEC_CTRL in the guest MSR load list, since the value in the
shadow field will be loaded by the hardware on vmentry.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

# Commit cd2df4561edef2c104f46f8d0998e8ccefdf9c5e
# Date 2024-04-15 11:26:00 +0100
# Author Roger Pau Monné <roger.pau@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/vmx: prevent fallthrough in vmx_set_reg() for handled registers

vmx_set_reg() logic is split into two parts: the top one handles registers
that don't require loading the VMCS into context (i.e. don't require a
VMWRITE), while the second half handles registers that do require the VMCS
to be loaded.

The SPEC_CTRL MSR is handled differently depending on whether there's
support for virtualize SPEC_CTRL.  Without hardware help for virtualizing
SPEC_CTRL the value is handled using MSR load lists, which don't require
the VMCS to be loaded.  When there's hardware assistance, however, the
value is stored in the VMCS and requires a VMWRITE.

The lack of a return statement when handling SPEC_CTRL in the first half of
the function leads to SPEC_CTRL being unconditionally handled as if the
host had virtualize SPEC_CTRL, which means Xen will either hit an ASSERT in
debug builds, or will attempt to perform a VMWRITE to an unhandled VMCS
field if the host doesn't support the virtualize SPEC_CTRL feature.

This bug occurred because the context wasn't adjusted to account for the
absence of commit 0626219dcc6a ("x86/hvm: Drop hvm_{get,set}_guest_bndcfgs()
and use {get,set}_regs() instead") in the 4.15 and 4.16 branches.

Fix by returning early from the function if the register is handled without
requiring the VMCS context to be loaded.

Fixes: 295bf24af77c ('x86/vmx: Add support for virtualize SPEC_CTRL')
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -210,6 +210,7 @@ static void __init vmx_display_features(
     P(cpu_has_vmx_virt_exceptions, "Virtualisation Exceptions");
     P(cpu_has_vmx_pml, "Page Modification Logging");
     P(cpu_has_vmx_tsc_scaling, "TSC Scaling");
+    P(cpu_has_vmx_virt_spec_ctrl, "Virtualize SPEC_CTRL");
 #undef P
 
     if ( !printed )
@@ -357,7 +358,7 @@ static int vmx_init_vmcs_config(void)
 
     if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS )
     {
-        uint64_t opt = 0;
+        uint64_t opt = TERTIARY_EXEC_VIRT_SPEC_CTRL;
 
         _vmx_tertiary_exec_control = adjust_vmx_controls2(
             "Tertiary Exec Control", 0, opt,
@@ -1366,6 +1367,12 @@ static int construct_vmcs(struct vcpu *v
     if ( cpu_has_vmx_tsc_scaling )
         __vmwrite(TSC_MULTIPLIER, d->arch.hvm.tsc_scaling_ratio);
 
+    if ( cpu_has_vmx_virt_spec_ctrl )
+    {
+        __vmwrite(SPEC_CTRL_MASK, 0);
+        __vmwrite(SPEC_CTRL_SHADOW, 0);
+    }
+
     /* will update HOST & GUEST_CR3 as reqd */
     paging_update_paging_modes(v);
 
@@ -2074,6 +2081,9 @@ void vmcs_dump_vcpu(struct vcpu *v)
     if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
         printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
+    if ( cpu_has_vmx_virt_spec_ctrl )
+        printk("SPEC_CTRL mask = 0x%016lx shadow = 0x%016lx\n",
+               vmr(SPEC_CTRL_MASK), vmr(SPEC_CTRL_SHADOW));
 
     printk("*** Host State ***\n");
     printk("RIP = 0x%016lx (%ps) RSP = 0x%016lx\n",
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -578,23 +578,28 @@ static void vmx_cpuid_policy_changed(str
     /*
      * We can safely pass MSR_SPEC_CTRL through to the guest, even if STIBP
      * isn't enumerated in hardware, as SPEC_CTRL_STIBP is ignored.
+     *
+     * If VMX_VIRT_SPEC_CTRL is available, it is activated by default and the
+     * guest MSR_SPEC_CTRL value lives in the VMCS.  Otherwise, it lives in
+     * the MSR load/save list.
      */
     if ( cp->feat.ibrsb )
     {
         vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
 
-        rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
-        if ( rc )
-            goto out;
+        if ( !cpu_has_vmx_virt_spec_ctrl )
+        {
+            rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
+            if ( rc )
+                goto out;
+        }
     }
     else
     {
         vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
 
-        rc = vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
-        if ( rc && rc != -ESRCH )
-            goto out;
-        rc = 0; /* Tolerate -ESRCH */
+        if ( !cpu_has_vmx_virt_spec_ctrl )
+            vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
     }
 
     /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
@@ -2274,9 +2279,14 @@ static uint64_t vmx_get_reg(struct vcpu
     uint64_t val = 0;
     int rc;
 
+    /* Logic which doesn't require remote VMCS acquisition. */
     switch ( reg )
     {
     case MSR_SPEC_CTRL:
+        if ( cpu_has_vmx_virt_spec_ctrl )
+            /* Guest value in VMCS - fetched below. */
+            break;
+
         rc = vmx_read_guest_msr(v, reg, &val);
         if ( rc )
         {
@@ -2285,13 +2295,26 @@ static uint64_t vmx_get_reg(struct vcpu
             domain_crash(d);
         }
         return val;
+    }
+
+    /* Logic which maybe requires remote VMCS acquisition. */
+    vmx_vmcs_enter(v);
+    switch ( reg )
+    {
+    case MSR_SPEC_CTRL:
+        ASSERT(cpu_has_vmx_virt_spec_ctrl);
+        __vmread(SPEC_CTRL_SHADOW, &val);
+        break;
 
     default:
         printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
                __func__, v, reg);
         domain_crash(d);
-        return 0;
+        break;
     }
+    vmx_vmcs_exit(v);
+
+    return val;
 }
 
 static void vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
@@ -2299,9 +2322,14 @@ static void vmx_set_reg(struct vcpu *v,
     struct domain *d = v->domain;
     int rc;
 
+    /* Logic which doesn't require remote VMCS acquisition. */
     switch ( reg )
     {
     case MSR_SPEC_CTRL:
+        if ( cpu_has_vmx_virt_spec_ctrl )
+            /* Guest value in VMCS - set below. */
+            break;
+
         rc = vmx_write_guest_msr(v, reg, val);
         if ( rc )
         {
@@ -2309,6 +2337,16 @@ static void vmx_set_reg(struct vcpu *v,
                    __func__, v, reg, rc);
             domain_crash(d);
         }
+        return;
+    }
+
+    /* Logic which maybe requires remote VMCS acquisition. */
+    vmx_vmcs_enter(v);
+    switch ( reg )
+    {
+    case MSR_SPEC_CTRL:
+        ASSERT(cpu_has_vmx_virt_spec_ctrl);
+        __vmwrite(SPEC_CTRL_SHADOW, val);
         break;
 
     default:
@@ -2316,6 +2354,7 @@ static void vmx_set_reg(struct vcpu *v,
                __func__, v, reg, val);
         domain_crash(d);
     }
+    vmx_vmcs_exit(v);
 }
 
 static struct hvm_function_table __initdata vmx_function_table = {
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -276,6 +276,9 @@ extern u32 vmx_secondary_exec_control;
 #define TERTIARY_EXEC_VIRT_SPEC_CTRL BIT(7, UL)
 extern uint64_t vmx_tertiary_exec_control;
 
+#define cpu_has_vmx_virt_spec_ctrl \
+    (vmx_tertiary_exec_control & TERTIARY_EXEC_VIRT_SPEC_CTRL)
+
 #define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
 #define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040
 #define VMX_EPT_MEMORY_TYPE_UC 0x00000100
@@ -432,6 +435,8 @@ enum vmcs_field {
     XSS_EXIT_BITMAP = 0x0000202c,
     TSC_MULTIPLIER = 0x00002032,
     TERTIARY_VM_EXEC_CONTROL = 0x00002034,
+    SPEC_CTRL_MASK = 0x0000204a,
+    SPEC_CTRL_SHADOW = 0x0000204c,
     GUEST_PHYSICAL_ADDRESS = 0x00002400,
     VMCS_LINK_POINTER = 0x00002800,
     GUEST_IA32_DEBUGCTL = 0x00002802,
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -279,8 +279,11 @@ struct vcpu_msrs
      * For PV guests, this holds the guest kernel value.  It is accessed on
      * every entry/exit path.
      *
-     * For VT-x guests, the guest value is held in the MSR guest load/save
-     * list.
+     * For VT-x guests, one of two situations exist:
+     *
+     * - If hardware supports virtualized MSR_SPEC_CTRL, it is active by
+     *   default and the guest value lives in the VMCS.
+     * - Otherwise, the guest value is held in the MSR load/save list.
      *
      * For SVM, the guest value lives in the VMCB, and hardware saves/restores
      * the host value automatically.  However, guests run with the OR of the
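
As a rough illustration of the mask/shadow semantics described in the first commit message, here is a minimal stand-alone C sketch. It assumes the commit text is the whole story (it is not the SDM pseudocode), and the struct and function names are invented for the sketch, not Xen or hardware identifiers: bits set in SPEC_CTRL_MASK keep their enforced value regardless of guest writes, the remaining bits follow the guest, and SPEC_CTRL_SHADOW holds the value the guest reads back.

/* Illustrative model only - assumes the behaviour summarised above. */
#include <stdint.h>

struct virt_spec_ctrl {
    uint64_t mask;    /* SPEC_CTRL_MASK: bits the guest may not toggle */
    uint64_t shadow;  /* SPEC_CTRL_SHADOW: value the guest observes    */
    uint64_t hw;      /* effective MSR_SPEC_CTRL while the guest runs  */
};

/* Guest WRMSR: masked bits keep their current enforced value, the
 * remaining bits follow the guest; the shadow records the guest's view. */
static void guest_wrmsr_spec_ctrl(struct virt_spec_ctrl *s, uint64_t val)
{
    s->shadow = val;
    s->hw = (s->hw & s->mask) | (val & ~s->mask);
}

/* Guest RDMSR: returns the shadow, not the enforced hardware value. */
static uint64_t guest_rdmsr_spec_ctrl(const struct virt_spec_ctrl *s)
{
    return s->shadow;
}

With both fields initialised to 0, as construct_vmcs() does in the diff above, no bits are pinned and the guest sees exactly its own writes, which is what allows the SPEC_CTRL entry to be dropped from the guest MSR load list.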
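
The fallthrough fixed by the second commit can likewise be reduced to a small self-contained sketch; every name in it (set_reg, msr_list_write, vmcs_enter, vmcs_write, vmcs_exit, the reg enum) is a hypothetical stand-in rather than Xen's API. The point is that the first switch must return for every register it finishes handling; without that return, execution continues into the VMCS-backed second switch and issues a write that was never set up, which is the ASSERT/VMWRITE failure described in the commit message.

#include <stdint.h>
#include <stdio.h>

enum reg { REG_MSR_LIST, REG_VMCS };

/* Stubs standing in for the MSR-load-list and VMCS paths. */
static void msr_list_write(uint64_t val) { printf("MSR list <- %#llx\n", (unsigned long long)val); }
static void vmcs_enter(void) { puts("VMCS enter"); }
static void vmcs_write(uint64_t val) { printf("VMCS     <- %#llx\n", (unsigned long long)val); }
static void vmcs_exit(void) { puts("VMCS exit"); }

static void set_reg(enum reg r, uint64_t val)
{
    /* Part 1: registers handled without acquiring the VMCS. */
    switch ( r )
    {
    case REG_MSR_LIST:
        msr_list_write(val);
        return;            /* the fix: without this return, execution
                              falls through into the VMCS path below */
    default:
        break;
    }

    /* Part 2: registers whose value lives in the VMCS. */
    vmcs_enter();
    switch ( r )
    {
    case REG_VMCS:
        vmcs_write(val);
        break;
    default:
        break;
    }
    vmcs_exit();
}

int main(void)
{
    set_reg(REG_MSR_LIST, 1);  /* must not touch the VMCS */
    set_reg(REG_VMCS, 2);      /* legitimately uses the VMCS */
    return 0;
}

The real vmx_get_reg()/vmx_set_reg() fix in the diff follows the same shape: return early once the MSR-load-list path has handled the register, and only reach the second switch when the value genuinely lives in the VMCS.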