File valgrind.xen-4.13-handle-all-versioned-domctl-ops.patch of Package valgrind
From: Olaf Hering <olaf@aepfle.de>
Date: Tue, 28 Jan 2020 13:21:42 +0100
Subject: xen-4.13: handle all versioned domctl ops

When support for a new domctl version is added, all versioned ops
must be adjusted.

bz#390553
---
 coregrind/m_syswrap/syswrap-xen.c | 7 +++++++
 1 file changed, 7 insertions(+)

--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -1112,24 +1112,25 @@ PRE(domctl)
          break;
       default:
          bad_subop(tid, layout, arrghs, status, flags,
                    "__HYPERVISOR_domctl_gethvmcontext_partial type",
                    domctl->u.hvmcontext_partial_00000005.type);
          break;
       }
       break;
    case 0x0000000e:
    case 0x0000000f:
    case 0x00000010:
    case 0x00000011:
+   case 0x00000012:
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, type);
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, instance);
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, bufsz);
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, buffer);
       switch (domctl->u.hvmcontext_partial_0000000e.type) {
       case VKI_HVM_SAVE_CODE(CPU):
          if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
             PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                           (Addr)domctl->u.hvmcontext_partial_0000000e.buffer.p, VKI_HVM_SAVE_LENGTH(CPU));
          break;
       case VKI_HVM_SAVE_CODE(MTRR):
          if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
@@ -1366,24 +1367,25 @@ PRE(domctl)
    case 0x00000008:
    case 0x00000009:
    case 0x0000000a:
       PRE_XEN_DOMCTL_READ(settimeoffset_00000001, time_offset_seconds);
       break;
    case 0x0000000b:
    case 0x0000000c:
    case 0x0000000d:
    case 0x0000000e:
    case 0x0000000f:
    case 0x00000010:
    case 0x00000011:
+   case 0x00000012:
       PRE_XEN_DOMCTL_READ(settimeoffset_0000000b, time_offset_seconds);
       break;
    }
    break;

 case VKI_XEN_DOMCTL_getvcpuinfo:
    PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
    break;

 case VKI_XEN_DOMCTL_scheduler_op:
    PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
    PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
@@ -1836,24 +1838,25 @@ PRE(domctl)
       break;
    }
    break;

 case VKI_XEN_DOMCTL_set_gnttab_limits:
    PRE_XEN_DOMCTL_READ(set_gnttab_limits_0000000e, grant_frames);
    PRE_XEN_DOMCTL_READ(set_gnttab_limits_0000000e, maptrack_frames);
    break;

 case VKI_XEN_DOMCTL_get_cpu_policy:
    switch (domctl->interface_version) {
    case 0x00000011:
+   case 0x00000012:
       PRE_XEN_DOMCTL_READ(cpu_policy_00000011, nr_leaves);
       PRE_XEN_DOMCTL_READ(cpu_policy_00000011, nr_msrs);
       break;
    }
    break;

 case VKI_XEN_DOMCTL_set_cpu_policy:
    switch (domctl->interface_version) {
    case 0x00000012:
       PRE_XEN_DOMCTL_READ(cpu_policy_00000012, nr_leaves);
       PRE_XEN_DOMCTL_READ(cpu_policy_00000012, nr_msrs);
       if (domctl->u.cpu_policy_00000012.cpuid_policy.p)
@@ -2713,24 +2716,25 @@ POST(domctl){
    case 0x0000000c:
    case 0x0000000d:
       switch (domctl->u.hvmcontext_partial_00000005.type) {
       case VKI_HVM_SAVE_CODE(CPU):
          if ( domctl->u.hvmcontext_partial_00000005.buffer.p )
             POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_00000005.buffer.p, VKI_HVM_SAVE_LENGTH(CPU));
          break;
       }
       break;
    case 0x0000000e:
    case 0x0000000f:
    case 0x00000011:
+   case 0x00000012:
       switch (domctl->u.hvmcontext_partial_0000000e.type) {
       case VKI_HVM_SAVE_CODE(CPU):
          if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
             POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_0000000e.buffer.p, VKI_HVM_SAVE_LENGTH(CPU));
          break;
       }
       break;
    }
    break;

 case VKI_XEN_DOMCTL_scheduler_op:
    if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
@@ -2908,24 +2912,25 @@ POST(domctl){
                               mcg_cap);
 #endif
       break;
    case 0x00000009:
    case 0x0000000a:
    case 0x0000000b:
    case 0x0000000c:
    case 0x0000000d:
    case 0x0000000e:
    case 0x0000000f:
    case 0x00000011:
+   case 0x00000012:
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               size);
 #if defined(__i386__) || defined(__x86_64__)
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_eip);
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_eip);
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_cs);
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_cs);
       __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_disables_events);
@@ -3054,47 +3059,49 @@ POST(domctl){
          case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.guest_request);
             break;
          case VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.debug_exception);
             break;
          }
       }
       break;
    case 0x0000010:
    case 0x0000011:
+   case 0x00000012:
       if (domctl->u.monitor_op_00000010.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
          switch(domctl->u.monitor_op_00000010.event) {
          case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000010, u.mov_to_cr);
             break;
          case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000010, u.mov_to_msr);
             break;
          case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000010, u.guest_request);
             break;
          case VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
             __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000010, u.debug_exception);
             break;
          }
       }
       break;
    }
    break;

 case VKI_XEN_DOMCTL_get_cpu_policy:
    switch (domctl->interface_version) {
    case 0x00000011:
+   case 0x00000012:
       POST_XEN_DOMCTL_WRITE(cpu_policy_00000011, nr_leaves);
       POST_XEN_DOMCTL_WRITE(cpu_policy_00000011, nr_msrs);
       if (domctl->u.cpu_policy_00000011.cpuid_policy.p)
          POST_MEM_WRITE((Addr)domctl->u.cpu_policy_00000011.cpuid_policy.p,
                         domctl->u.cpu_policy_00000011.nr_leaves);
       if (domctl->u.cpu_policy_00000011.msr_policy.p)
          POST_MEM_WRITE((Addr)domctl->u.cpu_policy_00000011.msr_policy.p,
                         domctl->u.cpu_policy_00000011.nr_msrs);
       break;
    }
    break;

 case VKI_XEN_DOMCTL_set_cpu_policy:
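
Background on the pattern this patch extends: Valgrind's syswrap-xen.c validates each domctl hypercall against the struct layout belonging to its interface_version, and every version whose layout is unchanged falls through onto the case label of the oldest compatible version. A new version such as 0x00000012 (Xen 4.13) is therefore invisible to Valgrind until its case label is added to every versioned op, which is exactly what the hunks above do. The following standalone sketch illustrates the idiom; the function and message text are hypothetical stand-ins, not Valgrind's actual code.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reduction of the versioned-op dispatch in
     * coregrind/m_syswrap/syswrap-xen.c: interface versions that share
     * a struct layout fall through to the handler for the oldest such
     * version. */
    static void pre_settimeoffset(uint32_t interface_version)
    {
       switch (interface_version) {
       case 0x0000000b: case 0x0000000c: case 0x0000000d:
       case 0x0000000e: case 0x0000000f: case 0x00000010:
       case 0x00000011:
       case 0x00000012: /* without this label, Xen 4.13 domctls fall
                           out of the switch and are never validated */
          printf("0x%08x: checked via the 0x0000000b layout\n",
                 interface_version);
          break;
       default:
          printf("0x%08x: unhandled interface version\n",
                 interface_version);
          break;
       }
    }

    int main(void)
    {
       pre_settimeoffset(0x00000011); /* handled before this patch */
       pre_settimeoffset(0x00000012); /* handled only with the added case */
       return 0;
    }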