File oprofile-add-support-for-broadwell-microarchitecture.patch of Package oprofile.2004
From: Andi Kleen <ak@linux.intel.com>
Date: Wed Jul 16 08:03:54 2014 -0500
Subject: Add oprofile support for Broadwell microarchitecture
Git-commit 6d692179cb44e68a3cfaeac213e3244f858676b8
References: FATE#318978
Signed-off-by: Tony Jones <tonyj@suse.de> [suse: adjust context]

Add oprofile support for Broadwell microarchitecture

This patch adds the event list of the Intel Broadwell architecture. Hopefully this can still make 1.0

The patch is very straight forward: just add the model numbers and type in the usual places and add the event list.

Passes make check

Some notes:

- Haswell included one Broadwell model number by mistake. I moved that to Broadwell now.

- oprofile doesn't support umask sub events with different counter constraints than other events. This affects a few events on Broadwell. However it's not a problem when oprofile uses perf as a backend, as perf will know how to schedule these events (once it gets the Broadwell support). It won't work correctly with the old driver. Most of these events are not too useful for sampling, so in practice it's not a real problem.

- As usual PEBS events and events with offcore mask and uncore events are missing.

Signed-off-by: Andi Kleen <ak@linux.intel.com>

diff --git a/events/Makefile.am b/events/Makefile.am
index 3e43d10..f6fd3d7 100644
--- a/events/Makefile.am
+++ b/events/Makefile.am
@@ -21,6 +21,7 @@ event_files = \
 	i386/sandybridge/events i386/sandybridge/unit_masks \
 	i386/ivybridge/events i386/ivybridge/unit_masks \
 	i386/haswell/events i386/haswell/unit_masks \
+	i386/broadwell/events i386/broadwell/unit_masks \
 	i386/silvermont/events i386/silvermont/unit_masks \
 	ia64/ia64/events ia64/ia64/unit_masks \
 	ia64/itanium2/events ia64/itanium2/unit_masks \
diff --git a/events/i386/broadwell/events b/events/i386/broadwell/events
new file mode 100644
index 0000000..6a4b388
--- /dev/null
+++ b/events/i386/broadwell/events
@@ -0,0 +1,65 @@
+#
+# Intel "Broadwell" microarchitecture core events.
+#
+# See http://ark.intel.com/ for help in identifying Broadwell based CPUs
+#
+# Note the minimum counts are not discovered experimentally and could be likely
+# lowered in many cases without ill effect.
+#
+include:i386/arch_perfmon
+event:0x03 counters:cpuid um:ld_blocks minimum:100003 name:ld_blocks :
+event:0x05 counters:cpuid um:misalign_mem_ref minimum:2000003 name:misalign_mem_ref :
+event:0x07 counters:cpuid um:one minimum:100003 name:ld_blocks_partial_address_alias :
+event:0x08 counters:cpuid um:dtlb_load_misses minimum:2000003 name:dtlb_load_misses :
+event:0x0d counters:cpuid um:x03 minimum:2000003 name:int_misc_recovery_cycles :
+event:0x0e counters:cpuid um:uops_issued minimum:2000003 name:uops_issued :
+event:0x14 counters:cpuid um:one minimum:2000003 name:arith_fpu_div_active :
+event:0x24 counters:cpuid um:l2_rqsts minimum:200003 name:l2_rqsts :
+event:0x27 counters:cpuid um:x50 minimum:200003 name:l2_demand_rqsts_wb_hit :
+event:0x48 counters:2 um:l1d_pend_miss minimum:2000003 name:l1d_pend_miss :
+event:0x49 counters:cpuid um:dtlb_store_misses minimum:100003 name:dtlb_store_misses :
+event:0x4c counters:cpuid um:x02 minimum:100003 name:load_hit_pre_hw_pf :
+event:0x4f counters:cpuid um:x10 minimum:2000003 name:ept_walk_cycles :
+event:0x51 counters:cpuid um:one minimum:2000003 name:l1d_replacement :
+event:0x54 counters:cpuid um:tx_mem minimum:2000003 name:tx_mem :
+event:0x58 counters:cpuid um:move_elimination minimum:1000003 name:move_elimination :
+event:0x5c counters:cpuid um:cpl_cycles minimum:2000003 name:cpl_cycles :
+event:0x5d counters:cpuid um:tx_exec minimum:2000003 name:tx_exec :
+event:0x5e counters:cpuid um:rs_events minimum:2000003 name:rs_events :
+event:0x60 counters:cpuid um:offcore_requests_outstanding minimum:2000003 name:offcore_requests_outstanding :
+event:0x63 counters:cpuid um:lock_cycles minimum:2000003 name:lock_cycles :
+event:0x79 counters:0,1,2,3 um:idq minimum:2000003 name:idq :
+event:0x80 counters:cpuid um:x02 minimum:200003 name:icache_misses :
+event:0x85 counters:cpuid um:itlb_misses minimum:100003 name:itlb_misses :
+event:0x87 counters:cpuid um:one minimum:2000003 name:ild_stall_lcp :
+event:0x88 counters:cpuid um:br_inst_exec minimum:200003 name:br_inst_exec :
+event:0x89 counters:cpuid um:br_misp_exec minimum:200003 name:br_misp_exec :
+event:0x9c counters:0,1,2,3 um:idq_uops_not_delivered minimum:2000003 name:idq_uops_not_delivered :
+event:0xa1 counters:cpuid um:uops_executed_port minimum:2000003 name:uops_executed_port :
+event:0xa1 counters:cpuid um:uops_dispatched_port minimum:2000003 name:uops_dispatched_port :
+event:0xa2 counters:cpuid um:resource_stalls minimum:2000003 name:resource_stalls :
+event:0xa3 counters:2 um:cycle_activity minimum:2000003 name:cycle_activity :
+event:0xa8 counters:cpuid um:lsd minimum:2000003 name:lsd :
+event:0xab counters:cpuid um:x02 minimum:2000003 name:dsb2mite_switches_penalty_cycles :
+event:0xae counters:cpuid um:one minimum:100007 name:itlb_itlb_flush :
+event:0xb0 counters:cpuid um:offcore_requests minimum:100003 name:offcore_requests :
+event:0xb1 counters:cpuid um:uops_executed minimum:2000003 name:uops_executed :
+event:0xbc counters:0,1,2,3 um:page_walker_loads minimum:2000003 name:page_walker_loads :
+event:0xc0 counters:1 um:inst_retired minimum:2000003 name:inst_retired :
+event:0xc1 counters:cpuid um:other_assists minimum:100003 name:other_assists :
+event:0xc2 counters:cpuid um:uops_retired minimum:2000003 name:uops_retired :
+event:0xc3 counters:cpuid um:machine_clears minimum:2000003 name:machine_clears :
+event:0xc4 counters:cpuid um:br_inst_retired minimum:400009 name:br_inst_retired :
+event:0xc5 counters:cpuid um:br_misp_retired minimum:400009 name:br_misp_retired :
+event:0xc8 counters:cpuid um:hle_retired minimum:2000003 name:hle_retired :
+event:0xc9 counters:0,1,2,3 um:rtm_retired minimum:2000003 name:rtm_retired :
+event:0xca counters:cpuid um:fp_assist minimum:100003 name:fp_assist :
+event:0xcc counters:cpuid um:x20 minimum:2000003 name:rob_misc_events_lbr_inserts :
+event:0xd0 counters:0,1,2,3 um:mem_uops_retired minimum:2000003 name:mem_uops_retired :
+event:0xd1 counters:0,1,2,3 um:mem_load_uops_retired minimum:2000003 name:mem_load_uops_retired :
+event:0xd2 counters:0,1,2,3 um:mem_load_uops_l3_hit_retired minimum:100003 name:mem_load_uops_l3_hit_retired :
+event:0xd3 counters:0,1,2,3 um:one minimum:100007 name:mem_load_uops_l3_miss_retired_local_dram :
+event:0xe6 counters:cpuid um:x1f minimum:100003 name:baclears_any :
+event:0xf0 counters:cpuid um:l2_trans minimum:200003 name:l2_trans :
+event:0xf1 counters:cpuid um:l2_lines_in minimum:100003 name:l2_lines_in :
+event:0xf2 counters:cpuid um:x05 minimum:100003 name:l2_lines_out_demand_clean :
diff --git a/events/i386/broadwell/unit_masks b/events/i386/broadwell/unit_masks
new file mode 100644
index 0000000..470e9e9
--- /dev/null
+++ b/events/i386/broadwell/unit_masks
@@ -0,0 +1,316 @@
+#
+# Unit masks for the Intel "Broadwell" micro architecture
+#
+# See http://ark.intel.com/ for help in identifying Broadwell based CPUs
+#
+include:i386/arch_perfmon
+name:x02 type:mandatory default:0x2
+	0x2 No unit mask
+name:x03 type:mandatory default:0x3
+	0x3 No unit mask
+name:x05 type:mandatory default:0x5
+	0x5 No unit mask
+name:x10 type:mandatory default:0x10
+	0x10 No unit mask
+name:x1f type:mandatory default:0x1f
+	0x1f No unit mask
+name:x20 type:mandatory default:0x20
+	0x20 No unit mask
+name:x50 type:mandatory default:0x50
+	0x50 No unit mask
+name:ld_blocks type:exclusive default:0x2
+	0x2 extra: store_forward This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when: - preceding store conflicts with the load (incomplete overlap); - store forwarding is impossible due to u-arch limitations; - preceding lock RMW operations are not forwarded; - store has the no-forward bit set (uncacheable/page-split/masked stores); - all-blocking stores are used (mostly, fences and port I/O); and others. The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.
+	0x8 extra: no_sr This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.
+name:misalign_mem_ref type:exclusive default:0x1
+	0x1 extra: loads This event counts speculative cache-line split load uops dispatched to the L1 cache.
+	0x2 extra: stores This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.
+name:dtlb_load_misses type:exclusive default:0x1
+	0x1 extra: miss_causes_a_walk This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
+	0x2 extra: walk_completed_4k This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
+	0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
+	0x20 extra: stlb_hit_4k Load misses that miss the DTLB and hit the STLB (4K)
+	0xe extra: walk_completed Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.
+	0x60 extra: stlb_hit Load operations that miss the first DTLB level but hit the second and do not cause page walks
+name:uops_issued type:exclusive default:0x1
+	0x1 extra: any This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).
+	0x10 extra: flags_merge Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.
+	0x20 extra: slow_lea Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.
+	0x40 extra: single_mul Number of Multiply packed/scalar single precision uops allocated
+	0x1 extra:inv stall_cycles This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.
+name:l2_rqsts type:exclusive default:0x21
+	0x21 extra: demand_data_rd_miss This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.
+	0x41 extra: demand_data_rd_hit This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.
+	0x30 extra: l2_pf_miss This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.
+	0x50 extra: l2_pf_hit This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types
+	0xe1 extra: all_demand_data_rd This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.
+	0xe2 extra: all_rfo This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.
+	0xe4 extra: all_code_rd This event counts the total number of L2 code requests.
+	0xf8 extra: all_pf This event counts the total number of requests from the L2 hardware prefetchers.
+	0x42 extra: rfo_hit RFO requests that hit L2 cache
+	0x22 extra: rfo_miss RFO requests that miss L2 cache
+	0x44 extra: code_rd_hit L2 cache hits when fetching instructions, code reads.
+	0x24 extra: code_rd_miss L2 cache misses when fetching instructions
+	0x27 extra: all_demand_miss Demand requests that miss L2 cache
+	0xe7 extra: all_demand_references Demand requests to L2 cache
+	0x3f extra: miss All requests that miss L2 cache
+	0xff extra: references All L2 requests
+name:l1d_pend_miss type:exclusive default:0x1
+	0x1 extra: pending This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.
+	0x1 extra: pending_cycles This event counts duration of L1D miss outstanding in cycles.
+name:dtlb_store_misses type:exclusive default:0x1
+	0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
+	0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
+	0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
+	0x20 extra: stlb_hit_4k Store misses that miss the DTLB and hit the STLB (4K)
+	0xe extra: walk_completed Store misses in all DTLB levels that cause completed page walks
+	0x60 extra: stlb_hit Store operations that miss the first TLB level but hit the second and do not cause page walks
+name:tx_mem type:exclusive default:0x1
+	0x1 extra: abort_conflict Number of times a TSX line had a cache conflict
+	0x2 extra: abort_capacity_write Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow
+	0x4 extra: abort_hle_store_to_elided_lock Number of times a TSX Abort was triggered due to a non-release/commit store to lock
+	0x8 extra: abort_hle_elision_buffer_not_empty Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty
+	0x10 extra: abort_hle_elision_buffer_mismatch Number of times a TSX Abort was triggered due to release/commit but data and address mismatch
+	0x20 extra: abort_hle_elision_buffer_unsupported_alignment Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer
+	0x40 extra: hle_elision_buffer_full Number of times we could not allocate Lock Buffer
+name:move_elimination type:exclusive default:0x1
+	0x1 extra: int_eliminated Number of integer Move Elimination candidate uops that were eliminated.
+	0x2 extra: simd_eliminated Number of SIMD Move Elimination candidate uops that were eliminated.
+	0x4 extra: int_not_eliminated Number of integer Move Elimination candidate uops that were not eliminated.
+	0x8 extra: simd_not_eliminated Number of SIMD Move Elimination candidate uops that were not eliminated.
+name:cpl_cycles type:exclusive default:0x1
+	0x1 extra: ring0 This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.
+	0x2 extra: ring123 This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.
+	0x1 extra:edge ring0_trans This event counts when there is a transition from ring 1,2 or 3 to ring0.
+name:tx_exec type:exclusive default:0x1
+	0x1 extra: misc1 Unfriendly TSX abort triggered by a flowmarker
+	0x2 extra: misc2 Unfriendly TSX abort triggered by a vzeroupper instruction
+	0x4 extra: misc3 Unfriendly TSX abort triggered by a nest count that is too deep
+	0x8 extra: misc4 RTM region detected inside HLE
+	0x10 extra: misc5 # HLE inside HLE+
+name:rs_events type:exclusive default:0x1
+	0x1 extra: empty_cycles This event counts cycles during which the reservation station (RS) is empty for the thread. Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.
+	0x1 extra:inv,edge empty_end Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.
+name:offcore_requests_outstanding type:exclusive default:0x1
+	0x1 extra: demand_data_rd This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS. Note: A prefetch promoted to Demand is counted from the promotion point.
+	0x2 extra: demand_code_rd This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The "Offcore outstanding" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.
+	0x4 extra: demand_rfo This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
+	0x8 extra: all_data_rd This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
+	0x1 extra: cycles_with_demand_data_rd This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).
+	0x8 extra: cycles_with_data_rd This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.
+name:lock_cycles type:exclusive default:0x1
+	0x1 extra: split_lock_uc_lock_duration This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.
+	0x2 extra: cache_lock_duration This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).
+name:idq type:exclusive default:0x2
+	0x2 extra: empty This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.
+	0x4 extra: mite_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
+	0x8 extra: dsb_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
+	0x10 extra: ms_dsb_uops This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
+	0x20 extra: ms_mite_uops This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ.
+	0x30 extra: ms_uops This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.
+	0x30 extra: ms_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may "bypass" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.
+	0x4 extra: mite_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ.
+	0x8 extra: dsb_cycles This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
+	0x10 extra: ms_dsb_cycles This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
+	0x10 extra:edge ms_dsb_occur This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may "bypass" the IDQ.
+	0x18 extra: all_dsb_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
+	0x18 extra: all_dsb_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may "bypass" the IDQ.
+	0x24 extra: all_mite_cycles_4_uops This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
+	0x24 extra: all_mite_cycles_any_uops This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
+	0x3c extra: mite_all_uops This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may "bypass" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).
+	0x30 extra:edge ms_switches Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer
+name:itlb_misses type:exclusive default:0x1
+	0x1 extra: miss_causes_a_walk This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).
+	0x2 extra: walk_completed_4k This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.
+	0x10 extra: walk_duration This event counts the number of cycles while PMH is busy with the page walk.
+	0x20 extra: stlb_hit_4k Core misses that miss the DTLB and hit the STLB (4K)
+	0xe extra: walk_completed Misses in all ITLB levels that cause completed page walks
+	0x60 extra: stlb_hit Operations that miss the first ITLB level but hit the second and do not cause any page walks
+name:br_inst_exec type:exclusive default:0xff
+	0xff extra: all_branches This event counts both taken and not taken speculative and retired branch instructions.
+	0x41 extra: nontaken_conditional This event counts not taken macro-conditional branch instructions.
+	0x81 extra: taken_conditional This event counts taken speculative and retired macro-conditional branch instructions.
+	0x82 extra: taken_direct_jump This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.
+	0x84 extra: taken_indirect_jump_non_call_ret This event counts taken speculative and retired indirect branches excluding calls and return branches.
+	0x88 extra: taken_indirect_near_return This event counts taken speculative and retired indirect branches that have a return mnemonic.
+	0x90 extra: taken_direct_near_call This event counts taken speculative and retired direct near calls.
+	0xa0 extra: taken_indirect_near_call This event counts taken speculative and retired indirect calls including both register and memory indirect.
+	0xc1 extra: all_conditional This event counts both taken and not taken speculative and retired macro-conditional branch instructions.
+	0xc2 extra: all_direct_jmp This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.
+	0xc4 extra: all_indirect_jump_non_call_ret This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.
+	0xc8 extra: all_indirect_near_return This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.
+	0xd0 extra: all_direct_near_call This event counts both taken and not taken speculative and retired direct near calls.
+name:br_misp_exec type:exclusive default:0xff
+	0xff extra: all_branches This event counts both taken and not taken speculative and retired mispredicted branch instructions.
+	0x41 extra: nontaken_conditional This event counts not taken speculative and retired mispredicted macro conditional branch instructions.
+	0x81 extra: taken_conditional This event counts taken speculative and retired mispredicted macro conditional branch instructions.
+	0x84 extra: taken_indirect_jump_non_call_ret This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.
+	0xc1 extra: all_conditional This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.
+	0xc4 extra: all_indirect_jump_non_call_ret This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.
+	0xa0 extra: taken_indirect_near_call Taken speculative and retired mispredicted indirect calls
+name:idq_uops_not_delivered type:exclusive default:0x1
+	0x1 extra: core This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding "4 - x" when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread; b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); c. Instruction Decode Queue (IDQ) delivers four uops.
+	0x1 extra: cycles_0_uops_deliv_core This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.
+	0x1 extra: cycles_le_1_uop_deliv_core This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.
+	0x1 extra: cycles_le_2_uop_deliv_core Cycles with less than 2 uops delivered by the front end
+	0x1 extra: cycles_le_3_uop_deliv_core Cycles with less than 3 uops delivered by the front end
+	0x1 extra:inv cycles_fe_was_ok Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.
+name:uops_executed_port type:exclusive default:0x1
+	0x1 extra:any port_0_core Cycles per core when uops are exectuted in port 0
+	0x2 extra:any port_1_core Cycles per core when uops are exectuted in port 1
+	0x4 extra:any port_2_core Cycles per core when uops are dispatched to port 2
+	0x8 extra:any port_3_core Cycles per core when uops are dispatched to port 3
+	0x10 extra:any port_4_core Cycles per core when uops are exectuted in port 4
+	0x20 extra:any port_5_core Cycles per core when uops are exectuted in port 5
+	0x40 extra:any port_6_core Cycles per core when uops are exectuted in port 6
+	0x80 extra:any port_7_core Cycles per core when uops are dispatched to port 7
+	0x1 extra: port_0 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.
+	0x2 extra: port_1 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.
+	0x4 extra: port_2 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.
+	0x8 extra: port_3 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.
+	0x10 extra: port_4 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.
+	0x20 extra: port_5 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.
+	0x40 extra: port_6 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.
+	0x80 extra: port_7 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.
+name:uops_dispatched_port type:exclusive default:0x1
+	0x1 extra: port_0 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.
+	0x2 extra: port_1 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.
+	0x4 extra: port_2 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.
+	0x8 extra: port_3 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.
+	0x10 extra: port_4 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.
+	0x20 extra: port_5 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.
+	0x40 extra: port_6 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.
+	0x80 extra: port_7 This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.
+name:resource_stalls type:exclusive default:0x1
+	0x1 extra: any This event counts resource-related stall cycles. Reasons for stalls can be as follows: - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots) - *any* u-arch structure got empty (like INT/SIMD FreeLists) - FPU control word (FPCW), MXCSR and others. This counts cycles that the pipeline backend blocked uop delivery from the front end.
+	0x4 extra: rs This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.
+	0x8 extra: sb This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.
+	0x10 extra: rob This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.
+name:cycle_activity type:exclusive default:0x1
+	0x1 extra: cycles_l2_pending Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.
+	0x8 extra: cycles_l1d_pending Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.
+	0x2 extra: cycles_ldm_pending Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem)
+	0x4 extra: cycles_no_execute Counts number of cycles nothing is executed on any execution port.
+	0x5 extra: stalls_l2_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache. (as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands
+	0x6 extra: stalls_ldm_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.
+	0xc extra: stalls_l1d_pending Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.
+	0x8 extra: cycles_l1d_miss Cycles while L1 cache miss demand load is outstanding.
+	0x1 extra: cycles_l2_miss Cycles while L2 cache miss demand load is outstanding.
+	0x2 extra: cycles_mem_any Cycles while memory subsystem has an outstanding load.
+	0x4 extra: stalls_total Total execution stalls.
+	0xc extra: stalls_l1d_miss Execution stalls while L1 cache miss demand load is outstanding.
+	0x5 extra: stalls_l2_miss Execution stalls while L2 cache miss demand load is outstanding.
+	0x6 extra: stalls_mem_any Execution stalls while memory subsystem has an outstanding load.
+name:lsd type:exclusive default:0x1
+	0x1 extra: uops Number of Uops delivered by the LSD. Read more on LSD under LSD_REPLAY.REPLAY
+	0x1 extra: cycles_4_uops Cycles 4 Uops delivered by the LSD, but didn't come from the decoder
+	0x1 extra: cycles_active Cycles Uops delivered by the LSD, but didn't come from the decoder
+name:offcore_requests type:exclusive default:0x1
+	0x1 extra: demand_data_rd This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.
+	0x2 extra: demand_code_rd This event counts both cacheable and noncachaeble code read requests.
+	0x4 extra: demand_rfo This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.
+	0x8 extra: all_data_rd This event counts the demand and prefetch data reads. All Core Data Reads include cacheable "Demands" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.
+name:uops_executed type:exclusive default:0x1
+	0x1 extra: thread Number of uops to be executed per-thread each cycle.
+	0x2 extra: core Number of uops executed from any thread
+	0x1 extra:inv stall_cycles This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.
+	0x1 extra: cycles_ge_1_uop_exec Cycles where at least 1 uop was executed per-thread
+	0x1 extra: cycles_ge_2_uops_exec Cycles where at least 2 uops were executed per-thread
+	0x1 extra: cycles_ge_3_uops_exec Cycles where at least 3 uops were executed per-thread
+	0x1 extra: cycles_ge_4_uops_exec Cycles where at least 4 uops were executed per-thread
+name:page_walker_loads type:exclusive default:0x11
+	0x11 extra: dtlb_l1 Number of DTLB page walker hits in the L1+FB
+	0x21 extra: itlb_l1 Number of ITLB page walker hits in the L1+FB
+	0x12 extra: dtlb_l2 Number of DTLB page walker hits in the L2
+	0x22 extra: itlb_l2 Number of ITLB page walker hits in the L2
+	0x14 extra: dtlb_l3 Number of DTLB page walker hits in the L3 + XSNP
+	0x24 extra: itlb_l3 Number of ITLB page walker hits in the L3 + XSNP
+	0x18 extra: dtlb_memory Number of DTLB page walker hits in Memory
+name:inst_retired type:exclusive default:0x2
+	0x2 extra: x87 This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.
+	0x1 extra: prec_dist This is a precise version (that is, uses PEBS) of the event that counts instructions retired.
+name:other_assists type:exclusive default:0x8
+	0x8 extra: avx_to_sse This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.
+	0x10 extra: sse_to_avx This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.
+	0x40 extra: any_wb_assist Number of times any microcode assist is invoked by HW upon uop writeback.
+name:uops_retired type:exclusive default:0x1
+	0x1 extra: all This is a non-precise version (that is, does not use PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.
+	0x2 extra: retire_slots This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.
+	0x1 extra:inv stall_cycles This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.
+	0x1 extra:inv total_cycles Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.
+name:machine_clears type:exclusive default:0x1
+	0x1 extra: cycles This event counts both thread-specific (TS) and all-thread (AT) nukes.
+	0x2 extra: memory_ordering This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following: 1. memory disambiguation, 2. external snoop, or 3. cross SMT-HW-thread snoop (stores) hitting load buffer.
+	0x4 extra: smc This event counts self-modifying code (SMC) detected, which causes a machine clear.
+	0x20 extra: maskmov Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.
+	0x1 extra:edge count Number of machine clears (nukes) of any type.
+name:br_inst_retired type:exclusive default:0x1
+	0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.
+	0x2 extra: near_call This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.
+	0x8 extra: near_return This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.
+	0x10 extra: not_taken This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.
+	0x20 extra: near_taken This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.
+	0x40 extra: far_branch This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.
+	0x4 extra: all_branches_pebs This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.
+name:br_misp_retired type:exclusive default:0x1
+	0x1 extra: conditional This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.
+	0x4 extra: all_branches_pebs This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.
+	0x20 extra: near_taken number of near branch instructions retired that were mispredicted and taken.
+name:hle_retired type:exclusive default:0x1
+	0x1 extra: start Number of times we entered an HLE region; does not count nested transactions
+	0x2 extra: commit Number of times HLE commit succeeded
+	0x4 extra: aborted Number of times HLE abort was triggered
+	0x8 extra: aborted_misc1 Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details)
+	0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an HLE abort
+	0x20 extra: aborted_misc3 Number of times a disallowed operation caused an HLE abort
+	0x40 extra: aborted_misc4 Number of times HLE caused a fault
+	0x80 extra: aborted_misc5 Number of times HLE aborted and was not due to the abort conditions in subevents 3-6
+name:rtm_retired type:exclusive default:0x1
+	0x1 extra: start Number of times we entered an RTM region; does not count nested transactions
+	0x2 extra: commit Number of times RTM commit succeeded
+	0x4 extra: aborted Number of times RTM abort was triggered
+	0x8 extra: aborted_misc1 Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details)
+	0x10 extra: aborted_misc2 Number of times the TSX watchdog signaled an RTM abort
+	0x20 extra: aborted_misc3 Number of times a disallowed operation caused an RTM abort
+	0x40 extra: aborted_misc4 Number of times a RTM caused a fault
+	0x80 extra: aborted_misc5 Number of times RTM aborted and was not due to the abort conditions in subevents 3-6
+name:fp_assist type:exclusive default:0x1e
+	0x1e extra: any This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.
+	0x2 extra: x87_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.
+	0x4 extra: x87_input This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.
+	0x8 extra: simd_output This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.
+	0x10 extra: simd_input This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.
+name:mem_uops_retired type:exclusive default:0x11
+	0x11 extra: stlb_miss_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
+	0x12 extra: stlb_miss_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.
+	0x21 extra: lock_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.
+	0x41 extra: split_loads This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
+	0x42 extra: split_stores This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).
+	0x81 extra: all_loads This is a non-precise version (that is, does not use PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.
+	0x82 extra: all_stores This is a non-precise version (that is, does not use PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied. Note: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.
+name:mem_load_uops_retired type:exclusive default:0x1
+	0x1 extra: l1_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source
+	0x2 extra: l2_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.
+	0x4 extra: l3_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.
+	0x8 extra: l1_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.
+	0x10 extra: l2_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.
+	0x20 extra: l3_miss Miss in last-level (L3) cache. Excludes Unknown data-source.
+	0x40 extra: hit_lfb This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready. Note: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.
+name:mem_load_uops_l3_hit_retired type:exclusive default:0x1
+	0x1 extra: xsnp_miss This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.
+	0x2 extra: xsnp_hit This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.
+	0x4 extra: xsnp_hitm This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).
+	0x8 extra: xsnp_none This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.
+name:l2_trans type:exclusive default:0x80
+	0x80 extra: all_requests This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.
+	0x1 extra: demand_data_rd This event counts Demand Data Read requests that access L2 cache, including rejects.
+	0x2 extra: rfo This event counts Read for Ownership (RFO) requests that access L2 cache.
+	0x4 extra: code_rd This event counts the number of L2 cache accesses when fetching instructions.
+	0x8 extra: all_pf This event counts L2 or L3 HW prefetches that access L2 cache including rejects.
+	0x10 extra: l1d_wb This event counts L1D writebacks that access L2 cache.
+	0x20 extra: l2_fill This event counts L2 fill requests that access L2 cache.
+	0x40 extra: l2_wb This event counts L2 writebacks that access L2 cache.
+name:l2_lines_in type:exclusive default:0x7
+	0x7 extra: all This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.
+	0x1 extra: i This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.
+	0x2 extra: s This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.
+	0x4 extra: e This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.
diff --git a/libop/op_cpu_type.c b/libop/op_cpu_type.c
index 0cfb4ea..bce230a 100644
--- a/libop/op_cpu_type.c
+++ b/libop/op_cpu_type.c
@@ -130,6 +130,7 @@ static struct cpu_descr const cpu_descrs[MAX_CPU_TYPE] = {
 	{ "IBM Power Architected Events V1", "ppc64/architected_events_v1", CPU_PPC64_ARCH_V1, 6 },
 	{ "ppc64 POWER8", "ppc64/power8", CPU_PPC64_POWER8, 6 },
 	{ "Intel Silvermont microarchitecture", "i386/silvermont", CPU_SILVERMONT, 2 },
+	{ "Intel Broadwell microarchitecture", "i386/broadwell", CPU_BROADWELL, 4 },
 };
 
 static size_t const nr_cpu_descrs = sizeof(cpu_descrs) / sizeof(struct cpu_descr);
@@ -670,6 +671,7 @@ op_cpu op_cpu_base_type(op_cpu cpu_type)
 	case CPU_ATOM:
 	case CPU_NEHALEM:
 	case CPU_HASWELL:
+	case CPU_BROADWELL:
 	case CPU_SILVERMONT:
 	case CPU_WESTMERE:
 	case CPU_SANDYBRIDGE:
diff --git a/libop/op_cpu_type.h b/libop/op_cpu_type.h
index 7c478ad..3754156 100644
--- a/libop/op_cpu_type.h
+++ b/libop/op_cpu_type.h
@@ -110,6 +110,7 @@ typedef enum {
 	CPU_PPC64_ARCH_V1, /** < IBM Power architected events version 1 */
 	CPU_PPC64_POWER8, /**< ppc64 POWER8 family */
 	CPU_SILVERMONT, /** < Intel Silvermont microarchitecture */
+	CPU_BROADWELL, /** < Intel Broadwell (Core-M) microarchitecture */
 	MAX_CPU_TYPE
 } op_cpu;
diff --git a/libop/op_events.c b/libop/op_events.c
index 968ff04..9c27e6c 100644
--- a/libop/op_events.c
+++ b/libop/op_events.c
@@ -1201,6 +1201,7 @@ void op_default_event(op_cpu cpu_type, struct op_default_event_descr * descr)
 	case CPU_CORE_I7:
 	case CPU_NEHALEM:
 	case CPU_HASWELL:
+	case CPU_BROADWELL:
 	case CPU_SILVERMONT:
 	case CPU_WESTMERE:
 	case CPU_SANDYBRIDGE:
diff --git a/libop/op_hw_specific.h b/libop/op_hw_specific.h
index e86dcae..1d39692 100644
--- a/libop/op_hw_specific.h
+++ b/libop/op_hw_specific.h
@@ -148,8 +148,11 @@ static inline op_cpu op_cpu_specific_type(op_cpu cpu_type)
 	case 0x3f:
 	case 0x45:
 	case 0x46:
-	case 0x47:
 		return CPU_HASWELL;
+	case 0x3d:
+	case 0x47:
+	case 0x4f:
+		return CPU_BROADWELL;
 	case 0x37:
 	case 0x4d:
 		return CPU_SILVERMONT;
diff --git a/utils/ophelp.c b/utils/ophelp.c
index 35f47bc..bf3fbcb 100644
--- a/utils/ophelp.c
+++ b/utils/ophelp.c
@@ -555,6 +555,7 @@ int main(int argc, char const * argv[])
 	case CPU_CORE_I7:
 	case CPU_NEHALEM:
 	case CPU_HASWELL:
+	case CPU_BROADWELL:
 	case CPU_SILVERMONT:
 	case CPU_WESTMERE:
 	case CPU_SANDYBRIDGE:
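
For reference, the detection side of this patch reduces to matching the CPUID display model against the three Broadwell values added to op_hw_specific.h (0x3d, 0x47, 0x4f). Below is a minimal standalone C sketch of that mapping, assuming GCC's <cpuid.h>; the is_broadwell() helper is illustrative only and is not oprofile's actual code.

#include <stdio.h>
#include <cpuid.h>

/* Hypothetical helper, not part of oprofile: returns 1 if the running CPU
 * reports one of the Broadwell display models this patch adds. */
static int is_broadwell(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;

	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	/* For family 6, the extended model bits (EAX[19:16]) form the upper
	 * nibble of the display model, which is the value the switch in
	 * op_hw_specific.h compares against. */
	if (family == 6)
		model |= ((eax >> 16) & 0xf) << 4;

	switch (model) {
	case 0x3d:	/* Broadwell (Core-M), per the new op_cpu_type.h comment */
	case 0x47:	/* moved out of the Haswell list by this patch */
	case 0x4f:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("Broadwell detected: %s\n", is_broadwell() ? "yes" : "no");
	return 0;
}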