root/arch/arm64/kernel/perf_event.c

DEFINITIONS

This source file includes the following definitions:
  1. armv8pmu_events_sysfs_show
  2. armv8pmu_event_attr_is_visible
  3. armv8pmu_event_is_64bit
  4. armv8pmu_event_is_chained
  5. armv8pmu_pmcr_read
  6. armv8pmu_pmcr_write
  7. armv8pmu_has_overflowed
  8. armv8pmu_counter_valid
  9. armv8pmu_counter_has_overflowed
  10. armv8pmu_select_counter
  11. armv8pmu_read_evcntr
  12. armv8pmu_read_hw_counter
  13. armv8pmu_read_counter
  14. armv8pmu_write_evcntr
  15. armv8pmu_write_hw_counter
  16. armv8pmu_write_counter
  17. armv8pmu_write_evtype
  18. armv8pmu_write_event_type
  19. armv8pmu_enable_counter
  20. armv8pmu_enable_event_counter
  21. armv8pmu_disable_counter
  22. armv8pmu_disable_event_counter
  23. armv8pmu_enable_intens
  24. armv8pmu_enable_event_irq
  25. armv8pmu_disable_intens
  26. armv8pmu_disable_event_irq
  27. armv8pmu_getreset_flags
  28. armv8pmu_enable_event
  29. armv8pmu_disable_event
  30. armv8pmu_start
  31. armv8pmu_stop
  32. armv8pmu_handle_irq
  33. armv8pmu_get_single_idx
  34. armv8pmu_get_chain_idx
  35. armv8pmu_get_event_idx
  36. armv8pmu_clear_event_idx
  37. armv8pmu_set_event_filter
  38. armv8pmu_filter_match
  39. armv8pmu_reset
  40. __armv8_pmuv3_map_event
  41. armv8_pmuv3_map_event
  42. armv8_a53_map_event
  43. armv8_a57_map_event
  44. armv8_a73_map_event
  45. armv8_thunder_map_event
  46. armv8_vulcan_map_event
  47. __armv8pmu_probe_pmu
  48. armv8pmu_probe_pmu
  49. armv8_pmu_init
  50. armv8_pmuv3_init
  51. armv8_a35_pmu_init
  52. armv8_a53_pmu_init
  53. armv8_a57_pmu_init
  54. armv8_a72_pmu_init
  55. armv8_a73_pmu_init
  56. armv8_thunder_pmu_init
  57. armv8_vulcan_pmu_init
  58. armv8_pmu_device_probe
  59. armv8_pmu_driver_init
  60. device_initcall

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * ARMv8 PMUv3 Performance Events handling code.
   4  *
   5  * Copyright (C) 2012 ARM Limited
   6  * Author: Will Deacon <will.deacon@arm.com>
   7  *
   8  * This code is based heavily on the ARMv7 perf event code.
   9  */
  10 
  11 #include <asm/irq_regs.h>
  12 #include <asm/perf_event.h>
  13 #include <asm/sysreg.h>
  14 #include <asm/virt.h>
  15 
  16 #include <linux/acpi.h>
  17 #include <linux/clocksource.h>
  18 #include <linux/kvm_host.h>
  19 #include <linux/of.h>
  20 #include <linux/perf/arm_pmu.h>
  21 #include <linux/platform_device.h>
  22 #include <linux/smp.h>
  23 
  24 /* ARMv8 Cortex-A53 specific event types. */
  25 #define ARMV8_A53_PERFCTR_PREF_LINEFILL                         0xC2
  26 
  27 /* ARMv8 Cavium ThunderX specific event types. */
  28 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST                 0xE9
  29 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS             0xEA
  30 #define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS               0xEB
  31 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS             0xEC
  32 #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS               0xED
  33 
  34 /*
  35  * ARMv8 architecturally defined events. Not all of these may be
  36  * supported on any given implementation; unsupported events are
  37  * disabled at run time based on the PMCEID registers.
  38  */
  39 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
  40         PERF_MAP_ALL_UNSUPPORTED,
  41         [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
  42         [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
  43         [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
  44         [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
  45         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
  46         [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
  47         [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
  48         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
  49         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
  50 };
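/*
 * As an illustration (not from the original source): with this map in place,
 * a generic request such as "perf stat -e cycles,instructions" resolves to
 * ARMV8_PMUV3_PERFCTR_CPU_CYCLES and ARMV8_PMUV3_PERFCTR_INST_RETIRED, while
 * generic events left as PERF_MAP_ALL_UNSUPPORTED are rejected when the
 * event is opened.
 */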
  51 
  52 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  53                                                 [PERF_COUNT_HW_CACHE_OP_MAX]
  54                                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
  55         PERF_CACHE_MAP_ALL_UNSUPPORTED,
  56 
  57         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
  58         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
  59 
  60         [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
  61         [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
  62 
  63         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
  64         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
  65 
  66         [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
  67         [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
  68 
  69         [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_BR_PRED,
  70         [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
  71 };
  72 
  73 static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  74                                               [PERF_COUNT_HW_CACHE_OP_MAX]
  75                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
  76         PERF_CACHE_MAP_ALL_UNSUPPORTED,
  77 
  78         [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
  79 
  80         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
  81         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
  82 };
  83 
  84 static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  85                                               [PERF_COUNT_HW_CACHE_OP_MAX]
  86                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
  87         PERF_CACHE_MAP_ALL_UNSUPPORTED,
  88 
  89         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
  90         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
  91         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
  92         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
  93 
  94         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
  95         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
  96 
  97         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
  98         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
  99 };
 100 
 101 static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 102                                               [PERF_COUNT_HW_CACHE_OP_MAX]
 103                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 104         PERF_CACHE_MAP_ALL_UNSUPPORTED,
 105 
 106         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
 107         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
 108 };
 109 
 110 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 111                                                    [PERF_COUNT_HW_CACHE_OP_MAX]
 112                                                    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 113         PERF_CACHE_MAP_ALL_UNSUPPORTED,
 114 
 115         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
 116         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
 117         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
 118         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
 119         [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
 120         [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
 121 
 122         [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
 123         [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
 124 
 125         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
 126         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
 127         [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
 128         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
 129 };
 130 
 131 static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 132                                               [PERF_COUNT_HW_CACHE_OP_MAX]
 133                                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 134         PERF_CACHE_MAP_ALL_UNSUPPORTED,
 135 
 136         [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
 137         [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
 138         [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
 139         [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
 140 
 141         [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
 142         [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
 143         [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
 144         [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
 145 
 146         [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
 147         [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
 148 };
 149 
 150 static ssize_t
 151 armv8pmu_events_sysfs_show(struct device *dev,
 152                            struct device_attribute *attr, char *page)
 153 {
 154         struct perf_pmu_events_attr *pmu_attr;
 155 
 156         pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
 157 
 158         return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
 159 }
 160 
 161 #define ARMV8_EVENT_ATTR(name, config) \
 162         PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
 163                        config, armv8pmu_events_sysfs_show)
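/*
 * As an illustration: each attribute defined below is exposed, when the
 * corresponding event is advertised in PMCEID, under the PMU's sysfs
 * "events" directory (typically
 * /sys/bus/event_source/devices/armv8_pmuv3/events/<name>; the exact path
 * depends on the PMU name chosen at probe time). Reading it returns the
 * string built by armv8pmu_events_sysfs_show(), e.g. "event=0x011" for
 * cpu_cycles.
 */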
 164 
 165 ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
 166 ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
 167 ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
 168 ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
 169 ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
 170 ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
 171 ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
 172 ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
 173 ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
 174 ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
 175 ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
 176 ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
 177 ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
 178 ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
 179 ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
 180 ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
 181 ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
 182 ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
 183 ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
 184 ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
 185 ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
 186 ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
 187 ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
 188 ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
 189 ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
 190 ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
 191 ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
 192 ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
 193 ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
 194 ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
 195 /* Don't expose the chain event in /sys, since it's useless in isolation */
 196 ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
 197 ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
 198 ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
 199 ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
 200 ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
 201 ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
 202 ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
 203 ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
 204 ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
 205 ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
 206 ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
 207 ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
 208 ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
 209 ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
 210 ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
 211 ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
 212 ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
 213 ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
 214 ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
 215 ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
 216 ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
 217 ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
 218 ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
 219 ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
 220 ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
 221 ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
 222 ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
 223 ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
 224 ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
 225 ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
 226 
 227 static struct attribute *armv8_pmuv3_event_attrs[] = {
 228         &armv8_event_attr_sw_incr.attr.attr,
 229         &armv8_event_attr_l1i_cache_refill.attr.attr,
 230         &armv8_event_attr_l1i_tlb_refill.attr.attr,
 231         &armv8_event_attr_l1d_cache_refill.attr.attr,
 232         &armv8_event_attr_l1d_cache.attr.attr,
 233         &armv8_event_attr_l1d_tlb_refill.attr.attr,
 234         &armv8_event_attr_ld_retired.attr.attr,
 235         &armv8_event_attr_st_retired.attr.attr,
 236         &armv8_event_attr_inst_retired.attr.attr,
 237         &armv8_event_attr_exc_taken.attr.attr,
 238         &armv8_event_attr_exc_return.attr.attr,
 239         &armv8_event_attr_cid_write_retired.attr.attr,
 240         &armv8_event_attr_pc_write_retired.attr.attr,
 241         &armv8_event_attr_br_immed_retired.attr.attr,
 242         &armv8_event_attr_br_return_retired.attr.attr,
 243         &armv8_event_attr_unaligned_ldst_retired.attr.attr,
 244         &armv8_event_attr_br_mis_pred.attr.attr,
 245         &armv8_event_attr_cpu_cycles.attr.attr,
 246         &armv8_event_attr_br_pred.attr.attr,
 247         &armv8_event_attr_mem_access.attr.attr,
 248         &armv8_event_attr_l1i_cache.attr.attr,
 249         &armv8_event_attr_l1d_cache_wb.attr.attr,
 250         &armv8_event_attr_l2d_cache.attr.attr,
 251         &armv8_event_attr_l2d_cache_refill.attr.attr,
 252         &armv8_event_attr_l2d_cache_wb.attr.attr,
 253         &armv8_event_attr_bus_access.attr.attr,
 254         &armv8_event_attr_memory_error.attr.attr,
 255         &armv8_event_attr_inst_spec.attr.attr,
 256         &armv8_event_attr_ttbr_write_retired.attr.attr,
 257         &armv8_event_attr_bus_cycles.attr.attr,
 258         &armv8_event_attr_l1d_cache_allocate.attr.attr,
 259         &armv8_event_attr_l2d_cache_allocate.attr.attr,
 260         &armv8_event_attr_br_retired.attr.attr,
 261         &armv8_event_attr_br_mis_pred_retired.attr.attr,
 262         &armv8_event_attr_stall_frontend.attr.attr,
 263         &armv8_event_attr_stall_backend.attr.attr,
 264         &armv8_event_attr_l1d_tlb.attr.attr,
 265         &armv8_event_attr_l1i_tlb.attr.attr,
 266         &armv8_event_attr_l2i_cache.attr.attr,
 267         &armv8_event_attr_l2i_cache_refill.attr.attr,
 268         &armv8_event_attr_l3d_cache_allocate.attr.attr,
 269         &armv8_event_attr_l3d_cache_refill.attr.attr,
 270         &armv8_event_attr_l3d_cache.attr.attr,
 271         &armv8_event_attr_l3d_cache_wb.attr.attr,
 272         &armv8_event_attr_l2d_tlb_refill.attr.attr,
 273         &armv8_event_attr_l2i_tlb_refill.attr.attr,
 274         &armv8_event_attr_l2d_tlb.attr.attr,
 275         &armv8_event_attr_l2i_tlb.attr.attr,
 276         &armv8_event_attr_remote_access.attr.attr,
 277         &armv8_event_attr_ll_cache.attr.attr,
 278         &armv8_event_attr_ll_cache_miss.attr.attr,
 279         &armv8_event_attr_dtlb_walk.attr.attr,
 280         &armv8_event_attr_itlb_walk.attr.attr,
 281         &armv8_event_attr_ll_cache_rd.attr.attr,
 282         &armv8_event_attr_ll_cache_miss_rd.attr.attr,
 283         &armv8_event_attr_remote_access_rd.attr.attr,
 284         &armv8_event_attr_sample_pop.attr.attr,
 285         &armv8_event_attr_sample_feed.attr.attr,
 286         &armv8_event_attr_sample_filtrate.attr.attr,
 287         &armv8_event_attr_sample_collision.attr.attr,
 288         NULL,
 289 };
 290 
 291 static umode_t
 292 armv8pmu_event_attr_is_visible(struct kobject *kobj,
 293                                struct attribute *attr, int unused)
 294 {
 295         struct device *dev = kobj_to_dev(kobj);
 296         struct pmu *pmu = dev_get_drvdata(dev);
 297         struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
 298         struct perf_pmu_events_attr *pmu_attr;
 299 
 300         pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
 301 
 302         if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
 303             test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
 304                 return attr->mode;
 305 
 306         pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
 307         if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
 308             test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
 309                 return attr->mode;
 310 
 311         return 0;
 312 }
 313 
 314 static struct attribute_group armv8_pmuv3_events_attr_group = {
 315         .name = "events",
 316         .attrs = armv8_pmuv3_event_attrs,
 317         .is_visible = armv8pmu_event_attr_is_visible,
 318 };
 319 
 320 PMU_FORMAT_ATTR(event, "config:0-15");
 321 PMU_FORMAT_ATTR(long, "config1:0");
 322 
 323 static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
 324 {
 325         return event->attr.config1 & 0x1;
 326 }
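/*
 * As an illustration: the "long" format field above exposes config1 bit 0,
 * so userspace can request a 64-bit (chained) counter with something like
 * "perf stat -e armv8_pmuv3/inst_retired,long=1/". The PMU and event names
 * here are only examples; any event advertised in sysfs can be used the
 * same way.
 */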
 327 
 328 static struct attribute *armv8_pmuv3_format_attrs[] = {
 329         &format_attr_event.attr,
 330         &format_attr_long.attr,
 331         NULL,
 332 };
 333 
 334 static struct attribute_group armv8_pmuv3_format_attr_group = {
 335         .name = "format",
 336         .attrs = armv8_pmuv3_format_attrs,
 337 };
 338 
 339 /*
 340  * Perf Events' indices
 341  */
 342 #define ARMV8_IDX_CYCLE_COUNTER 0
 343 #define ARMV8_IDX_COUNTER0      1
 344 #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
 345         (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 346 
 347 /*
 348  * We must chain two programmable counters for 64-bit events,
 349  * except when we have allocated the 64-bit cycle counter (for the
 350  * CPU cycles event). This must be called only when the event has
 351  * a counter allocated.
 352  */
 353 static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 354 {
 355         int idx = event->hw.idx;
 356 
 357         return !WARN_ON(idx < 0) &&
 358                armv8pmu_event_is_64bit(event) &&
 359                (idx != ARMV8_IDX_CYCLE_COUNTER);
 360 }
 361 
 362 /*
 363  * ARMv8 low level PMU access
 364  */
 365 
 366 /*
 367  * Perf Event to low level counters mapping
 368  */
 369 #define ARMV8_IDX_TO_COUNTER(x) \
 370         (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
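/*
 * Worked example of the mapping above: perf idx ARMV8_IDX_CYCLE_COUNTER (0)
 * is the dedicated cycle counter and never goes through this macro, while
 * perf idx ARMV8_IDX_COUNTER0 (1) maps to hardware event counter 0, idx 2
 * to counter 1, and so on.
 */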
 371 
 372 static inline u32 armv8pmu_pmcr_read(void)
 373 {
 374         return read_sysreg(pmcr_el0);
 375 }
 376 
 377 static inline void armv8pmu_pmcr_write(u32 val)
 378 {
 379         val &= ARMV8_PMU_PMCR_MASK;
 380         isb();
 381         write_sysreg(val, pmcr_el0);
 382 }
 383 
 384 static inline int armv8pmu_has_overflowed(u32 pmovsr)
 385 {
 386         return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
 387 }
 388 
 389 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 390 {
 391         return idx >= ARMV8_IDX_CYCLE_COUNTER &&
 392                 idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
 393 }
 394 
 395 static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 396 {
 397         return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 398 }
 399 
 400 static inline void armv8pmu_select_counter(int idx)
 401 {
 402         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 403         write_sysreg(counter, pmselr_el0);
 404         isb();
 405 }
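/*
 * Note on the indirection above: PMXEVCNTR_EL0 and PMXEVTYPER_EL0 are banked
 * by PMSELR_EL0, so every indirect counter access first selects the counter
 * and issues an isb() to make the selection visible before the following
 * read or write.
 */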
 406 
 407 static inline u32 armv8pmu_read_evcntr(int idx)
 408 {
 409         armv8pmu_select_counter(idx);
 410         return read_sysreg(pmxevcntr_el0);
 411 }
 412 
 413 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
 414 {
 415         int idx = event->hw.idx;
 416         u64 val = 0;
 417 
 418         val = armv8pmu_read_evcntr(idx);
 419         if (armv8pmu_event_is_chained(event))
 420                 val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
 421         return val;
 422 }
 423 
 424 static u64 armv8pmu_read_counter(struct perf_event *event)
 425 {
 426         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 427         struct hw_perf_event *hwc = &event->hw;
 428         int idx = hwc->idx;
 429         u64 value = 0;
 430 
 431         if (!armv8pmu_counter_valid(cpu_pmu, idx))
 432                 pr_err("CPU%u reading wrong counter %d\n",
 433                         smp_processor_id(), idx);
 434         else if (idx == ARMV8_IDX_CYCLE_COUNTER)
 435                 value = read_sysreg(pmccntr_el0);
 436         else
 437                 value = armv8pmu_read_hw_counter(event);
 438 
 439         return value;
 440 }
 441 
 442 static inline void armv8pmu_write_evcntr(int idx, u32 value)
 443 {
 444         armv8pmu_select_counter(idx);
 445         write_sysreg(value, pmxevcntr_el0);
 446 }
 447 
 448 static inline void armv8pmu_write_hw_counter(struct perf_event *event,
 449                                              u64 value)
 450 {
 451         int idx = event->hw.idx;
 452 
 453         if (armv8pmu_event_is_chained(event)) {
 454                 armv8pmu_write_evcntr(idx, upper_32_bits(value));
 455                 armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
 456         } else {
 457                 armv8pmu_write_evcntr(idx, value);
 458         }
 459 }
 460 
 461 static void armv8pmu_write_counter(struct perf_event *event, u64 value)
 462 {
 463         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 464         struct hw_perf_event *hwc = &event->hw;
 465         int idx = hwc->idx;
 466 
 467         if (!armv8pmu_counter_valid(cpu_pmu, idx))
 468                 pr_err("CPU%u writing wrong counter %d\n",
 469                         smp_processor_id(), idx);
 470         else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
 471                 /*
 472                  * The cycles counter is really a 64-bit counter.
 473                  * When treating it as a 32-bit counter, we count only
 474                  * the lower 32 bits and set the upper 32 bits so that
 475                  * we get an interrupt upon 32-bit overflow.
 476                  */
 477                 if (!armv8pmu_event_is_64bit(event))
 478                         value |= 0xffffffff00000000ULL;
 479                 write_sysreg(value, pmccntr_el0);
 480         } else
 481                 armv8pmu_write_hw_counter(event, value);
 482 }
 483 
 484 static inline void armv8pmu_write_evtype(int idx, u32 val)
 485 {
 486         armv8pmu_select_counter(idx);
 487         val &= ARMV8_PMU_EVTYPE_MASK;
 488         write_sysreg(val, pmxevtyper_el0);
 489 }
 490 
 491 static inline void armv8pmu_write_event_type(struct perf_event *event)
 492 {
 493         struct hw_perf_event *hwc = &event->hw;
 494         int idx = hwc->idx;
 495 
 496         /*
 497          * For chained events, the low counter is programmed to count
 498          * the event of interest and the high counter is programmed
 499          * with the CHAIN event code, with filters set to count at all ELs.
 500          */
 501         if (armv8pmu_event_is_chained(event)) {
 502                 u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
 503                                 ARMV8_PMU_INCLUDE_EL2;
 504 
 505                 armv8pmu_write_evtype(idx - 1, hwc->config_base);
 506                 armv8pmu_write_evtype(idx, chain_evt);
 507         } else {
 508                 armv8pmu_write_evtype(idx, hwc->config_base);
 509         }
 510 }
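/*
 * As an illustration of the layout set up above for a chained 64-bit event:
 * the even hardware counter counts the requested event and holds the low
 * 32 bits, while the odd counter directly above it is programmed with the
 * architectural CHAIN event and therefore counts overflows of its
 * neighbour, providing the high 32 bits.
 */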
 511 
 512 static inline int armv8pmu_enable_counter(int idx)
 513 {
 514         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 515         write_sysreg(BIT(counter), pmcntenset_el0);
 516         return idx;
 517 }
 518 
 519 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 520 {
 521         struct perf_event_attr *attr = &event->attr;
 522         int idx = event->hw.idx;
 523         u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 524 
 525         if (armv8pmu_event_is_chained(event))
 526                 counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
 527 
 528         kvm_set_pmu_events(counter_bits, attr);
 529 
 530         /* We rely on the hypervisor switch code to enable guest counters */
 531         if (!kvm_pmu_counter_deferred(attr)) {
 532                 armv8pmu_enable_counter(idx);
 533                 if (armv8pmu_event_is_chained(event))
 534                         armv8pmu_enable_counter(idx - 1);
 535         }
 536 }
 537 
 538 static inline int armv8pmu_disable_counter(int idx)
 539 {
 540         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 541         write_sysreg(BIT(counter), pmcntenclr_el0);
 542         return idx;
 543 }
 544 
 545 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 546 {
 547         struct hw_perf_event *hwc = &event->hw;
 548         struct perf_event_attr *attr = &event->attr;
 549         int idx = hwc->idx;
 550         u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 551 
 552         if (armv8pmu_event_is_chained(event))
 553                 counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
 554 
 555         kvm_clr_pmu_events(counter_bits);
 556 
 557         /* We rely on the hypervisor switch code to disable guest counters */
 558         if (!kvm_pmu_counter_deferred(attr)) {
 559                 if (armv8pmu_event_is_chained(event))
 560                         armv8pmu_disable_counter(idx - 1);
 561                 armv8pmu_disable_counter(idx);
 562         }
 563 }
 564 
 565 static inline int armv8pmu_enable_intens(int idx)
 566 {
 567         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 568         write_sysreg(BIT(counter), pmintenset_el1);
 569         return idx;
 570 }
 571 
 572 static inline int armv8pmu_enable_event_irq(struct perf_event *event)
 573 {
 574         return armv8pmu_enable_intens(event->hw.idx);
 575 }
 576 
 577 static inline int armv8pmu_disable_intens(int idx)
 578 {
 579         u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 580         write_sysreg(BIT(counter), pmintenclr_el1);
 581         isb();
 582         /* Clear the overflow flag in case an interrupt is pending. */
 583         write_sysreg(BIT(counter), pmovsclr_el0);
 584         isb();
 585 
 586         return idx;
 587 }
 588 
 589 static inline int armv8pmu_disable_event_irq(struct perf_event *event)
 590 {
 591         return armv8pmu_disable_intens(event->hw.idx);
 592 }
 593 
 594 static inline u32 armv8pmu_getreset_flags(void)
 595 {
 596         u32 value;
 597 
 598         /* Read */
 599         value = read_sysreg(pmovsclr_el0);
 600 
 601         /* Write to clear flags */
 602         value &= ARMV8_PMU_OVSR_MASK;
 603         write_sysreg(value, pmovsclr_el0);
 604 
 605         return value;
 606 }
 607 
 608 static void armv8pmu_enable_event(struct perf_event *event)
 609 {
 610         unsigned long flags;
 611         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 612         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 613 
 614         /*
 615          * Enable counter and interrupt, and set the counter to count
 616          * the event that we're interested in.
 617          */
 618         raw_spin_lock_irqsave(&events->pmu_lock, flags);
 619 
 620         /*
 621          * Disable counter
 622          */
 623         armv8pmu_disable_event_counter(event);
 624 
 625         /*
 626          * Set event (if destined for PMNx counters).
 627          */
 628         armv8pmu_write_event_type(event);
 629 
 630         /*
 631          * Enable interrupt for this counter
 632          */
 633         armv8pmu_enable_event_irq(event);
 634 
 635         /*
 636          * Enable counter
 637          */
 638         armv8pmu_enable_event_counter(event);
 639 
 640         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 641 }
 642 
 643 static void armv8pmu_disable_event(struct perf_event *event)
 644 {
 645         unsigned long flags;
 646         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 647         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 648 
 649         /*
 650          * Disable counter and interrupt
 651          */
 652         raw_spin_lock_irqsave(&events->pmu_lock, flags);
 653 
 654         /*
 655          * Disable counter
 656          */
 657         armv8pmu_disable_event_counter(event);
 658 
 659         /*
 660          * Disable interrupt for this counter
 661          */
 662         armv8pmu_disable_event_irq(event);
 663 
 664         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 665 }
 666 
 667 static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 668 {
 669         unsigned long flags;
 670         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 671 
 672         raw_spin_lock_irqsave(&events->pmu_lock, flags);
 673         /* Enable all counters */
 674         armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
 675         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 676 }
 677 
 678 static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 679 {
 680         unsigned long flags;
 681         struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 682 
 683         raw_spin_lock_irqsave(&events->pmu_lock, flags);
 684         /* Disable all counters */
 685         armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
 686         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 687 }
 688 
 689 static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
 690 {
 691         u32 pmovsr;
 692         struct perf_sample_data data;
 693         struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 694         struct pt_regs *regs;
 695         int idx;
 696 
 697         /*
 698          * Get and reset the IRQ flags
 699          */
 700         pmovsr = armv8pmu_getreset_flags();
 701 
 702         /*
 703          * Did an overflow occur?
 704          */
 705         if (!armv8pmu_has_overflowed(pmovsr))
 706                 return IRQ_NONE;
 707 
 708         /*
 709          * Handle the counter(s) overflow(s)
 710          */
 711         regs = get_irq_regs();
 712 
 713         /*
 714          * Stop the PMU while processing the counter overflows
 715          * to prevent skews in group events.
 716          */
 717         armv8pmu_stop(cpu_pmu);
 718         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 719                 struct perf_event *event = cpuc->events[idx];
 720                 struct hw_perf_event *hwc;
 721 
 722                 /* Ignore if we don't have an event. */
 723                 if (!event)
 724                         continue;
 725 
 726                 /*
 727                  * We have a single interrupt for all counters. Check that
 728                  * each counter has overflowed before we process it.
 729                  */
 730                 if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
 731                         continue;
 732 
 733                 hwc = &event->hw;
 734                 armpmu_event_update(event);
 735                 perf_sample_data_init(&data, 0, hwc->last_period);
 736                 if (!armpmu_event_set_period(event))
 737                         continue;
 738 
 739                 if (perf_event_overflow(event, &data, regs))
 740                         cpu_pmu->disable(event);
 741         }
 742         armv8pmu_start(cpu_pmu);
 743 
 744         /*
 745          * Handle the pending perf events.
 746          *
 747          * Note: this call *must* be run with interrupts disabled. For
 748          * platforms that can have the PMU interrupts raised as an NMI, this
 749          * will not work.
 750          */
 751         irq_work_run();
 752 
 753         return IRQ_HANDLED;
 754 }
 755 
 756 static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
 757                                     struct arm_pmu *cpu_pmu)
 758 {
 759         int idx;
 760 
 761         for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
 762                 if (!test_and_set_bit(idx, cpuc->used_mask))
 763                         return idx;
 764         }
 765         return -EAGAIN;
 766 }
 767 
 768 static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
 769                                    struct arm_pmu *cpu_pmu)
 770 {
 771         int idx;
 772 
 773         /*
 774          * Chaining requires two consecutive event counters: an even
 775          * hardware counter and the odd counter above it (for CHAIN).
 776          */
 777         for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
 778                 if (!test_and_set_bit(idx, cpuc->used_mask)) {
 779                         /* Check if the preceding even counter is available */
 780                         if (!test_and_set_bit(idx - 1, cpuc->used_mask))
 781                                 return idx;
 782                         /* Release the odd counter */
 783                         clear_bit(idx, cpuc->used_mask);
 784                 }
 785         }
 786         return -EAGAIN;
 787 }
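/*
 * Concretely (using the idx-to-counter mapping defined earlier): the pairs
 * tried above are perf indices (1, 2), (3, 4), ..., i.e. hardware counter
 * pairs (0, 1), (2, 3), ..., so the low half always lands on an even
 * hardware counter with the odd counter above it reserved for CHAIN.
 */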
 788 
 789 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 790                                   struct perf_event *event)
 791 {
 792         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 793         struct hw_perf_event *hwc = &event->hw;
 794         unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 795 
 796         /* Always prefer to place a CPU cycles event into the cycle counter. */
 797         if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
 798                 if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
 799                         return ARMV8_IDX_CYCLE_COUNTER;
 800         }
 801 
 802         /*
 803          * Otherwise use the event counters.
 804          */
 805         if (armv8pmu_event_is_64bit(event))
 806                 return  armv8pmu_get_chain_idx(cpuc, cpu_pmu);
 807         else
 808                 return armv8pmu_get_single_idx(cpuc, cpu_pmu);
 809 }
 810 
 811 static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 812                                      struct perf_event *event)
 813 {
 814         int idx = event->hw.idx;
 815 
 816         clear_bit(idx, cpuc->used_mask);
 817         if (armv8pmu_event_is_chained(event))
 818                 clear_bit(idx - 1, cpuc->used_mask);
 819 }
 820 
 821 /*
 822  * Add an event filter to a given event.
 823  */
 824 static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 825                                      struct perf_event_attr *attr)
 826 {
 827         unsigned long config_base = 0;
 828 
 829         if (attr->exclude_idle)
 830                 return -EPERM;
 831 
 832         /*
 833          * If we're running in hyp mode, then we *are* the hypervisor.
 834          * Therefore we ignore exclude_hv in this configuration, since
 835          * there's no hypervisor to sample anyway. This is consistent
 836          * with other architectures (x86 and Power).
 837          */
 838         if (is_kernel_in_hyp_mode()) {
 839                 if (!attr->exclude_kernel && !attr->exclude_host)
 840                         config_base |= ARMV8_PMU_INCLUDE_EL2;
 841                 if (attr->exclude_guest)
 842                         config_base |= ARMV8_PMU_EXCLUDE_EL1;
 843                 if (attr->exclude_host)
 844                         config_base |= ARMV8_PMU_EXCLUDE_EL0;
 845         } else {
 846                 if (!attr->exclude_hv && !attr->exclude_host)
 847                         config_base |= ARMV8_PMU_INCLUDE_EL2;
 848         }
 849 
 850         /*
 851          * Filter out !VHE kernels and guest kernels
 852          */
 853         if (attr->exclude_kernel)
 854                 config_base |= ARMV8_PMU_EXCLUDE_EL1;
 855 
 856         if (attr->exclude_user)
 857                 config_base |= ARMV8_PMU_EXCLUDE_EL0;
 858 
 859         /*
 860          * Install the filter into config_base as this is used to
 861          * construct the event type.
 862          */
 863         event->config_base = config_base;
 864 
 865         return 0;
 866 }
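/*
 * As an illustration: a request such as "perf stat -e cycles:u" sets
 * attr->exclude_kernel, which lands here as ARMV8_PMU_EXCLUDE_EL1 in
 * config_base and is later folded into the event type register by
 * armv8pmu_write_event_type(), so in the common case the counter only
 * increments at EL0.
 */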
 867 
 868 static int armv8pmu_filter_match(struct perf_event *event)
 869 {
 870         unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
 871         return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
 872 }
 873 
 874 static void armv8pmu_reset(void *info)
 875 {
 876         struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
 877         u32 idx, nb_cnt = cpu_pmu->num_events;
 878 
 879         /* The counter and interrupt enable registers are unknown at reset. */
 880         for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
 881                 armv8pmu_disable_counter(idx);
 882                 armv8pmu_disable_intens(idx);
 883         }
 884 
 885         /* Clear the counters we flip at guest entry/exit */
 886         kvm_clr_pmu_events(U32_MAX);
 887 
 888         /*
 889          * Initialize and reset the PMNC. Request the overflow interrupt for
 890          * the 64-bit cycle counter but cheat in armv8pmu_write_counter().
 891          */
 892         armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
 893                             ARMV8_PMU_PMCR_LC);
 894 }
 895 
 896 static int __armv8_pmuv3_map_event(struct perf_event *event,
 897                                    const unsigned (*extra_event_map)
 898                                                   [PERF_COUNT_HW_MAX],
 899                                    const unsigned (*extra_cache_map)
 900                                                   [PERF_COUNT_HW_CACHE_MAX]
 901                                                   [PERF_COUNT_HW_CACHE_OP_MAX]
 902                                                   [PERF_COUNT_HW_CACHE_RESULT_MAX])
 903 {
 904         int hw_event_id;
 905         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 906 
 907         hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
 908                                        &armv8_pmuv3_perf_cache_map,
 909                                        ARMV8_PMU_EVTYPE_EVENT);
 910 
 911         if (armv8pmu_event_is_64bit(event))
 912                 event->hw.flags |= ARMPMU_EVT_64BIT;
 913 
 914         /* Only expose micro/arch events supported by this PMU */
 915         if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
 916             && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
 917                 return hw_event_id;
 918         }
 919 
 920         return armpmu_map_event(event, extra_event_map, extra_cache_map,
 921                                 ARMV8_PMU_EVTYPE_EVENT);
 922 }
 923 
 924 static int armv8_pmuv3_map_event(struct perf_event *event)
 925 {
 926         return __armv8_pmuv3_map_event(event, NULL, NULL);
 927 }
 928 
 929 static int armv8_a53_map_event(struct perf_event *event)
 930 {
 931         return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
 932 }
 933 
 934 static int armv8_a57_map_event(struct perf_event *event)
 935 {
 936         return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
 937 }
 938 
 939 static int armv8_a73_map_event(struct perf_event *event)
 940 {
 941         return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
 942 }
 943 
 944 static int armv8_thunder_map_event(struct perf_event *event)
 945 {
 946         return __armv8_pmuv3_map_event(event, NULL,
 947                                        &armv8_thunder_perf_cache_map);
 948 }
 949 
 950 static int armv8_vulcan_map_event(struct perf_event *event)
 951 {
 952         return __armv8_pmuv3_map_event(event, NULL,
 953                                        &armv8_vulcan_perf_cache_map);
 954 }
 955 
 956 struct armv8pmu_probe_info {
 957         struct arm_pmu *pmu;
 958         bool present;
 959 };
 960 
 961 static void __armv8pmu_probe_pmu(void *info)
 962 {
 963         struct armv8pmu_probe_info *probe = info;
 964         struct arm_pmu *cpu_pmu = probe->pmu;
 965         u64 dfr0;
 966         u64 pmceid_raw[2];
 967         u32 pmceid[2];
 968         int pmuver;
 969 
 970         dfr0 = read_sysreg(id_aa64dfr0_el1);
 971         pmuver = cpuid_feature_extract_unsigned_field(dfr0,
 972                         ID_AA64DFR0_PMUVER_SHIFT);
 973         if (pmuver == 0xf || pmuver == 0)
 974                 return;
 975 
 976         probe->present = true;
 977 
 978         /* Read the number of CNTx counters supported from PMNC */
 979         cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
 980                 & ARMV8_PMU_PMCR_N_MASK;
 981 
 982         /* Add the CPU cycles counter */
 983         cpu_pmu->num_events += 1;
 984 
 985         pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
 986         pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
 987 
 988         bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
 989                              pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 990 
 991         pmceid[0] = pmceid_raw[0] >> 32;
 992         pmceid[1] = pmceid_raw[1] >> 32;
 993 
 994         bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
 995                              pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 996 }
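/*
 * As an illustration of the probe above: on a core whose PMCR_EL0.N field
 * reads 6, num_events ends up as 7 (six programmable event counters plus
 * the dedicated cycle counter), and PMCEID0/1 populate the bitmaps used to
 * hide unsupported events from sysfs and from event mapping.
 */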
 997 
 998 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 999 {
1000         struct armv8pmu_probe_info probe = {
1001                 .pmu = cpu_pmu,
1002                 .present = false,
1003         };
1004         int ret;
1005 
1006         ret = smp_call_function_any(&cpu_pmu->supported_cpus,
1007                                     __armv8pmu_probe_pmu,
1008                                     &probe, 1);
1009         if (ret)
1010                 return ret;
1011 
1012         return probe.present ? 0 : -ENODEV;
1013 }
1014 
1015 static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
1016 {
1017         int ret = armv8pmu_probe_pmu(cpu_pmu);
1018         if (ret)
1019                 return ret;
1020 
1021         cpu_pmu->handle_irq             = armv8pmu_handle_irq;
1022         cpu_pmu->enable                 = armv8pmu_enable_event;
1023         cpu_pmu->disable                = armv8pmu_disable_event;
1024         cpu_pmu->read_counter           = armv8pmu_read_counter;
1025         cpu_pmu->write_counter          = armv8pmu_write_counter;
1026         cpu_pmu->get_event_idx          = armv8pmu_get_event_idx;
1027         cpu_pmu->clear_event_idx        = armv8pmu_clear_event_idx;
1028         cpu_pmu->start                  = armv8pmu_start;
1029         cpu_pmu->stop                   = armv8pmu_stop;
1030         cpu_pmu->reset                  = armv8pmu_reset;
1031         cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
1032         cpu_pmu->filter_match           = armv8pmu_filter_match;
1033 
1034         return 0;
1035 }
1036 
1037 static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
1038 {
1039         int ret = armv8_pmu_init(cpu_pmu);
1040         if (ret)
1041                 return ret;
1042 
1043         cpu_pmu->name                   = "armv8_pmuv3";
1044         cpu_pmu->map_event              = armv8_pmuv3_map_event;
1045         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1046                 &armv8_pmuv3_events_attr_group;
1047         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1048                 &armv8_pmuv3_format_attr_group;
1049 
1050         return 0;
1051 }
1052 
1053 static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
1054 {
1055         int ret = armv8_pmu_init(cpu_pmu);
1056         if (ret)
1057                 return ret;
1058 
1059         cpu_pmu->name                   = "armv8_cortex_a35";
1060         cpu_pmu->map_event              = armv8_a53_map_event;
1061         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1062                 &armv8_pmuv3_events_attr_group;
1063         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1064                 &armv8_pmuv3_format_attr_group;
1065 
1066         return 0;
1067 }
1068 
1069 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
1070 {
1071         int ret = armv8_pmu_init(cpu_pmu);
1072         if (ret)
1073                 return ret;
1074 
1075         cpu_pmu->name                   = "armv8_cortex_a53";
1076         cpu_pmu->map_event              = armv8_a53_map_event;
1077         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1078                 &armv8_pmuv3_events_attr_group;
1079         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1080                 &armv8_pmuv3_format_attr_group;
1081 
1082         return 0;
1083 }
1084 
1085 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
1086 {
1087         int ret = armv8_pmu_init(cpu_pmu);
1088         if (ret)
1089                 return ret;
1090 
1091         cpu_pmu->name                   = "armv8_cortex_a57";
1092         cpu_pmu->map_event              = armv8_a57_map_event;
1093         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1094                 &armv8_pmuv3_events_attr_group;
1095         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1096                 &armv8_pmuv3_format_attr_group;
1097 
1098         return 0;
1099 }
1100 
1101 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
1102 {
1103         int ret = armv8_pmu_init(cpu_pmu);
1104         if (ret)
1105                 return ret;
1106 
1107         cpu_pmu->name                   = "armv8_cortex_a72";
1108         cpu_pmu->map_event              = armv8_a57_map_event;
1109         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1110                 &armv8_pmuv3_events_attr_group;
1111         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1112                 &armv8_pmuv3_format_attr_group;
1113 
1114         return 0;
1115 }
1116 
1117 static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
1118 {
1119         int ret = armv8_pmu_init(cpu_pmu);
1120         if (ret)
1121                 return ret;
1122 
1123         cpu_pmu->name                   = "armv8_cortex_a73";
1124         cpu_pmu->map_event              = armv8_a73_map_event;
1125         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1126                 &armv8_pmuv3_events_attr_group;
1127         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1128                 &armv8_pmuv3_format_attr_group;
1129 
1130         return 0;
1131 }
1132 
1133 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
1134 {
1135         int ret = armv8_pmu_init(cpu_pmu);
1136         if (ret)
1137                 return ret;
1138 
1139         cpu_pmu->name                   = "armv8_cavium_thunder";
1140         cpu_pmu->map_event              = armv8_thunder_map_event;
1141         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1142                 &armv8_pmuv3_events_attr_group;
1143         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1144                 &armv8_pmuv3_format_attr_group;
1145 
1146         return 0;
1147 }
1148 
1149 static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
1150 {
1151         int ret = armv8_pmu_init(cpu_pmu);
1152         if (ret)
1153                 return ret;
1154 
1155         cpu_pmu->name                   = "armv8_brcm_vulcan";
1156         cpu_pmu->map_event              = armv8_vulcan_map_event;
1157         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1158                 &armv8_pmuv3_events_attr_group;
1159         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1160                 &armv8_pmuv3_format_attr_group;
1161 
1162         return 0;
1163 }
1164 
1165 static const struct of_device_id armv8_pmu_of_device_ids[] = {
1166         {.compatible = "arm,armv8-pmuv3",       .data = armv8_pmuv3_init},
1167         {.compatible = "arm,cortex-a35-pmu",    .data = armv8_a35_pmu_init},
1168         {.compatible = "arm,cortex-a53-pmu",    .data = armv8_a53_pmu_init},
1169         {.compatible = "arm,cortex-a57-pmu",    .data = armv8_a57_pmu_init},
1170         {.compatible = "arm,cortex-a72-pmu",    .data = armv8_a72_pmu_init},
1171         {.compatible = "arm,cortex-a73-pmu",    .data = armv8_a73_pmu_init},
1172         {.compatible = "cavium,thunder-pmu",    .data = armv8_thunder_pmu_init},
1173         {.compatible = "brcm,vulcan-pmu",       .data = armv8_vulcan_pmu_init},
1174         {},
1175 };
1176 
1177 static int armv8_pmu_device_probe(struct platform_device *pdev)
1178 {
1179         return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
1180 }
1181 
1182 static struct platform_driver armv8_pmu_driver = {
1183         .driver         = {
1184                 .name   = ARMV8_PMU_PDEV_NAME,
1185                 .of_match_table = armv8_pmu_of_device_ids,
1186                 .suppress_bind_attrs = true,
1187         },
1188         .probe          = armv8_pmu_device_probe,
1189 };
1190 
1191 static int __init armv8_pmu_driver_init(void)
1192 {
1193         if (acpi_disabled)
1194                 return platform_driver_register(&armv8_pmu_driver);
1195         else
1196                 return arm_pmu_acpi_probe(armv8_pmuv3_init);
1197 }
1198 device_initcall(armv8_pmu_driver_init)
1199 
1200 void arch_perf_update_userpage(struct perf_event *event,
1201                                struct perf_event_mmap_page *userpg, u64 now)
1202 {
1203         u32 freq;
1204         u32 shift;
1205 
1206         /*
1207          * Internal timekeeping for enabled/running/stopped times
1208          * is always computed with the sched_clock.
1209          */
1210         freq = arch_timer_get_rate();
1211         userpg->cap_user_time = 1;
1212 
1213         clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
1214                         NSEC_PER_SEC, 0);
1215         /*
1216          * time_shift is not expected to be greater than 31 due to
1217          * the original published conversion algorithm shifting a
1218          * 32-bit value (now specifies a 64-bit value) - refer to the
1219          * perf_event_mmap_page documentation in perf_event.h.
1220          */
1221         if (shift == 32) {
1222                 shift = 31;
1223                 userpg->time_mult >>= 1;
1224         }
1225         userpg->time_shift = (u16)shift;
1226         userpg->time_offset = -now;
1227 }
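/*
 * Rough sketch of how userspace consumes the values set above (see the
 * perf_event_mmap_page documentation for the exact algorithm): a raw
 * counter delta "cyc" is converted to nanoseconds as approximately
 * time_offset + ((cyc * time_mult) >> time_shift).
 */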
