drivers/perf/arm_pmu.c


DEFINITIONS

This source file includes the following definitions:
  1. arm_pmu_event_max_period
  2. armpmu_map_cache_event
  3. armpmu_map_hw_event
  4. armpmu_map_raw_event
  5. armpmu_map_event
  6. armpmu_event_set_period
  7. armpmu_event_update
  8. armpmu_read
  9. armpmu_stop
  10. armpmu_start
  11. armpmu_del
  12. armpmu_add
  13. validate_event
  14. validate_group
  15. armpmu_dispatch_irq
  16. __hw_perf_event_init
  17. armpmu_event_init
  18. armpmu_enable
  19. armpmu_disable
  20. armpmu_filter_match
  21. armpmu_cpumask_show
  22. perf_pmu_name
  23. perf_num_counters
  24. armpmu_count_irq_users
  25. armpmu_free_irq
  26. armpmu_request_irq
  27. armpmu_get_cpu_irq
  28. arm_perf_starting_cpu
  29. arm_perf_teardown_cpu
  30. cpu_pm_pmu_setup
  31. cpu_pm_pmu_notify
  32. cpu_pm_pmu_register
  33. cpu_pm_pmu_unregister
  34. cpu_pm_pmu_register
  35. cpu_pm_pmu_unregister
  36. cpu_pmu_init
  37. cpu_pmu_destroy
  38. __armpmu_alloc
  39. armpmu_alloc
  40. armpmu_alloc_atomic
  41. armpmu_free
  42. armpmu_register
  43. arm_pmu_hp_init

// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);

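/*
 * Events marked ARMPMU_EVT_64BIT use the full 64-bit counter width;
 * everything else is counted with a 32-bit counter.
 */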
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
        if (event->hw.flags & ARMPMU_EVT_64BIT)
                return GENMASK_ULL(63, 0);
        else
                return GENMASK_ULL(31, 0);
}

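/*
 * Decode a PERF_TYPE_HW_CACHE config value, which packs the cache type,
 * operation and result into the low three bytes (type | op << 8 |
 * result << 16), and look up the PMU-specific event number in cache_map.
 */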
static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        if (!cache_map)
                return -ENOENT;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        if (!event_map)
                return -ENOENT;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

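/*
 * Translate a generic perf event into the PMU-specific event number,
 * dispatching on the attribute type: raw encodings are masked through,
 * while hardware and cache events go through the per-PMU lookup tables.
 */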
int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

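/*
 * Program the counter so that it overflows after sample_period more
 * increments, returning nonzero if a new period was started.
 */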
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        u64 max_period;
        int ret = 0;

        max_period = arm_pmu_event_max_period(event);
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (max_period >> 1))
                left = (max_period >> 1);

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & max_period);

        perf_event_update_userpage(event);

        return ret;
}

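/*
 * Fold the delta since the last read into the event count. The cmpxchg
 * loop guards against a racing update of prev_count (e.g. from the
 * overflow interrupt), and the delta is masked to the counter width so
 * that wrapped counters are accounted correctly.
 */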
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;
        u64 max_period = arm_pmu_event_max_period(event);

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

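/*
 * Remove an event from the PMU: stop it, fold in the final count and
 * release the counter it was using.
 */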
static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        armpmu->clear_event_idx(hw_events, event);
        perf_event_update_userpage(event);
        /* Clear the allocated counter */
        hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        /* An event following a process won't be stopped earlier */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0)
                return idx;

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

        return 0;
}

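/*
 * Check whether an event could be scheduled alongside those already in
 * hw_events. Returns nonzero if the event is admissible, zero if it is
 * not (e.g. it belongs to a different hardware PMU).
 */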
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
                               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
         * the handlers expect a struct arm_pmu*. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        if (WARN_ON_ONCE(!armpmu))
                return IRQ_NONE;

        start_clock = sched_clock();
        ret = armpmu->handle_irq(armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}

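/*
 * Do the PMU-specific half of event initialisation: map the event onto
 * a hardware encoding, apply any mode filters, pick a default sample
 * period for counting events and validate the event's group.
 */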
static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        hwc->flags = 0;
        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx                = -1;
        hwc->config_base        = 0;
        hwc->config             = 0;
        hwc->event_base         = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if (armpmu->set_event_filter &&
            armpmu->set_event_filter(hwc, &event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base            |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
                !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
        if (ret && armpmu->filter_match)
                return armpmu->filter_match(event);

        return ret;
}

static ssize_t armpmu_cpumask_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
        &dev_attr_cpus.attr,
        NULL,
};

static struct attribute_group armpmu_common_attr_group = {
        .attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!__oprofile_cpu_pmu)
                return NULL;

        return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (__oprofile_cpu_pmu != NULL)
                max_events = __oprofile_cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static int armpmu_count_irq_users(const int irq)
{
        int cpu, count = 0;

        for_each_possible_cpu(cpu) {
                if (per_cpu(cpu_irq, cpu) == irq)
                        count++;
        }

        return count;
}

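/*
 * Release the PMU IRQ for a CPU. A percpu_devid IRQ is shared between
 * CPUs, so it is only freed once its last user goes away.
 */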
void armpmu_free_irq(int irq, int cpu)
{
        if (per_cpu(cpu_irq, cpu) == 0)
                return;
        if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
                return;

        if (!irq_is_percpu_devid(irq))
                free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
        else if (armpmu_count_irq_users(irq) == 1)
                free_percpu_irq(irq, &cpu_armpmu);

        per_cpu(cpu_irq, cpu) = 0;
}

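/*
 * Request the PMU IRQ for a CPU. A normal IRQ is forcibly bound to the
 * CPU and left disabled (IRQ_NOAUTOEN) until the CPU comes online; a
 * percpu_devid IRQ is requested once for all CPUs and then enabled per
 * CPU from the hotplug callbacks below.
 */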
int armpmu_request_irq(int irq, int cpu)
{
        int err = 0;
        const irq_handler_t handler = armpmu_dispatch_irq;
        if (!irq)
                return 0;

        if (!irq_is_percpu_devid(irq)) {
                unsigned long irq_flags;

                err = irq_force_affinity(irq, cpumask_of(cpu));

                if (err && num_possible_cpus() > 1) {
                        pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                irq, cpu);
                        goto err_out;
                }

                irq_flags = IRQF_PERCPU |
                            IRQF_NOBALANCING |
                            IRQF_NO_THREAD;

                irq_set_status_flags(irq, IRQ_NOAUTOEN);
                err = request_irq(irq, handler, irq_flags, "arm-pmu",
                                  per_cpu_ptr(&cpu_armpmu, cpu));
        } else if (armpmu_count_irq_users(irq) == 0) {
                err = request_percpu_irq(irq, handler, "arm-pmu",
                                         &cpu_armpmu);
        }

        if (err)
                goto err_out;

        per_cpu(cpu_irq, cpu) = irq;
        return 0;

err_out:
        pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
        return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
        return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;
        if (pmu->reset)
                pmu->reset(pmu);

        per_cpu(cpu_armpmu, cpu) = pmu;

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
                if (irq_is_percpu_devid(irq))
                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
                else
                        enable_irq(irq);
        }

        return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
                if (irq_is_percpu_devid(irq))
                        disable_percpu_irq(irq);
                else
                        disable_irq_nosync(irq);
        }

        per_cpu(cpu_armpmu, cpu) = NULL;

        return 0;
}

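/*
 * CPU PM notifiers: the PMU loses state across low-power transitions,
 * so active events are stopped on CPU_PM_ENTER and reprogrammed (and
 * the PMU reset) on CPU_PM_EXIT.
 */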
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct perf_event *event;
        int idx;

        for (idx = 0; idx < armpmu->num_events; idx++) {
                event = hw_events->events[idx];
                if (!event)
                        continue;

                switch (cmd) {
                case CPU_PM_ENTER:
                        /*
                         * Stop and update the counter
                         */
                        armpmu_stop(event, PERF_EF_UPDATE);
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
                        /*
                         * Restore and enable the counter.
                         *
                         * armpmu_start() indirectly calls
                         * perf_event_update_userpage(), which requires RCU
                         * read locking to be functional. Wrap the call within
                         * RCU_NONIDLE to make the RCU subsystem aware that
                         * this CPU is not idle from an RCU perspective for
                         * the duration of the armpmu_start() call.
                         */
                        RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
                }
        }
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                             void *v)
{
        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return NOTIFY_DONE;

        /*
         * Always reset the PMU registers on power-up even if
         * there are no events running.
         */
        if (cmd == CPU_PM_EXIT && armpmu->reset)
                armpmu->reset(armpmu);

        if (!enabled)
                return NOTIFY_OK;

        switch (cmd) {
        case CPU_PM_ENTER:
                armpmu->stop(armpmu);
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
        case CPU_PM_ENTER_FAILED:
                cpu_pm_pmu_setup(armpmu, cmd);
                armpmu->start(armpmu);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
                                       &cpu_pmu->node);
        if (err)
                goto out;

        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
                goto out_unregister;

        return 0;

out_unregister:
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
out:
        return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        cpu_pm_pmu_unregister(cpu_pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
}

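/*
 * Allocate an arm_pmu together with its per-cpu pmu_hw_events and wire
 * up the generic struct pmu callbacks; the caller fills in the
 * hardware-specific ops before registering it.
 */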
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
        struct arm_pmu *pmu;
        int cpu;

        pmu = kzalloc(sizeof(*pmu), flags);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }

        pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        pmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
                .filter_match   = armpmu_filter_match,
                .attr_groups    = pmu->attr_groups,
                /*
                 * This is a CPU PMU potentially in a heterogeneous
                 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
                 * and we have taken ctx sharing into account (e.g. with our
                 * pmu::filter_match callback and pmu::event_init group
                 * validation).
                 */
                .capabilities   = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
        };

        pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
                &armpmu_common_attr_group;

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events;

                events = per_cpu_ptr(pmu->hw_events, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = pmu;
        }

        return pmu;

out_free_pmu:
        kfree(pmu);
out:
        return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
        return __armpmu_alloc(GFP_KERNEL);
}

struct arm_pmu *armpmu_alloc_atomic(void)
{
        return __armpmu_alloc(GFP_ATOMIC);
}

void armpmu_free(struct arm_pmu *pmu)
{
        free_percpu(pmu->hw_events);
        kfree(pmu);
}

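/*
 * Typical usage from a PMU driver's probe path is roughly the sketch
 * below (for illustration only; my_pmu_init() is a hypothetical driver
 * helper that fills in name, num_events and the hardware ops):
 *
 *	struct arm_pmu *pmu = armpmu_alloc();
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *	my_pmu_init(pmu);
 *	ret = armpmu_register(pmu);
 *	if (ret)
 *		armpmu_free(pmu);
 */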
int armpmu_register(struct arm_pmu *pmu)
{
        int ret;

        ret = cpu_pmu_init(pmu);
        if (ret)
                return ret;

        if (!pmu->set_event_filter)
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (ret)
                goto out_destroy;

        if (!__oprofile_cpu_pmu)
                __oprofile_cpu_pmu = pmu;

        pr_info("enabled with %s PMU driver, %d counters available\n",
                pmu->name, pmu->num_events);

        return 0;

out_destroy:
        cpu_pmu_destroy(pmu);
        return ret;
}

static int arm_pmu_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
                                      "perf/arm/pmu:starting",
                                      arm_perf_starting_cpu,
                                      arm_perf_teardown_cpu);
        if (ret)
                pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
                       ret);
        return ret;
}
subsys_initcall(arm_pmu_hp_init);
