root/arch/powerpc/kernel/pmc.c


DEFINITIONS

This source file includes the following definitions:
  1. dummy_perf
  2. reserve_pmc_hardware
  3. release_pmc_hardware
  4. power4_enable_pmcs

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  arch/powerpc/kernel/pmc.c
 *
 *  Copyright (C) 2004 David Gibson, IBM Corporation.
 *  Includes code formerly from arch/ppc/kernel/perfmon.c:
 *    Author: Andy Fleming
 *    Copyright (c) 2004 Freescale Semiconductor, Inc
 */

#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>

#ifndef MMCR0_PMAO
#define MMCR0_PMAO      0
#endif

/*
 * Default performance monitor interrupt handler, installed while no
 * subsystem owns the PMC hardware: it simply masks further performance
 * monitor exceptions.
 */
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
        mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
        if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
        mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}


static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;

int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
        int err = 0;

        raw_spin_lock(&pmc_owner_lock);

        if (pmc_owner_caller) {
                printk(KERN_WARNING "reserve_pmc_hardware: "
                       "PMC hardware busy (reserved by caller %p)\n",
                       pmc_owner_caller);
                err = -EBUSY;
                goto out;
        }

        pmc_owner_caller = __builtin_return_address(0);
        perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;

 out:
        raw_spin_unlock(&pmc_owner_lock);
        return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

void release_pmc_hardware(void)
{
        raw_spin_lock(&pmc_owner_lock);

        WARN_ON(!pmc_owner_caller);

        pmc_owner_caller = NULL;
        perf_irq = dummy_perf;

        raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);

#ifdef CONFIG_PPC64
void power4_enable_pmcs(void)
{
        unsigned long hid0;

        /* Set HID0 bit 20 (IBM MSB-0 numbering) to enable the PMCs */
        hid0 = mfspr(SPRN_HID0);
        hid0 |= 1UL << (63 - 20);

        /* POWER4 requires the following sequence */
        asm volatile(
                "sync\n"
                "mtspr     %1, %0\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
                "memory");
}
#endif /* CONFIG_PPC64 */
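
Usage note: below is a minimal sketch, not part of pmc.c or the kernel tree,
of how a client (for example a perf or oprofile backend) would claim the PMC
hardware with reserve_pmc_hardware(), supplying its own perf_irq_t handler,
and hand it back with release_pmc_hardware(). The names my_pmc_interrupt,
my_backend_init and my_backend_exit are hypothetical and used only for
illustration.

/* Hypothetical usage sketch -- not part of pmc.c. */
#include <linux/ptrace.h>
#include <asm/pmc.h>

/* Called on each performance monitor exception once registered. */
static void my_pmc_interrupt(struct pt_regs *regs)
{
        /* Read or clear the PMCs, rearm MMCR0, record a sample, etc. */
}

static int my_backend_init(void)
{
        int err;

        /* Fails with -EBUSY if another subsystem already owns the PMCs. */
        err = reserve_pmc_hardware(my_pmc_interrupt);
        if (err)
                return err;

        /* ... program the MMCRs/PMCs and enable counting here ... */
        return 0;
}

static void my_backend_exit(void)
{
        /* Reinstalls dummy_perf and drops the reservation. */
        release_pmc_hardware();
}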
