root/arch/x86/kvm/pmu_amd.c


DEFINITIONS

This source file includes the following definitions.
  1. get_msr_base
  2. msr_to_index
  3. get_gp_pmc_amd
  4. amd_find_arch_event
  5. amd_find_fixed_event
  6. amd_pmc_is_enabled
  7. amd_pmc_idx_to_pmc
  8. amd_is_valid_msr_idx
  9. amd_msr_idx_to_pmc
  10. amd_is_valid_msr
  11. amd_pmu_get_msr
  12. amd_pmu_set_msr
  13. amd_pmu_refresh
  14. amd_pmu_init
  15. amd_pmu_reset

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

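/* A PMC MSR is either a counter or its paired event-select register. */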
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

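/* Index of a general-purpose counter within pmu->gp_counters[]. */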
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

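/*
 * Return the lowest MSR of the given type: PERFCTR_CORE guests use the
 * F15h core performance extension range, legacy guests the K7 MSRs.
 */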
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

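/* Map a counter or event-select MSR to its gp_counters[] index. */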
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

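/*
 * Look up the kvm_pmc backing an MSR, returning NULL if the MSR's kind
 * doesn't match the requested type or it isn't a PMU MSR at all.
 */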
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

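/* Map an AMD event select/unit mask pair to a generic perf hw event. */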
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select &&
		    amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

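/* Map a contiguous counter index back to its kvm_pmc. */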
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

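	/* Strip the flag bits in ECX[31:30]; AMD exposes only GP counters. */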
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

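/* Return true if @msr is one of this PMU's counter or event-select MSRs. */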
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	return get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
}

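/* Read a counter or event-select MSR; returns 0 on success, 1 otherwise. */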
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

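/*
 * Write a counter or event-select MSR. Event-select writes that touch
 * reserved bits are rejected; valid changes reprogram the perf event.
 */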
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
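		/* Bias the saved count so the guest-visible value reads back as @data. */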
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

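/*
 * (Re)compute the PMU configuration from guest CPUID: PERFCTR_CORE
 * guests get six 48-bit GP counters, legacy guests get four.
 */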
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->version = 1;
	/* not applicable to AMD; clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
}

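/* One-time initialization of the counter array at vCPU creation. */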
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

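/* Stop all counters and clear counter/event-select state on vCPU reset. */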
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

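/* AMD callbacks wired into the common KVM PMU code. */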
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
