root/arch/x86/events/intel/uncore_snb.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. snb_uncore_msr_enable_event
  2. snb_uncore_msr_disable_event
  3. snb_uncore_msr_init_box
  4. snb_uncore_msr_enable_box
  5. snb_uncore_msr_exit_box
  6. snb_uncore_cpu_init
  7. skl_uncore_msr_init_box
  8. skl_uncore_msr_enable_box
  9. skl_uncore_msr_exit_box
  10. skl_uncore_cpu_init
  11. icl_get_cbox_num
  12. icl_uncore_cpu_init
  13. snb_uncore_imc_init_box
  14. snb_uncore_imc_enable_box
  15. snb_uncore_imc_disable_box
  16. snb_uncore_imc_enable_event
  17. snb_uncore_imc_disable_event
  18. snb_uncore_imc_event_init
  19. snb_uncore_imc_hw_config
  20. snb_pci2phy_map_init
  21. imc_uncore_find_dev
  22. imc_uncore_pci_init
  23. snb_uncore_pci_init
  24. ivb_uncore_pci_init
  25. hsw_uncore_pci_init
  26. bdw_uncore_pci_init
  27. skl_uncore_pci_init
  28. nhm_uncore_msr_disable_box
  29. nhm_uncore_msr_enable_box
  30. nhm_uncore_msr_enable_event
  31. nhm_uncore_cpu_init

   1 // SPDX-License-Identifier: GPL-2.0
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
   3 #include "uncore.h"
   4 
   5 /* Uncore IMC PCI IDs */
   6 #define PCI_DEVICE_ID_INTEL_SNB_IMC             0x0100
   7 #define PCI_DEVICE_ID_INTEL_IVB_IMC             0x0154
   8 #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC          0x0150
   9 #define PCI_DEVICE_ID_INTEL_HSW_IMC             0x0c00
  10 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC           0x0a04
  11 #define PCI_DEVICE_ID_INTEL_BDW_IMC             0x1604
  12 #define PCI_DEVICE_ID_INTEL_SKL_U_IMC           0x1904
  13 #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC           0x190c
  14 #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC          0x1900
  15 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC          0x1910
  16 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC          0x190f
  17 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC          0x191f
  18 #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC          0x1918
  19 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC           0x590c
  20 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC           0x5904
  21 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC          0x5914
  22 #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC          0x590f
  23 #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC          0x591f
  24 #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC          0x5910
  25 #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC          0x5918
  26 #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC          0x3ecc
  27 #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC          0x3ed0
  28 #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC          0x3e10
  29 #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC          0x3ec4
  30 #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
  31 #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
  32 #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
  33 #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
  34 #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
  35 #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
  36 #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
  37 #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
  38 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
  39 #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32
  40 #define PCI_DEVICE_ID_INTEL_AML_YD_IMC          0x590c
  41 #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC          0x590d
  42 #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC          0x3ed0
  43 #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC        0x3e34
  44 #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC          0x3e35
  45 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC           0x8a02
  46 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC          0x8a12
  47 
  48 
  49 /* SNB event control */
  50 #define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
  51 #define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
  52 #define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
  53 #define SNB_UNC_CTL_EN                          (1 << 22)
  54 #define SNB_UNC_CTL_INVERT                      (1 << 23)
  55 #define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
  56 #define NHM_UNC_CTL_CMASK_MASK                  0xff000000
  57 #define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)
  58 
  59 #define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
  60                                                  SNB_UNC_CTL_UMASK_MASK | \
  61                                                  SNB_UNC_CTL_EDGE_DET | \
  62                                                  SNB_UNC_CTL_INVERT | \
  63                                                  SNB_UNC_CTL_CMASK_MASK)
  64 
  65 #define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
  66                                                  SNB_UNC_CTL_UMASK_MASK | \
  67                                                  SNB_UNC_CTL_EDGE_DET | \
  68                                                  SNB_UNC_CTL_INVERT | \
  69                                                  NHM_UNC_CTL_CMASK_MASK)
  70 
  71 /* SNB global control register */
  72 #define SNB_UNC_PERF_GLOBAL_CTL                 0x391
  73 #define SNB_UNC_FIXED_CTR_CTRL                  0x394
  74 #define SNB_UNC_FIXED_CTR                       0x395
  75 
  76 /* SNB uncore global control */
  77 #define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
  78 #define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
  79 
  80 /* SNB Cbo register */
  81 #define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
  82 #define SNB_UNC_CBO_0_PER_CTR0                  0x706
  83 #define SNB_UNC_CBO_MSR_OFFSET                  0x10
  84 
  85 /* SNB ARB register */
  86 #define SNB_UNC_ARB_PER_CTR0                    0x3b0
  87 #define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
  88 #define SNB_UNC_ARB_MSR_OFFSET                  0x10
  89 
  90 /* NHM global control register */
  91 #define NHM_UNC_PERF_GLOBAL_CTL                 0x391
  92 #define NHM_UNC_FIXED_CTR                       0x394
  93 #define NHM_UNC_FIXED_CTR_CTRL                  0x395
  94 
  95 /* NHM uncore global control */
  96 #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
  97 #define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
  98 
  99 /* NHM uncore register */
 100 #define NHM_UNC_PERFEVTSEL0                     0x3c0
 101 #define NHM_UNC_UNCORE_PMC0                     0x3b0
 102 
 103 /* SKL uncore global control */
 104 #define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
 105 #define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)
 106 
 107 /* ICL Cbo register */
 108 #define ICL_UNC_CBO_CONFIG                      0x396
 109 #define ICL_UNC_NUM_CBO_MASK                    0xf
 110 #define ICL_UNC_CBO_0_PER_CTR0                  0x702
 111 #define ICL_UNC_CBO_MSR_OFFSET                  0x8
 112 
 113 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 114 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 115 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 116 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 117 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
 118 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
 119 
 120 /* Sandy Bridge uncore support */
 121 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 122 {
 123         struct hw_perf_event *hwc = &event->hw;
 124 
 125         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
 126                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
 127         else
 128                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
 129 }
 130 
 131 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 132 {
 133         wrmsrl(event->hw.config_base, 0);
 134 }
 135 
 136 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
 137 {
 138         if (box->pmu->pmu_idx == 0) {
 139                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
 140                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
 141         }
 142 }
 143 
 144 static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
 145 {
 146         wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
 147                 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
 148 }
 149 
 150 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
 151 {
 152         if (box->pmu->pmu_idx == 0)
 153                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
 154 }
 155 
/* Named events exported via sysfs; clockticks maps to the fixed counter encoding. */
static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};
 160 
/* sysfs "format" attributes: how raw config bits map onto event fields. */
static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,       /* SNB has a 5-bit counter mask (config:24-28) */
        NULL,
};
 169 
/* Attribute group published as the "format" directory under the PMU. */
static const struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};
 174 
/* MSR-based counter access, shared by the SNB C-box and ARB PMUs. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};
 183 
/* ARB events 0x80 and 0x83 may only be scheduled on counter 0. */
static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};
 189 
/*
 * SNB C-box PMU: up to 4 boxes, 2 general counters each plus one fixed
 * (clockticks) counter; num_boxes is clamped to the real core count in
 * snb_uncore_cpu_init().
 */
static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};
 207 
/* SNB ARB (ring arbiter) PMU: a single box with 2 constrained counters. */
static struct intel_uncore_type snb_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
        .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};
 221 
/* NULL-terminated list of MSR-based uncore PMU types for SNB. */
static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};
 227 
 228 void snb_uncore_cpu_init(void)
 229 {
 230         uncore_msr_uncores = snb_msr_uncores;
 231         if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 232                 snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 233 }
 234 
 235 static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
 236 {
 237         if (box->pmu->pmu_idx == 0) {
 238                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
 239                         SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
 240         }
 241 
 242         /* The 8th CBOX has different MSR space */
 243         if (box->pmu->pmu_idx == 7)
 244                 __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
 245 }
 246 
 247 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
 248 {
 249         wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
 250                 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
 251 }
 252 
 253 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
 254 {
 255         if (box->pmu->pmu_idx == 0)
 256                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
 257 }
 258 
/* SKL MSR ops: SKL-specific box handling, SNB event enable/disable. */
static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
        .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};
 267 
/*
 * SKL C-box PMU: up to 8 boxes with 4 counters each; num_boxes is
 * clamped to the real core count in skl_uncore_cpu_init().
 */
static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &skl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};
 285 
/* SKL reuses the SNB ARB type; only the C-box differs. */
static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};
 291 
 292 void skl_uncore_cpu_init(void)
 293 {
 294         uncore_msr_uncores = skl_msr_uncores;
 295         if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 296                 skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 297         snb_uncore_arb.ops = &skl_uncore_msr_ops;
 298 }
 299 
/*
 * ICL C-box PMU. No .num_boxes here: it is discovered from hardware
 * at boot by icl_uncore_cpu_init() via icl_get_cbox_num().
 */
static struct intel_uncore_type icl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
        .perf_ctr_bits  = 44,
        .perf_ctr       = ICL_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
        .ops            = &skl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};
 311 
/* ICL clock box event list: only the fixed clockticks event. */
static struct uncore_event_desc icl_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
        { /* end: all zeroes */ },
};
 316 
/* The clock box only exposes the event field in its format directory. */
static struct attribute *icl_uncore_clock_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};
 321 
/* sysfs "format" group for the ICL clock box. */
static struct attribute_group icl_uncore_clock_format_group = {
        .name = "format",
        .attrs = icl_uncore_clock_formats_attr,
};
 326 
/* ICL "clock" PMU: a single box exposing only the fixed clockticks counter. */
static struct intel_uncore_type icl_uncore_clockbox = {
        .name           = "clock",
        .num_counters   = 1,
        .num_boxes      = 1,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
        .format_group   = &icl_uncore_clock_format_group,
        .ops            = &skl_uncore_msr_ops,
        .event_descs    = icl_uncore_events,
};
 340 
/* ICL MSR uncore PMU types: C-box, shared ARB, and the clock box. */
static struct intel_uncore_type *icl_msr_uncores[] = {
        &icl_uncore_cbox,
        &snb_uncore_arb,
        &icl_uncore_clockbox,
        NULL,
};
 347 
 348 static int icl_get_cbox_num(void)
 349 {
 350         u64 num_boxes;
 351 
 352         rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
 353 
 354         return num_boxes & ICL_UNC_NUM_CBO_MASK;
 355 }
 356 
 357 void icl_uncore_cpu_init(void)
 358 {
 359         uncore_msr_uncores = icl_msr_uncores;
 360         icl_uncore_cbox.num_boxes = icl_get_cbox_num();
 361         snb_uncore_arb.ops = &skl_uncore_msr_ops;
 362 }
 363 
/* Index into snb_pci_uncores[], used by UNCORE_PCI_DEV_DATA below. */
enum {
        SNB_PCI_UNCORE_IMC,
};
 367 
/*
 * IMC named events with scale/unit strings exported via sysfs.
 * scale 6.103515625e-5 presumably converts raw counts to MiB
 * (64/2^20, i.e. one 64-byte line per count) — confirm against SDM.
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};
 379 
 380 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
 381 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
 382 
 383 /* page size multiple covering all config regs */
 384 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
 385 
 386 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
 387 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
 388 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
 389 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
 390 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
 391 
/* Free-running counter groups: only one (the data reads/writes pair). */
enum perf_snb_uncore_imc_freerunning_types {
        SNB_PCI_UNCORE_IMC_DATA         = 0,
        SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
 396 
/* 2 x 32-bit counters, 4 bytes apart: reads @0x5050, writes @0x5054. */
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
        [SNB_PCI_UNCORE_IMC_DATA]     = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};
 400 
/* IMC events are selected by the event field alone. */
static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};
 405 
/* sysfs "format" group for the IMC PMU. */
static const struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};
 410 
 411 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
 412 {
 413         struct pci_dev *pdev = box->pci_dev;
 414         int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
 415         resource_size_t addr;
 416         u32 pci_dword;
 417 
 418         pci_read_config_dword(pdev, where, &pci_dword);
 419         addr = pci_dword;
 420 
 421 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 422         pci_read_config_dword(pdev, where + 4, &pci_dword);
 423         addr |= ((resource_size_t)pci_dword << 32);
 424 #endif
 425 
 426         addr &= ~(PAGE_SIZE - 1);
 427 
 428         box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
 429         box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
 430 }
 431 
/*
 * The IMC counters are free-running and cannot be enabled, disabled or
 * programmed; these four hooks are no-ops that only satisfy the
 * intel_uncore_ops interface.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
 443 
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 *
 * Validates a user-supplied IMC event, binds it to the package's
 * designated CPU and box, whitelists the config against the two known
 * free-running counters, and rewrites hw.config into the standard
 * free-running encoding. Returns 0 on success, -ENOENT if the event is
 * not for this PMU, -EINVAL for unsupported modes or unknown events.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        /* Redirect the event to the box's designated CPU. */
        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FREERUNNING;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.idx = idx;

        /* Convert to standard encoding format for freerunning counters */
        event->hw.config = ((cfg - 1) << 8) | 0x10ff;

        /* no group validation needed, we have free running counters */

        return 0;
}
 523 
/* Nothing to configure: the IMC counters are free-running. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}
 528 
 529 int snb_pci2phy_map_init(int devid)
 530 {
 531         struct pci_dev *dev = NULL;
 532         struct pci2phy_map *map;
 533         int bus, segment;
 534 
 535         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
 536         if (!dev)
 537                 return -ENOTTY;
 538 
 539         bus = dev->bus->number;
 540         segment = pci_domain_nr(dev->bus);
 541 
 542         raw_spin_lock(&pci2phy_map_lock);
 543         map = __find_pci2phy_map(segment);
 544         if (!map) {
 545                 raw_spin_unlock(&pci2phy_map_lock);
 546                 pci_dev_put(dev);
 547                 return -ENOMEM;
 548         }
 549         map->pbus_to_physid[bus] = 0;
 550         raw_spin_unlock(&pci2phy_map_lock);
 551 
 552         pci_dev_put(dev);
 553 
 554         return 0;
 555 }
 556 
/*
 * Custom struct pmu for the IMC: uses its own event_init() (see
 * snb_uncore_imc_event_init) instead of the generic uncore one.
 */
static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = uncore_pmu_event_add,
        .del            = uncore_pmu_event_del,
        .start          = uncore_pmu_event_start,
        .stop           = uncore_pmu_event_stop,
        .read           = uncore_pmu_event_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
};
 567 
/* MMIO-based ops for the IMC; enable/disable hooks are no-ops. */
static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = uncore_mmio_read_counter,
};
 578 
/* IMC PMU type: one box, two free-running counters, custom pmu ops. */
static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
        .num_freerunning_types  = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
        .freerunning    = snb_uncore_imc_freerunning,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};
 590 
/* PCI-discovered uncore types, indexed by the enum above. */
static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};
 595 
/* Sandy Bridge client IMC PCI IDs. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
 603 
/* Ivy Bridge client IMC PCI IDs. */
static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
 615 
/* Haswell client IMC PCI IDs. */
static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
 627 
/* Broadwell client IMC PCI IDs. */
static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
 635 
/* Skylake/Kabylake/Coffeelake/Amberlake/Whiskeylake client IMC PCI IDs. */
static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};
 771 
/*
 * PCI device IDs of the Ice Lake (mobile) integrated memory controller.
 * Each entry routes the device to the SNB_PCI_UNCORE_IMC uncore type;
 * the table is terminated by an all-zero sentinel entry.
 */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
 783 
/*
 * Sandy Bridge IMC "driver": id table only (no probe/remove); it is
 * installed as uncore_pci_driver by imc_uncore_pci_init() when a matching
 * device is found.
 */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};
 788 
/* Ivy Bridge IMC "driver": id table only, selected via imc_uncore_find_dev(). */
static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};
 793 
/* Haswell IMC "driver": id table only, selected via imc_uncore_find_dev(). */
static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};
 798 
/* Broadwell IMC "driver": id table only, selected via imc_uncore_find_dev(). */
static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};
 803 
/*
 * Skylake-and-derivatives (SKL/KBL/CFL/AML/WHL) IMC "driver": id table
 * only, selected via imc_uncore_find_dev().
 */
static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
 808 
/* Ice Lake IMC "driver": id table only, selected via imc_uncore_find_dev(). */
static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
 813 
/*
 * One entry of the IMC device-id -> pci_driver lookup table below.
 * @pci_id: Intel PCI device id of the integrated memory controller.
 * @driver: the uncore pseudo-driver to install for that device.
 */
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
/* Shorthand: expands PCI_DEVICE_ID_INTEL_<a> and pairs it with driver d. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
 820 
/*
 * All known client (desktop/mobile) IMC PCI device ids, mapping each id to
 * the matching uncore pseudo-driver. Scanned in order by
 * imc_uncore_find_dev(); terminated by an entry with pci_id == 0.
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	{  /* end marker */ }
};
 865 
 866 
/* Iterate table t of struct imc_uncore_pci_dev until the zero end marker. */
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
 869 
 870 static struct pci_driver *imc_uncore_find_dev(void)
 871 {
 872         const struct imc_uncore_pci_dev *p;
 873         int ret;
 874 
 875         for_each_imc_pci_id(p, desktop_imc_pci_ids) {
 876                 ret = snb_pci2phy_map_init(p->pci_id);
 877                 if (ret == 0)
 878                         return p->driver;
 879         }
 880         return NULL;
 881 }
 882 
 883 static int imc_uncore_pci_init(void)
 884 {
 885         struct pci_driver *imc_drv = imc_uncore_find_dev();
 886 
 887         if (!imc_drv)
 888                 return -ENODEV;
 889 
 890         uncore_pci_uncores = snb_pci_uncores;
 891         uncore_pci_driver = imc_drv;
 892 
 893         return 0;
 894 }
 895 
/* Sandy Bridge entry point: all IMC setup is shared, see imc_uncore_pci_init(). */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
 900 
/* Ivy Bridge entry point: all IMC setup is shared, see imc_uncore_pci_init(). */
int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
/* Haswell entry point: all IMC setup is shared, see imc_uncore_pci_init(). */
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
 909 
/* Broadwell entry point: all IMC setup is shared, see imc_uncore_pci_init(). */
int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
 914 
/* Skylake entry point: all IMC setup is shared, see imc_uncore_pci_init(). */
int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
 919 
 920 /* end of Sandy Bridge uncore support */
 921 
 922 /* Nehalem uncore support */
/* Globally stop all Nehalem uncore counters by clearing the global control MSR. */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
 927 
/*
 * Globally start Nehalem uncore counting: set the enable bits for all
 * general-purpose counters (EN_PC_ALL) and the fixed counter (EN_FC) in
 * the global control MSR.
 */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
 932 
 933 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 934 {
 935         struct hw_perf_event *hwc = &event->hw;
 936 
 937         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
 938                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
 939         else
 940                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
 941 }
 942 
/*
 * sysfs "format" attributes for the Nehalem uncore PMU: the config fields
 * user space may set (event, umask, edge, inv, and an 8-bit cmask).
 */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
 951 
/* Groups the attributes above under the PMU's "format" sysfs directory. */
static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
 956 
/*
 * Named event aliases exported via sysfs for the Nehalem uncore PMU, each
 * with its raw event/umask encoding; zero-terminated.
 */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
 969 
/*
 * MSR access callbacks for the Nehalem uncore box. Per-event disable is
 * shared with the Sandy Bridge implementation; box-level and event enable
 * use the NHM-specific helpers above.
 */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
 977 
/*
 * The single Nehalem uncore PMU type: one box with eight 48-bit
 * general-purpose counters plus one 48-bit fixed counter.
 * NOTE(review): .name is deliberately the empty string — presumably so the
 * PMU gets the bare "uncore" name with no type suffix; confirm against the
 * uncore core's PMU-naming code.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
 993 
/* NULL-terminated list of Nehalem MSR-based uncore types (just the one). */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
 998 
/* Nehalem entry point: publish the MSR uncore list to the uncore core. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
1003 
1004 /* end of Nehalem uncore support */

/* [<][>][^][v][top][bottom][index][help] */