This source file includes the following definitions.
- rdmsrl_amd_safe
- wrmsrl_amd_safe
- init_amd_k5
- init_amd_k6
- init_amd_k7
- nearby_node
- legacy_fixup_core_id
- amd_get_topology_early
- amd_get_topology
- amd_detect_cmp
- amd_get_nb_id
- amd_get_nodes_per_socket
- srat_detect_node
- early_init_amd_mc
- bsp_init_amd
- early_detect_mem_encrypt
- early_init_amd
- init_amd_k8
- init_amd_gh
- init_amd_ln
- rdrand_cmdline
- clear_rdrand_cpuid_bit
- init_amd_jg
- init_amd_bd
- init_amd_zn
- init_amd
- amd_size_cache
- cpu_detect_tlb_amd
- cpu_has_amd_erratum
- set_dr_addr_mask

#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Number of NUMA nodes per physical socket.
 * Set on the boot CPU from CPUID Fn8000_001E ECX or from
 * MSR_FAM10H_NODE_ID in bsp_init_amd().
 */
static u32 nodes_per_socket = 1;
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[1] = msr;                  /* MSR number goes in %ecx */
        gprs[7] = 0x9c5a203a;           /* K8 MSR passcode in %edi */

        err = rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[0] = (u32)val;             /* low half in %eax */
        gprs[1] = msr;                  /* MSR number in %ecx */
        gprs[2] = val >> 32;            /* high half in %edx */
        gprs[7] = 0x9c5a203a;           /* K8 MSR passcode in %edi */

        return wrmsr_safe_regs(gprs);
}
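
/*
 * Illustrative use (mirroring init_amd_k8() below): MSR 0xc001100d on K8
 * is gated behind the passcode, so a plain rdmsrl() could fault:
 *
 *      u64 value;
 *
 *      if (!rdmsrl_amd_safe(0xc001100d, &value))
 *              wrmsrl_amd_safe(0xc001100d, value & ~BIT_64(32));
 */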

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html and section 2.6.2 of
 * "AMD-K6 Processor Revision Guide - Model 6" (Publication #21266).
 */
#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
        ".globl vide\n"
        ".type vide, @function\n"
        ".align 4\n"
        "vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * The AMD Elan's Configuration Base Address Register (CBAR) at I/O port
 * 0xfffc may be left enabled by the BIOS. If it is, disable it by
 * writing the configuration key with the enable bit clear.
 */
#define CBAR            (0xfffc)        /* Configuration Base Address (I/O port) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0x000000CB)    /* Configuration key */
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
        int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                u64 d, d2;

                pr_info("AMD K6 stepping B detected - ");

                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */
                n = K6_BUG_LOOP;
                f_vide = vide;
                OPTIMIZER_HIDE_VAR(f_vide);
                d = rdtsc();
                while (n--)
                        f_vide();
                d2 = rdtsc();
                d = d2-d;

                if (d > 20*K6_BUG_LOOP)
                        pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
                        pr_cont("probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
           (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0x0000FFFF) == 0) {
                        unsigned long flags;
                        l = (1<<0)|((mbytes/4)<<1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips: new style WHCR, up to 4092 MB */

                if (mbytes > 4092)
                        mbytes = 4092;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0xFFFF0000) == 0) {
                        unsigned long flags;
                        l = ((mbytes>>2)<<22)|(1<<16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }

                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
#endif
}
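
/*
 * WHCR arithmetic, worked through for illustration (hypothetical 64 MB
 * machine): old style packs the enable bit into bit 0 and mbytes/4 into
 * bits [15:1], so l = (1 << 0) | ((64 / 4) << 1) = 0x21; new style packs
 * the enable bit into bit 16 and mbytes >> 2 into bits [31:22], so
 * l = (1 << 16) | ((64 >> 2) << 22) = 0x04010000.
 */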

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;

        /*
         * Bit 15 of Athlon specific MSR 15, needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPU's.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }

        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
         * As per AMD technical note 27212 0.2
         */
        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }

        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
            (c->x86_stepping == 1)))
                return;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XP's have the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.2001-000
         * for more.
         */
        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;

        /* If we get here, not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
        u32 cus_per_node;

        if (c->x86 >= 0x17)
                return;

        cus_per_node = c->x86_max_cores / nodes_per_socket;
        c->cpu_core_id %= cus_per_node;
}
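
/*
 * For illustration (hypothetical values): a two-node socket with
 * x86_max_cores = 8 gives cus_per_node = 8 / 2 = 4, so a core reporting
 * cpu_core_id = 5 is renumbered to 5 % 4 = 1 within its node.
 */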

static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_TOPOEXT))
                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}
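
/*
 * Illustrative decode (hypothetical value): CPUID Fn8000_001E EBX =
 * 0x00000101 holds threads-per-core minus 1 in bits [15:8] = 0x01, so
 * smp_num_siblings = 1 + 1 = 2 threads per core.
 */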

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
        u8 node_id;
        int cpu = smp_processor_id();

        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int err;
                u32 eax, ebx, ecx, edx;

                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

                node_id  = ecx & 0xff;

                if (c->x86 == 0x15)
                        c->cu_id = ebx & 0xff;

                if (c->x86 >= 0x17) {
                        c->cpu_core_id = ebx & 0xff;

                        if (smp_num_siblings > 1)
                                c->x86_max_cores /= smp_num_siblings;
                }

                /*
                 * In case leaf B is available, use it to derive
                 * topology information.
                 */
                err = detect_extended_topology(c);
                if (!err)
                        c->x86_coreid_bits = get_count_order(c->x86_max_cores);

                cacheinfo_amd_init_llc_id(c, cpu, node_id);

        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                node_id = value & 7;

                per_cpu(cpu_llc_id, cpu) = node_id;
        } else
                return;

        if (nodes_per_socket > 1) {
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                legacy_fixup_core_id(c);
        }
}

/*
 * On an AMD multi-core setup the lower bits of the APIC id distinguish
 * the cores.  Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int cpu = smp_processor_id();

        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}
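
/*
 * Worked example (illustrative values): with x86_coreid_bits = 3 and
 * initial_apicid = 0x1a (0b11010), cpu_core_id = 0x1a & 0b111 = 2 and
 * phys_proc_id = 0x1a >> 3 = 3.
 */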

u16 amd_get_nb_id(int cpu)
{
        return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
        return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu(cpu_llc_id, cpu);

        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created.  In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.  Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids.  If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 *
                 * This workaround operates directly on the mapping between
                 * APIC ID and NUMA node, assuming certain relationship
                 * between APIC ID, HT node ID and NUMA topology.  As going
                 * through CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
                int ht_nodeid = c->initial_apicid;

                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}
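
/*
 * Illustrative decode (hypothetical value): CPUID Fn8000_0008 ECX =
 * 0x00000005 reports NC = 5, i.e. x86_max_cores = 6, and a zero core-id
 * size field in bits [15:12], so the loop recomputes bits = 3 since
 * (1 << 3) = 8 is the first power of two >= 6.
 */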

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
        if (c->x86 >= 0xf) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        unsigned long pfn = tseg >> PAGE_SHIFT;

                        pr_debug("tseg: %010llx\n", tseg);
                        if (pfn_range_is_mapped(pfn, pfn + 1))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
        }
#endif

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask     = (upperbit - 1) & PAGE_MASK;
                va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_int() & va_align.mask;
        }
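
        /*
         * Worked example (typical Fam15h values, for illustration): if
         * Fn8000_0005 EDX reports a 64K, 2-way L1 icache, then upperbit =
         * (64 << 10) / 2 = 32768 and va_align.mask = 32767 & PAGE_MASK =
         * 0x7000, so bits [14:12] of mmap addresses get a random,
         * per-boot slice.
         */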

        if (cpu_has(c, X86_FEATURE_MWAITX))
                use_mwaitx_delay();

        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 ecx;

                ecx = cpuid_ecx(0x8000001e);
                nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }

        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
            !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
            c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;

                switch (c->x86) {
                case 0x15: bit = 54; break;
                case 0x16: bit = 33; break;
                case 0x17: bit = 10; break;
                default: return;
                }
                /*
                 * Try to cache the base value so further operations can
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                        setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
        u64 msr;

        /*
         * BIOS support is required for SME and SEV.
         *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
         *            the SME physical address space reduction value.
         *            If BIOS has not enabled SME then don't advertise the
         *            SME feature (set in scattered.c).
         *   For SEV: If BIOS has not enabled SEV then don't advertise the
         *            SEV feature (set in scattered.c).
         *
         *   In all cases, since support for SME and SEV requires long mode,
         *   don't advertise the feature under CONFIG_X86_32.
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
                rdmsrl(MSR_K8_SYSCFG, msr);
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;

                /*
                 * Always adjust physical address bits. Even though this
                 * will be a value above 32-bits this is still done for
                 * CONFIG_X86_32 so that accurate values are reported.
                 */
                c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;

                rdmsrl(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;

                return;

clear_all:
                setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
                setup_clear_cpu_cap(X86_FEATURE_SEV);
        }
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
        u64 value;
        u32 dummy;

        early_init_amd_mc(c);

#ifdef CONFIG_X86_32
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_K7);
#endif

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
        if (c->x86_power & BIT(12))
                set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /*
         * ApicID can always be treated as an 8-bit value for AMD APIC
         * versions >= 0x10, but even old K8s came out of reset with
         * version 0x10. So, we can safely set X86_FEATURE_EXTD_APICID
         * unconditionally for families after 16h.
         */
        if (boot_cpu_has(X86_FEATURE_APIC)) {
                if (c->x86 > 0x16)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                else if (c->x86 >= 0xf) {
                        /* check CPU config space for extended APIC ID */
                        unsigned int val;

                        val = read_pci_config(0, 24, 0, 0x68);
                        if ((val >> 17 & 0x3) == 0x3)
                                set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                }
        }
#endif

        /*
         * This is only needed to tell the kernel whether to use VMCALL
         * and VMMCALL.  VMMCALL is never executed except under virt, so
         * we can set it unconditionally.
         */
        set_cpu_cap(c, X86_FEATURE_VMMCALL);

        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf)
                msr_set_bit(MSR_AMD64_LS_CFG, 15);

        /*
         * Check whether the machine is affected by erratum 400. This is
         * used to select the proper idle routine and to enable the check
         * whether the machine is affected in arch_post_acpi_subsys_init()
         * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
         */
        if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_E400);

        early_detect_mem_encrypt(c);

        /* Re-enable TopologyExtensions if switched off by BIOS */
        if (c->x86 == 0x15 &&
            (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
        u32 level;
        u64 value;

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /*
         * Some BIOSes incorrectly force this feature, but only K8
         * revision D (model = 0x14) and later actually support it.
         * (AMD Erratum #110, docId: 25759).
         */
        if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                if (!rdmsrl_amd_safe(0xc001100d, &value)) {
                        value &= ~BIT_64(32);
                        wrmsrl_amd_safe(0xc001100d, value);
                }
        }

        if (!c->x86_model_id[0])
                strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        msr_set_bit(MSR_K7_HWCR, 6);
#endif
        set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
        /* do this for boot cpu */
        if (c == &boot_cpu_data)
                check_enable_amd_mmconf_dmi();

        fam10h_check_enable_mmcfg();
#endif

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here because
         * this is always needed when GART is enabled, even in a kernel
         * which has no MCE support built in. The BIOS should have masked
         * GartTlbWlkErr already, but set the mask bit here in case it
         * has not.
         */
        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

        /*
         * On family 10h BIOS may not have properly enabled WC+ support,
         * causing it to be converted to CD memtype. This may result in
         * performance degradation for certain nested-paging guests.
         * Prevent this conversion by clearing bit 24 in
         * MSR_AMD64_BU_CFG2.
         */
        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

        if (cpu_has_amd_erratum(c, amd_erratum_383))
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG        0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
        /*
         * Apply erratum 665 fix unconditionally so machines without a BIOS
         * fix work.
         */
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "force"))
                rdrand_force = true;
        else
                return -EINVAL;

        return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
        /*
         * Saving of the MSR used to hide the RDRAND support during
         * suspend/resume is done by arch/x86/power/cpu.c, which is
         * dependent on CONFIG_PM_SLEEP.
         */
        if (!IS_ENABLED(CONFIG_PM_SLEEP))
                return;

        /*
         * The nordrand option can clear X86_FEATURE_RDRAND, so check for
         * RDRAND support using the CPUID function directly.
         */
        if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
                return;

        msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

        /*
         * Verify that the CPUID change has occurred in case the kernel is
         * running virtualized and the hypervisor doesn't support the MSR.
         */
        if (cpuid_ecx(1) & BIT(30)) {
                pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
                return;
        }

        clear_cpu_cap(c, X86_FEATURE_RDRAND);
        pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /*
         * The way access filter has a performance penalty on some
         * workloads.  Disable it on the affected CPUs.
         */
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                }
        }

        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
        set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
        node_reclaim_distance = 32;
#endif

        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID.
         * Always set it, except when running under a hypervisor.
         */
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
                set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd(c);

        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
        clear_cpu_cap(c, 0*32+31);

        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();

        /* K6s reports MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);

        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x16: init_amd_jg(c); break;
        case 0x17: init_amd_zn(c); break;
        }

        /*
         * Enable workaround for FXSAVE leak on CPUs
         * without a XSaveErPtr feature
         */
        if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

        cpu_detect_cache_sizes(c);

        amd_detect_cmp(c);
        amd_get_topology(c);
        srat_detect_node(c);

        init_amd_cacheinfo(c);

        if (cpu_has(c, X86_FEATURE_XMM2)) {
                /*
                 * Use LFENCE for execution serialization.  On families
                 * which don't have that MSR, LFENCE is already
                 * serializing.  msr_set_bit() uses the safe accessors,
                 * too, even if the MSR is not present.
                 */
                msr_set_bit(MSR_F10H_DECFG,
                            MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

                /* A serializing LFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        }

        /*
         * Family 0x12 and above processors have APIC timer
         * running in deep C states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
                        set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

        /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
        if (!cpu_has(c, X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

        /*
         * Turn on the Instructions Retired free counter on machines not
         * susceptible to erratum #1054 "Instructions Retired Performance
         * Counter May Be Inaccurate".
         */
        if (cpu_has(c, X86_FEATURE_IRPERF) &&
            !cpu_has_amd_erratum(c, amd_erratum_1054))
                msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                        (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
        u32 ebx, eax, ecx, edx;
        u16 mask = 0xfff;

        if (c->x86 < 0xf)
                return;

        if (c->extended_cpuid_level < 0x80000006)
                return;

        cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

        tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
        tlb_lli_4k[ENTRIES] = ebx & mask;

        /*
         * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1
         * TLB characteristics from CPUID function 0x80000005 instead.
         */
        if (c->x86 == 0xf) {
                cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                mask = 0xff;
        }

        /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!((eax >> 16) & mask))
                tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
        else
                tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

        /* a 4M entry uses two 2M entries */
        tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

        /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!(eax & mask)) {
                /* Erratum 658 */
                if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
                        tlb_lli_2m[ENTRIES] = 1024;
                } else {
                        /* Use L1 ITLB info from CPUID Fn8000_0005 */
                        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                        tlb_lli_2m[ENTRIES] = eax & 0xff;
                }
        } else
                tlb_lli_2m[ENTRIES] = eax & mask;

        tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
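
/*
 * Illustrative decode (hypothetical value): CPUID Fn8000_0006 EBX =
 * 0x04000200 yields tlb_lld_4k = 0x400 = 1024 L2 DTLB 4K entries and
 * tlb_lli_4k = 0x200 = 512 L2 ITLB 4K entries, each masked to 12 bits.
 */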

static const struct cpu_dev amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [3] = "486 DX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
                          [15] = "Am5x86-WB"
                  }
                },
        },
        .legacy_cache_size = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_detect_tlb   = cpu_detect_tlb_amd,
        .c_bsp_init     = bsp_init_amd,
        .c_init         = init_amd,
        .c_x86_vendor   = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the helper macros
 * AMD_LEGACY_ERRATUM() or AMD_OSVW_ERRATUM().  The first int is the OSVW
 * id (-1 for legacy errata without one), followed by one or more
 * AMD_MODEL_RANGE() entries and a terminating 0.  Each range packs the
 * family into bits [31:24], the (model, stepping) of the range start
 * into bits [23:12] and of the range end into bits [11:0].  See
 * amd_erratum_400 below for a concrete definition.
 */
#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

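/*
 * Worked decode (for illustration): AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)
 * evaluates to (0x10 << 24) | (0x2 << 16) | (0x1 << 12) | (0xff << 4) | 0xf
 * = 0x10021fff, so cpu_has_amd_erratum() matches family 0x10 CPUs whose
 * ms = (model << 4) | stepping lies in [0x021, 0xfff].
 */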
static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                            osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
        if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;

        switch (dr) {
        case 0:
                wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
                break;
        case 1:
        case 2:
        case 3:
                wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
                break;
        default:
                break;
        }
}
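
/*
 * Illustrative use (hypothetical values): set_dr_addr_mask(0xfff, 0)
 * writes MSR_F16H_DR0_ADDR_MASK so the DR0 hardware breakpoint ignores
 * the low 12 address bits and fires on any access within the 4K page;
 * set_dr_addr_mask(0, 0) restores exact-address matching.
 */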