root/arch/x86/kernel/cpu/mtrr/mtrr.c


DEFINITIONS

This source file includes the following definitions.
  1. mtrr_enabled
  2. set_mtrr_ops
  3. have_wrcomb
  4. set_num_var_ranges
  5. init_table
  6. mtrr_rendezvous_handler
  7. types_compatible
  8. set_mtrr
  9. set_mtrr_cpuslocked
  10. set_mtrr_from_inactive_cpu
  11. mtrr_add_page
  12. mtrr_check
  13. mtrr_add
  14. mtrr_del_page
  15. mtrr_del
  16. arch_phys_wc_add
  17. arch_phys_wc_del
  18. arch_phys_wc_index
  19. init_ifs
  20. mtrr_save
  21. mtrr_restore
  22. mtrr_bp_init
  23. mtrr_ap_init
  24. mtrr_save_state
  25. set_mtrr_aps_delayed_init
  26. mtrr_aps_init
  27. mtrr_bp_restore
  28. mtrr_init_finialize

   1 /*  Generic MTRR (Memory Type Range Register) driver.
   2 
   3     Copyright (C) 1997-2000  Richard Gooch
   4     Copyright (c) 2002       Patrick Mochel
   5 
   6     This library is free software; you can redistribute it and/or
   7     modify it under the terms of the GNU Library General Public
   8     License as published by the Free Software Foundation; either
   9     version 2 of the License, or (at your option) any later version.
  10 
  11     This library is distributed in the hope that it will be useful,
  12     but WITHOUT ANY WARRANTY; without even the implied warranty of
  13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14     Library General Public License for more details.
  15 
  16     You should have received a copy of the GNU Library General Public
  17     License along with this library; if not, write to the Free
  18     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 
  20     Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
  21     The postal address is:
  22       Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
  23 
  24     Source: "Pentium Pro Family Developer's Manual, Volume 3:
  25     Operating System Writer's Guide" (Intel document number 242692),
  26     section 11.11.7
  27 
  28     This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
  29     on 6-7 March 2002.
  30     Source: Intel Architecture Software Developers Manual, Volume 3:
  31     System Programming Guide; Section 9.11. (1997 edition - PPro).
  32 */
  33 
  34 #define DEBUG
  35 
  36 #include <linux/types.h> /* FIXME: kvm_para.h needs this */
  37 
  38 #include <linux/stop_machine.h>
  39 #include <linux/kvm_para.h>
  40 #include <linux/uaccess.h>
  41 #include <linux/export.h>
  42 #include <linux/mutex.h>
  43 #include <linux/init.h>
  44 #include <linux/sort.h>
  45 #include <linux/cpu.h>
  46 #include <linux/pci.h>
  47 #include <linux/smp.h>
  48 #include <linux/syscore_ops.h>
  49 #include <linux/rcupdate.h>
  50 
  51 #include <asm/cpufeature.h>
  52 #include <asm/e820/api.h>
  53 #include <asm/mtrr.h>
  54 #include <asm/msr.h>
  55 #include <asm/pat.h>
  56 
  57 #include "mtrr.h"
  58 
  59 /* arch_phys_wc_add returns an MTRR register index plus this offset. */
  60 #define MTRR_TO_PHYS_WC_OFFSET 1000
  61 
  62 u32 num_var_ranges;
  63 static bool __mtrr_enabled;
  64 
  65 static bool mtrr_enabled(void)
  66 {
  67         return __mtrr_enabled;
  68 }
  69 
  70 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
  71 static DEFINE_MUTEX(mtrr_mutex);
  72 
  73 u64 size_or_mask, size_and_mask;
  74 static bool mtrr_aps_delayed_init;
  75 
  76 static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
  77 
  78 const struct mtrr_ops *mtrr_if;
  79 
  80 static void set_mtrr(unsigned int reg, unsigned long base,
  81                      unsigned long size, mtrr_type type);
  82 
  83 void __init set_mtrr_ops(const struct mtrr_ops *ops)
  84 {
  85         if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
  86                 mtrr_ops[ops->vendor] = ops;
  87 }
  88 
  89 /*  Returns non-zero if we have the write-combining memory type  */
  90 static int have_wrcomb(void)
  91 {
  92         struct pci_dev *dev;
  93 
  94         dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
  95         if (dev != NULL) {
  96                 /*
  97                  * ServerWorks LE chipsets < rev 6 have problems with
  98                  * write-combining. Don't allow it and leave room for other
  99                  * chipsets to be tagged
 100                  */
 101                 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
 102                     dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
 103                     dev->revision <= 5) {
 104                         pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n");
 105                         pci_dev_put(dev);
 106                         return 0;
 107                 }
 108                 /*
  109                  * Intel 450NX errata #23. Non-ascending cacheline evictions to
  110                  * write-combining memory may result in data corruption
 111                  */
 112                 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
 113                     dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
 114                         pr_info("Intel 450NX MMC detected. Write-combining disabled.\n");
 115                         pci_dev_put(dev);
 116                         return 0;
 117                 }
 118                 pci_dev_put(dev);
 119         }
 120         return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
 121 }
 122 
  123 /*  This function determines the number of variable MTRRs  */
 124 static void __init set_num_var_ranges(void)
 125 {
 126         unsigned long config = 0, dummy;
 127 
 128         if (use_intel())
 129                 rdmsr(MSR_MTRRcap, config, dummy);
 130         else if (is_cpu(AMD) || is_cpu(HYGON))
 131                 config = 2;
 132         else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 133                 config = 8;
 134 
 135         num_var_ranges = config & 0xff;
 136 }
 137 
 138 static void __init init_table(void)
 139 {
 140         int i, max;
 141 
 142         max = num_var_ranges;
 143         for (i = 0; i < max; i++)
 144                 mtrr_usage_table[i] = 1;
 145 }
 146 
 147 struct set_mtrr_data {
 148         unsigned long   smp_base;
 149         unsigned long   smp_size;
 150         unsigned int    smp_reg;
 151         mtrr_type       smp_type;
 152 };
 153 
 154 /**
 155  * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
 156  * by all the CPUs.
 157  * @info: pointer to mtrr configuration data
 158  *
  159  * Always returns zero.
 160  */
 161 static int mtrr_rendezvous_handler(void *info)
 162 {
 163         struct set_mtrr_data *data = info;
 164 
 165         /*
 166          * We use this same function to initialize the mtrrs during boot,
 167          * resume, runtime cpu online and on an explicit request to set a
 168          * specific MTRR.
 169          *
 170          * During boot or suspend, the state of the boot cpu's mtrrs has been
 171          * saved, and we want to replicate that across all the cpus that come
 172          * online (either at the end of boot or resume or during a runtime cpu
 173          * online). If we're doing that, @reg is set to something special and on
  174  * all the cpus we do mtrr_if->set_all() (On the logical cpu that
 175          * started the boot/resume sequence, this might be a duplicate
 176          * set_all()).
 177          */
 178         if (data->smp_reg != ~0U) {
 179                 mtrr_if->set(data->smp_reg, data->smp_base,
 180                              data->smp_size, data->smp_type);
 181         } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
 182                 mtrr_if->set_all();
 183         }
 184         return 0;
 185 }
 186 
 187 static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 188 {
 189         return type1 == MTRR_TYPE_UNCACHABLE ||
 190                type2 == MTRR_TYPE_UNCACHABLE ||
 191                (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
 192                (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
 193 }
 194 
 195 /**
 196  * set_mtrr - update mtrrs on all processors
 197  * @reg:        mtrr in question
 198  * @base:       mtrr base
 199  * @size:       mtrr size
 200  * @type:       mtrr type
 201  *
 202  * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 203  *
 204  * 1. Queue work to do the following on all processors:
 205  * 2. Disable Interrupts
 206  * 3. Wait for all procs to do so
 207  * 4. Enter no-fill cache mode
 208  * 5. Flush caches
 209  * 6. Clear PGE bit
 210  * 7. Flush all TLBs
 211  * 8. Disable all range registers
 212  * 9. Update the MTRRs
 213  * 10. Enable all range registers
 214  * 11. Flush all TLBs and caches again
 215  * 12. Enter normal cache mode and reenable caching
 216  * 13. Set PGE
 217  * 14. Wait for buddies to catch up
 218  * 15. Enable interrupts.
 219  *
 220  * What does that mean for us? Well, stop_machine() will ensure that
 221  * the rendezvous handler is started on each CPU. And in lockstep they
  222  * do the state transition of disabling interrupts, updating MTRRs
  223  * (the CPU vendors may each do it differently, so we call the mtrr_if->set()
  224  * callback and let them take care of it) and enabling interrupts.
 225  *
 226  * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 227  * becomes nops.
 228  */
 229 static void
 230 set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 231 {
 232         struct set_mtrr_data data = { .smp_reg = reg,
 233                                       .smp_base = base,
 234                                       .smp_size = size,
 235                                       .smp_type = type
 236                                     };
 237 
 238         stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
 239 }
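
/*
 * Illustrative sketch (not part of the original file): roughly what a single
 * CPU does for steps 4-13 above when the generic (Intel-style) driver updates
 * one variable range.  The real work is vendor specific and lives behind
 * mtrr_if->set()/set_all(); the MSR macros and the bare enable bit used here
 * are simplified assumptions (the real code saves and restores the firmware
 * programmed MTRRdefType value instead of using a bare enable bit).
 */
#if 0	/* simplified example only */
#define EXAMPLE_MTRR_DEF_TYPE_E		(1UL << 11)	/* MTRRdefType enable */

static void example_update_one_var_mtrr(unsigned int reg, u64 base, u64 mask)
{
	unsigned long cr0 = read_cr0();
	unsigned long cr4 = __read_cr4();

	write_cr0(cr0 | X86_CR0_CD);		/* enter no-fill cache mode */
	wbinvd();				/* flush caches */
	__write_cr4(cr4 & ~X86_CR4_PGE);	/* clearing PGE flushes TLBs */
	wrmsrl(MSR_MTRRdefType, 0);		/* disable all range registers */

	wrmsrl(MTRRphysBase_MSR(reg), base);	/* update the MTRR pair */
	wrmsrl(MTRRphysMask_MSR(reg), mask);

	wrmsrl(MSR_MTRRdefType, EXAMPLE_MTRR_DEF_TYPE_E);
	wbinvd();				/* flush caches and TLBs again */
	write_cr0(cr0);				/* back to normal cache mode */
	__write_cr4(cr4);			/* restore PGE */
}
#endif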
 240 
 241 static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
 242                                 unsigned long size, mtrr_type type)
 243 {
 244         struct set_mtrr_data data = { .smp_reg = reg,
 245                                       .smp_base = base,
 246                                       .smp_size = size,
 247                                       .smp_type = type
 248                                     };
 249 
 250         stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
 251 }
 252 
 253 static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
 254                                       unsigned long size, mtrr_type type)
 255 {
 256         struct set_mtrr_data data = { .smp_reg = reg,
 257                                       .smp_base = base,
 258                                       .smp_size = size,
 259                                       .smp_type = type
 260                                     };
 261 
 262         stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
 263                                        cpu_callout_mask);
 264 }
 265 
 266 /**
 267  * mtrr_add_page - Add a memory type region
 268  * @base: Physical base address of region in pages (in units of 4 kB!)
 269  * @size: Physical size of region in pages (4 kB)
 270  * @type: Type of MTRR desired
 271  * @increment: If this is true do usage counting on the region
 272  *
  273  * Memory type range registers control the caching on newer Intel and
  274  * non-Intel processors. This function allows drivers to request that an
  275  * MTRR be added. The details and hardware specifics of each processor's
  276  * implementation are hidden from the caller, but nevertheless the
  277  * caller should expect to provide a power-of-two size on an
  278  * equivalent power-of-two boundary.
 279  *
  280  * If the region cannot be added, either because all regions are in use
  281  * or because the CPU cannot support it, a negative value is returned. On success
 282  * the register number for this entry is returned, but should be treated
 283  * as a cookie only.
 284  *
 285  * On a multiprocessor machine the changes are made to all processors.
 286  * This is required on x86 by the Intel processors.
 287  *
 288  * The available types are
 289  *
 290  * %MTRR_TYPE_UNCACHABLE - No caching
 291  *
  292  * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 293  *
 294  * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 295  *
 296  * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 297  *
 298  * BUGS: Needs a quiet flag for the cases where drivers do not mind
 299  * failures and do not wish system log messages to be sent.
 300  */
 301 int mtrr_add_page(unsigned long base, unsigned long size,
 302                   unsigned int type, bool increment)
 303 {
 304         unsigned long lbase, lsize;
 305         int i, replace, error;
 306         mtrr_type ltype;
 307 
 308         if (!mtrr_enabled())
 309                 return -ENXIO;
 310 
 311         error = mtrr_if->validate_add_page(base, size, type);
 312         if (error)
 313                 return error;
 314 
 315         if (type >= MTRR_NUM_TYPES) {
 316                 pr_warn("type: %u invalid\n", type);
 317                 return -EINVAL;
 318         }
 319 
 320         /* If the type is WC, check that this processor supports it */
 321         if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
 322                 pr_warn("your processor doesn't support write-combining\n");
 323                 return -ENOSYS;
 324         }
 325 
 326         if (!size) {
 327                 pr_warn("zero sized request\n");
 328                 return -EINVAL;
 329         }
 330 
 331         if ((base | (base + size - 1)) >>
 332             (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
 333                 pr_warn("base or size exceeds the MTRR width\n");
 334                 return -EINVAL;
 335         }
 336 
 337         error = -EINVAL;
 338         replace = -1;
 339 
 340         /* No CPU hotplug when we change MTRR entries */
 341         get_online_cpus();
 342 
 343         /* Search for existing MTRR  */
 344         mutex_lock(&mtrr_mutex);
 345         for (i = 0; i < num_var_ranges; ++i) {
 346                 mtrr_if->get(i, &lbase, &lsize, &ltype);
 347                 if (!lsize || base > lbase + lsize - 1 ||
 348                     base + size - 1 < lbase)
 349                         continue;
 350                 /*
 351                  * At this point we know there is some kind of
 352                  * overlap/enclosure
 353                  */
 354                 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
 355                         if (base <= lbase &&
 356                             base + size - 1 >= lbase + lsize - 1) {
 357                                 /*  New region encloses an existing region  */
 358                                 if (type == ltype) {
 359                                         replace = replace == -1 ? i : -2;
 360                                         continue;
 361                                 } else if (types_compatible(type, ltype))
 362                                         continue;
 363                         }
  364                         pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n",
  365                                 base, size, lbase, lsize);
 366                         goto out;
 367                 }
 368                 /* New region is enclosed by an existing region */
 369                 if (ltype != type) {
 370                         if (types_compatible(type, ltype))
 371                                 continue;
 372                         pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n",
 373                                 base, size, mtrr_attrib_to_str(ltype),
 374                                 mtrr_attrib_to_str(type));
 375                         goto out;
 376                 }
 377                 if (increment)
 378                         ++mtrr_usage_table[i];
 379                 error = i;
 380                 goto out;
 381         }
 382         /* Search for an empty MTRR */
 383         i = mtrr_if->get_free_region(base, size, replace);
 384         if (i >= 0) {
 385                 set_mtrr_cpuslocked(i, base, size, type);
 386                 if (likely(replace < 0)) {
 387                         mtrr_usage_table[i] = 1;
 388                 } else {
 389                         mtrr_usage_table[i] = mtrr_usage_table[replace];
 390                         if (increment)
 391                                 mtrr_usage_table[i]++;
 392                         if (unlikely(replace != i)) {
 393                                 set_mtrr_cpuslocked(replace, 0, 0, 0);
 394                                 mtrr_usage_table[replace] = 0;
 395                         }
 396                 }
 397         } else {
 398                 pr_info("no more MTRRs available\n");
 399         }
 400         error = i;
 401  out:
 402         mutex_unlock(&mtrr_mutex);
 403         put_online_cpus();
 404         return error;
 405 }
 406 
 407 static int mtrr_check(unsigned long base, unsigned long size)
 408 {
 409         if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
 410                 pr_warn("size and base must be multiples of 4 kiB\n");
 411                 pr_debug("size: 0x%lx  base: 0x%lx\n", size, base);
 412                 dump_stack();
 413                 return -1;
 414         }
 415         return 0;
 416 }
 417 
 418 /**
 419  * mtrr_add - Add a memory type region
 420  * @base: Physical base address of region
 421  * @size: Physical size of region
 422  * @type: Type of MTRR desired
 423  * @increment: If this is true do usage counting on the region
 424  *
  425  * Memory type range registers control the caching on newer Intel and
  426  * non-Intel processors. This function allows drivers to request that an
  427  * MTRR be added. The details and hardware specifics of each processor's
  428  * implementation are hidden from the caller, but nevertheless the
  429  * caller should expect to provide a power-of-two size on an
  430  * equivalent power-of-two boundary.
 431  *
  432  * If the region cannot be added, either because all regions are in use
  433  * or because the CPU cannot support it, a negative value is returned. On success
 434  * the register number for this entry is returned, but should be treated
 435  * as a cookie only.
 436  *
 437  * On a multiprocessor machine the changes are made to all processors.
 438  * This is required on x86 by the Intel processors.
 439  *
 440  * The available types are
 441  *
 442  * %MTRR_TYPE_UNCACHABLE - No caching
 443  *
  444  * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 445  *
 446  * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 447  *
 448  * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 449  *
 450  * BUGS: Needs a quiet flag for the cases where drivers do not mind
 451  * failures and do not wish system log messages to be sent.
 452  */
 453 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
 454              bool increment)
 455 {
 456         if (!mtrr_enabled())
 457                 return -ENODEV;
 458         if (mtrr_check(base, size))
 459                 return -EINVAL;
 460         return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
 461                              increment);
 462 }
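
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * request a write-combining MTRR for a framebuffer aperture at probe time.
 * The function and parameter names are made up for the example; new code
 * should normally prefer arch_phys_wc_add() further down, which becomes a
 * no-op when PAT is available.  Teardown is sketched after mtrr_del() below.
 */
static int __maybe_unused example_fb_enable_wc(unsigned long fb_base,
					       unsigned long fb_size)
{
	int cookie;

	/* fb_base/fb_size must describe a power-of-two sized, aligned region */
	cookie = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
	if (cookie < 0)
		pr_warn("example: no WC MTRR added, writes may be slower\n");

	/* On success the value is an opaque cookie to pass to mtrr_del() */
	return cookie;
}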
 463 
 464 /**
 465  * mtrr_del_page - delete a memory type region
 466  * @reg: Register returned by mtrr_add
 467  * @base: Physical base address
 468  * @size: Size of region
 469  *
 470  * If register is supplied then base and size are ignored. This is
 471  * how drivers should call it.
 472  *
 473  * Releases an MTRR region. If the usage count drops to zero the
 474  * register is freed and the region returns to default state.
 475  * On success the register is returned, on failure a negative error
 476  * code.
 477  */
 478 int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 479 {
 480         int i, max;
 481         mtrr_type ltype;
 482         unsigned long lbase, lsize;
 483         int error = -EINVAL;
 484 
 485         if (!mtrr_enabled())
 486                 return -ENODEV;
 487 
 488         max = num_var_ranges;
 489         /* No CPU hotplug when we change MTRR entries */
 490         get_online_cpus();
 491         mutex_lock(&mtrr_mutex);
 492         if (reg < 0) {
 493                 /*  Search for existing MTRR  */
 494                 for (i = 0; i < max; ++i) {
 495                         mtrr_if->get(i, &lbase, &lsize, &ltype);
 496                         if (lbase == base && lsize == size) {
 497                                 reg = i;
 498                                 break;
 499                         }
 500                 }
 501                 if (reg < 0) {
 502                         pr_debug("no MTRR for %lx000,%lx000 found\n",
 503                                  base, size);
 504                         goto out;
 505                 }
 506         }
 507         if (reg >= max) {
 508                 pr_warn("register: %d too big\n", reg);
 509                 goto out;
 510         }
 511         mtrr_if->get(reg, &lbase, &lsize, &ltype);
 512         if (lsize < 1) {
 513                 pr_warn("MTRR %d not used\n", reg);
 514                 goto out;
 515         }
 516         if (mtrr_usage_table[reg] < 1) {
 517                 pr_warn("reg: %d has count=0\n", reg);
 518                 goto out;
 519         }
 520         if (--mtrr_usage_table[reg] < 1)
 521                 set_mtrr_cpuslocked(reg, 0, 0, 0);
 522         error = reg;
 523  out:
 524         mutex_unlock(&mtrr_mutex);
 525         put_online_cpus();
 526         return error;
 527 }
 528 
 529 /**
 530  * mtrr_del - delete a memory type region
 531  * @reg: Register returned by mtrr_add
 532  * @base: Physical base address
 533  * @size: Size of region
 534  *
 535  * If register is supplied then base and size are ignored. This is
 536  * how drivers should call it.
 537  *
 538  * Releases an MTRR region. If the usage count drops to zero the
 539  * register is freed and the region returns to default state.
 540  * On success the register is returned, on failure a negative error
 541  * code.
 542  */
 543 int mtrr_del(int reg, unsigned long base, unsigned long size)
 544 {
 545         if (!mtrr_enabled())
 546                 return -ENODEV;
 547         if (mtrr_check(base, size))
 548                 return -EINVAL;
 549         return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 550 }
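
/*
 * Illustrative sketch (not part of the original file): releasing the region
 * requested in the mtrr_add() example above.  When a valid register cookie is
 * passed, base and size are ignored, so zeroes are fine here.
 */
static void __maybe_unused example_fb_disable_wc(int cookie)
{
	if (cookie >= 0)
		mtrr_del(cookie, 0, 0);
}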
 551 
 552 /**
 553  * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
 554  * @base: Physical base address
 555  * @size: Size of region
 556  *
 557  * If PAT is available, this does nothing.  If PAT is unavailable, it
 558  * attempts to add a WC MTRR covering size bytes starting at base and
 559  * logs an error if this fails.
 560  *
  561  * The caller should provide a power-of-two size on an equivalent
  562  * power-of-two boundary.
 563  *
  564  * Drivers must store the return value to pass to arch_phys_wc_del(),
 565  * but drivers should not try to interpret that return value.
 566  */
 567 int arch_phys_wc_add(unsigned long base, unsigned long size)
 568 {
 569         int ret;
 570 
 571         if (pat_enabled() || !mtrr_enabled())
 572                 return 0;  /* Success!  (We don't need to do anything.) */
 573 
 574         ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
 575         if (ret < 0) {
 576                 pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
 577                         (void *)base, (void *)(base + size - 1));
 578                 return ret;
 579         }
 580         return ret + MTRR_TO_PHYS_WC_OFFSET;
 581 }
 582 EXPORT_SYMBOL(arch_phys_wc_add);
 583 
 584 /*
 585  * arch_phys_wc_del - undoes arch_phys_wc_add
 586  * @handle: Return value from arch_phys_wc_add
 587  *
  588  * This cleans up after arch_phys_wc_add().
 589  *
  590  * The API guarantees that arch_phys_wc_del(error code) and
  591  * arch_phys_wc_del(0) do nothing.
 592  */
 593 void arch_phys_wc_del(int handle)
 594 {
 595         if (handle >= 1) {
 596                 WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
 597                 mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
 598         }
 599 }
 600 EXPORT_SYMBOL(arch_phys_wc_del);
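
/*
 * Illustrative sketch (not part of the original file): the preferred modern
 * pattern for drivers that want write-combined access to an MMIO/framebuffer
 * range.  arch_phys_wc_add() is a no-op under PAT, and ioremap_wc()/iounmap()
 * (from <linux/io.h>) provide the WC mapping either way.  All example_* names
 * are made up.
 */
static int __maybe_unused example_map_wc(unsigned long phys, unsigned long size,
					 void __iomem **regs)
{
	int wc_handle = arch_phys_wc_add(phys, size);	/* keep for teardown */

	*regs = ioremap_wc(phys, size);
	if (!*regs) {
		arch_phys_wc_del(wc_handle);
		return -ENOMEM;
	}
	return wc_handle;
}

static void __maybe_unused example_unmap_wc(void __iomem *regs, int wc_handle)
{
	iounmap(regs);
	arch_phys_wc_del(wc_handle);	/* safe for 0 and error values too */
}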
 601 
 602 /*
 603  * arch_phys_wc_index - translates arch_phys_wc_add's return value
 604  * @handle: Return value from arch_phys_wc_add
 605  *
 606  * This will turn the return value from arch_phys_wc_add into an mtrr
 607  * index suitable for debugging.
 608  *
 609  * Note: There is no legitimate use for this function, except possibly
  610  * in a printk line.  Alas, there is an illegitimate use in some ancient
 611  * drm ioctls.
 612  */
 613 int arch_phys_wc_index(int handle)
 614 {
 615         if (handle < MTRR_TO_PHYS_WC_OFFSET)
 616                 return -1;
 617         else
 618                 return handle - MTRR_TO_PHYS_WC_OFFSET;
 619 }
 620 EXPORT_SYMBOL_GPL(arch_phys_wc_index);
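
/*
 * Illustrative sketch (not part of the original file): about the only sane
 * use of arch_phys_wc_index() is turning the opaque handle back into an MTRR
 * number for a diagnostic message; it returns -1 when no MTRR backs the
 * handle.  The example_* name is made up.
 */
static void __maybe_unused example_report_wc(int wc_handle)
{
	pr_debug("WC handle %d maps to MTRR index %d\n",
		 wc_handle, arch_phys_wc_index(wc_handle));
}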
 621 
 622 /*
 623  * HACK ALERT!
  624  * These should be called implicitly, but we can't do that until all the initcall
 625  * stuff is done...
 626  */
 627 static void __init init_ifs(void)
 628 {
 629 #ifndef CONFIG_X86_64
 630         amd_init_mtrr();
 631         cyrix_init_mtrr();
 632         centaur_init_mtrr();
 633 #endif
 634 }
 635 
  636 /* The suspend/resume methods are only for CPUs without generic MTRR support;
  637  * CPUs using the generic MTRR driver don't require this.
 638  */
 639 struct mtrr_value {
 640         mtrr_type       ltype;
 641         unsigned long   lbase;
 642         unsigned long   lsize;
 643 };
 644 
 645 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 646 
 647 static int mtrr_save(void)
 648 {
 649         int i;
 650 
 651         for (i = 0; i < num_var_ranges; i++) {
 652                 mtrr_if->get(i, &mtrr_value[i].lbase,
 653                                 &mtrr_value[i].lsize,
 654                                 &mtrr_value[i].ltype);
 655         }
 656         return 0;
 657 }
 658 
 659 static void mtrr_restore(void)
 660 {
 661         int i;
 662 
 663         for (i = 0; i < num_var_ranges; i++) {
 664                 if (mtrr_value[i].lsize) {
 665                         set_mtrr(i, mtrr_value[i].lbase,
 666                                     mtrr_value[i].lsize,
 667                                     mtrr_value[i].ltype);
 668                 }
 669         }
 670 }
 671 
 672 
 673 
 674 static struct syscore_ops mtrr_syscore_ops = {
 675         .suspend        = mtrr_save,
 676         .resume         = mtrr_restore,
 677 };
 678 
 679 int __initdata changed_by_mtrr_cleanup;
 680 
 681 #define SIZE_OR_MASK_BITS(n)  (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
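
/*
 * Worked example (editorial note, not in the original file): with
 * PAGE_SHIFT == 12, SIZE_OR_MASK_BITS(36) expands to
 * ~((1ULL << 24) - 1) == 0xffffffffff000000, i.e. the low 24 bits (enough to
 * cover a 36-bit physical address space in 4 KiB pages) are clear and every
 * higher bit is set.
 */
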
 682 /**
 683  * mtrr_bp_init - initialize mtrrs on the boot CPU
 684  *
 685  * This needs to be called early; before any of the other CPUs are
 686  * initialized (i.e. before smp_init()).
 687  *
 688  */
 689 void __init mtrr_bp_init(void)
 690 {
 691         u32 phys_addr;
 692 
 693         init_ifs();
 694 
 695         phys_addr = 32;
 696 
 697         if (boot_cpu_has(X86_FEATURE_MTRR)) {
 698                 mtrr_if = &generic_mtrr_ops;
 699                 size_or_mask = SIZE_OR_MASK_BITS(36);
 700                 size_and_mask = 0x00f00000;
 701                 phys_addr = 36;
 702 
 703                 /*
  704                  * This is an AMD-specific CPUID leaf, but we assume (hope?)
  705                  * that Intel will implement it too when they extend the
  706                  * address bus of the Xeon.
 707                  */
 708                 if (cpuid_eax(0x80000000) >= 0x80000008) {
 709                         phys_addr = cpuid_eax(0x80000008) & 0xff;
 710                         /* CPUID workaround for Intel 0F33/0F34 CPU */
 711                         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 712                             boot_cpu_data.x86 == 0xF &&
 713                             boot_cpu_data.x86_model == 0x3 &&
 714                             (boot_cpu_data.x86_stepping == 0x3 ||
 715                              boot_cpu_data.x86_stepping == 0x4))
 716                                 phys_addr = 36;
 717 
 718                         size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
 719                         size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 720                 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 721                            boot_cpu_data.x86 == 6) {
  722                          * The VIA C* family has Intel-style MTRRs,
  723                          * but doesn't support PAE.
 724                          * but don't support PAE
 725                          */
 726                         size_or_mask = SIZE_OR_MASK_BITS(32);
 727                         size_and_mask = 0;
 728                         phys_addr = 32;
 729                 }
 730         } else {
 731                 switch (boot_cpu_data.x86_vendor) {
 732                 case X86_VENDOR_AMD:
 733                         if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
 734                                 /* Pre-Athlon (K6) AMD CPU MTRRs */
 735                                 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
 736                                 size_or_mask = SIZE_OR_MASK_BITS(32);
 737                                 size_and_mask = 0;
 738                         }
 739                         break;
 740                 case X86_VENDOR_CENTAUR:
 741                         if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
 742                                 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
 743                                 size_or_mask = SIZE_OR_MASK_BITS(32);
 744                                 size_and_mask = 0;
 745                         }
 746                         break;
 747                 case X86_VENDOR_CYRIX:
 748                         if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
 749                                 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
 750                                 size_or_mask = SIZE_OR_MASK_BITS(32);
 751                                 size_and_mask = 0;
 752                         }
 753                         break;
 754                 default:
 755                         break;
 756                 }
 757         }
 758 
 759         if (mtrr_if) {
 760                 __mtrr_enabled = true;
 761                 set_num_var_ranges();
 762                 init_table();
 763                 if (use_intel()) {
 764                         /* BIOS may override */
 765                         __mtrr_enabled = get_mtrr_state();
 766 
 767                         if (mtrr_enabled())
 768                                 mtrr_bp_pat_init();
 769 
 770                         if (mtrr_cleanup(phys_addr)) {
 771                                 changed_by_mtrr_cleanup = 1;
 772                                 mtrr_if->set_all();
 773                         }
 774                 }
 775         }
 776 
 777         if (!mtrr_enabled()) {
 778                 pr_info("Disabled\n");
 779 
 780                 /*
 781                  * PAT initialization relies on MTRR's rendezvous handler.
 782                  * Skip PAT init until the handler can initialize both
 783                  * features independently.
 784                  */
 785                 pat_disable("MTRRs disabled, skipping PAT initialization too.");
 786         }
 787 }
 788 
 789 void mtrr_ap_init(void)
 790 {
 791         if (!mtrr_enabled())
 792                 return;
 793 
 794         if (!use_intel() || mtrr_aps_delayed_init)
 795                 return;
 796 
 797         rcu_cpu_starting(smp_processor_id());
 798 
 799         /*
  800          * Ideally we should hold mtrr_mutex here to avoid mtrr entries
  801          * being changed, but this routine is called at cpu boot time and
  802          * holding the lock would break it.
  803          *
  804          * This routine is called in two cases:
  805          *
  806          *   1. very early during software resume, when there absolutely
  807          *      are no mtrr entry changes;
  808          *
  809          *   2. cpu hotadd time. We let mtrr_add/del_page hold the cpuhotplug
  810          *      lock to prevent mtrr entry changes.
 811          */
 812         set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
 813 }
 814 
 815 /**
 816  * Save current fixed-range MTRR state of the first cpu in cpu_online_mask.
 817  */
 818 void mtrr_save_state(void)
 819 {
 820         int first_cpu;
 821 
 822         if (!mtrr_enabled())
 823                 return;
 824 
 825         first_cpu = cpumask_first(cpu_online_mask);
 826         smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 827 }
 828 
 829 void set_mtrr_aps_delayed_init(void)
 830 {
 831         if (!mtrr_enabled())
 832                 return;
 833         if (!use_intel())
 834                 return;
 835 
 836         mtrr_aps_delayed_init = true;
 837 }
 838 
 839 /*
  840  * Delayed MTRR initialization for all APs
 841  */
 842 void mtrr_aps_init(void)
 843 {
 844         if (!use_intel() || !mtrr_enabled())
 845                 return;
 846 
 847         /*
  848          * Check if someone has requested the delay of AP MTRR initialization
  849          * by calling set_mtrr_aps_delayed_init() prior to this point. If not,
  850          * then we are done.
 851          */
 852         if (!mtrr_aps_delayed_init)
 853                 return;
 854 
 855         set_mtrr(~0U, 0, 0, 0);
 856         mtrr_aps_delayed_init = false;
 857 }
 858 
 859 void mtrr_bp_restore(void)
 860 {
 861         if (!use_intel() || !mtrr_enabled())
 862                 return;
 863 
 864         mtrr_if->set_all();
 865 }
 866 
 867 static int __init mtrr_init_finialize(void)
 868 {
 869         if (!mtrr_enabled())
 870                 return 0;
 871 
 872         if (use_intel()) {
 873                 if (!changed_by_mtrr_cleanup)
 874                         mtrr_state_warn();
 875                 return 0;
 876         }
 877 
 878         /*
  879          * The CPU has no generic MTRR support and seems not to support SMP.
  880          * Such CPUs have specific drivers, so we use a tricky method to
  881          * support suspend/resume for them.
  882          *
  883          * TBD: is there any system with such a CPU which supports
  884          * suspend/resume? If not, we should remove this code.
 885          */
 886         register_syscore_ops(&mtrr_syscore_ops);
 887 
 888         return 0;
 889 }
 890 subsys_initcall(mtrr_init_finialize);
