linux/arch/x86/kernel/cpu/mtrr/main.c
   1/*  Generic MTRR (Memory Type Range Register) driver.
   2
   3    Copyright (C) 1997-2000  Richard Gooch
   4    Copyright (c) 2002       Patrick Mochel
   5
   6    This library is free software; you can redistribute it and/or
   7    modify it under the terms of the GNU Library General Public
   8    License as published by the Free Software Foundation; either
   9    version 2 of the License, or (at your option) any later version.
  10
  11    This library is distributed in the hope that it will be useful,
  12    but WITHOUT ANY WARRANTY; without even the implied warranty of
  13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14    Library General Public License for more details.
  15
  16    You should have received a copy of the GNU Library General Public
  17    License along with this library; if not, write to the Free
  18    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19
  20    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
  21    The postal address is:
  22      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
  23
  24    Source: "Pentium Pro Family Developer's Manual, Volume 3:
  25    Operating System Writer's Guide" (Intel document number 242692),
  26    section 11.11.7
  27
  28    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
  29    on 6-7 March 2002.
  30    Source: Intel Architecture Software Developers Manual, Volume 3:
  31    System Programming Guide; Section 9.11. (1997 edition - PPro).
  32*/
  33
  34#define DEBUG
  35
  36#include <linux/types.h> /* FIXME: kvm_para.h needs this */
  37
  38#include <linux/stop_machine.h>
  39#include <linux/kvm_para.h>
  40#include <linux/uaccess.h>
  41#include <linux/module.h>
  42#include <linux/mutex.h>
  43#include <linux/init.h>
  44#include <linux/sort.h>
  45#include <linux/cpu.h>
  46#include <linux/pci.h>
  47#include <linux/smp.h>
  48#include <linux/syscore_ops.h>
  49
  50#include <asm/cpufeature.h>
  51#include <asm/e820.h>
  52#include <asm/mtrr.h>
  53#include <asm/msr.h>
  54#include <asm/pat.h>
  55
  56#include "mtrr.h"
  57
  58/* arch_phys_wc_add returns an MTRR register index plus this offset. */
  59#define MTRR_TO_PHYS_WC_OFFSET 1000
  60
  61u32 num_var_ranges;
  62static bool __mtrr_enabled;
  63
  64static bool mtrr_enabled(void)
  65{
  66        return __mtrr_enabled;
  67}
  68
  69unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
  70static DEFINE_MUTEX(mtrr_mutex);
  71
  72u64 size_or_mask, size_and_mask;
  73static bool mtrr_aps_delayed_init;
  74
  75static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
  76
  77const struct mtrr_ops *mtrr_if;
  78
  79static void set_mtrr(unsigned int reg, unsigned long base,
  80                     unsigned long size, mtrr_type type);
  81
  82void set_mtrr_ops(const struct mtrr_ops *ops)
  83{
  84        if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
  85                mtrr_ops[ops->vendor] = ops;
  86}
  87
  88/*  Returns non-zero if we have the write-combining memory type  */
  89static int have_wrcomb(void)
  90{
  91        struct pci_dev *dev;
  92
  93        dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
  94        if (dev != NULL) {
  95                /*
  96                 * ServerWorks LE chipsets < rev 6 have problems with
  97                 * write-combining. Don't allow it and leave room for other
  98                 * chipsets to be tagged
  99                 */
 100                if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
 101                    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
 102                    dev->revision <= 5) {
 103                        pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
 104                        pci_dev_put(dev);
 105                        return 0;
 106                }
 107                /*
 108                 * Intel 450NX errata #23. Non-ascending cacheline evictions to
 109                 * write-combining memory may result in data corruption.
 110                 */
 111                if (dev->vendor == PCI_VENDOR_ID_INTEL &&
 112                    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
 113                        pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
 114                        pci_dev_put(dev);
 115                        return 0;
 116                }
 117                pci_dev_put(dev);
 118        }
 119        return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
 120}
 121
 122/*  Determine the number of variable MTRRs and store it in num_var_ranges  */
 123static void __init set_num_var_ranges(void)
 124{
 125        unsigned long config = 0, dummy;
 126
 127        if (use_intel())
 128                rdmsr(MSR_MTRRcap, config, dummy);
 129        else if (is_cpu(AMD))
 130                config = 2;
 131        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 132                config = 8;
 133
 134        num_var_ranges = config & 0xff;
 135}
 136
 137static void __init init_table(void)
 138{
 139        int i, max;
 140
 141        max = num_var_ranges;
 142        for (i = 0; i < max; i++)
 143                mtrr_usage_table[i] = 1;
 144}
 145
 146struct set_mtrr_data {
 147        unsigned long   smp_base;
 148        unsigned long   smp_size;
 149        unsigned int    smp_reg;
 150        mtrr_type       smp_type;
 151};
 152
 153/**
 154 * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
 155 * by all the CPUs.
 156 * @info: pointer to mtrr configuration data
 157 *
 158 * Returns zero.
 159 */
 160static int mtrr_rendezvous_handler(void *info)
 161{
 162        struct set_mtrr_data *data = info;
 163
 164        /*
 165         * We use this same function to initialize the mtrrs during boot,
 166         * resume, runtime cpu online and on an explicit request to set a
 167         * specific MTRR.
 168         *
 169         * During boot or suspend, the state of the boot cpu's mtrrs has been
 170         * saved, and we want to replicate that across all the cpus that come
 171         * online (either at the end of boot or resume or during a runtime cpu
 172         * online). If we're doing that, smp_reg is set to ~0U and on all
 173         * the cpus we do mtrr_if->set_all() (on the logical cpu that
 174         * started the boot/resume sequence, this might be a duplicate
 175         * set_all()).
 176         */
 177        if (data->smp_reg != ~0U) {
 178                mtrr_if->set(data->smp_reg, data->smp_base,
 179                             data->smp_size, data->smp_type);
 180        } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
 181                mtrr_if->set_all();
 182        }
 183        return 0;
 184}
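
/*
 * Concretely (editorial note, not upstream documentation): the wrappers
 * below invoke this handler through stop_machine() in two forms,
 *
 *	set_mtrr(reg, base, size, type);    program one register on all CPUs
 *	set_mtrr(~0U, 0, 0, 0);             replay the saved state via set_all()
 *
 * the second form is what mtrr_aps_init() relies on, and
 * set_mtrr_from_inactive_cpu(~0U, 0, 0, 0) in mtrr_ap_init() is its
 * hotplug/resume counterpart.
 */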
 185
 186static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 187{
 188        return type1 == MTRR_TYPE_UNCACHABLE ||
 189               type2 == MTRR_TYPE_UNCACHABLE ||
 190               (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
 191               (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
 192}
 193
 194/**
 195 * set_mtrr - update mtrrs on all processors
 196 * @reg:        mtrr in question
 197 * @base:       mtrr base
 198 * @size:       mtrr size
 199 * @type:       mtrr type
 200 *
 201 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 202 *
 203 * 1. Queue work to do the following on all processors:
 204 * 2. Disable Interrupts
 205 * 3. Wait for all procs to do so
 206 * 4. Enter no-fill cache mode
 207 * 5. Flush caches
 208 * 6. Clear PGE bit
 209 * 7. Flush all TLBs
 210 * 8. Disable all range registers
 211 * 9. Update the MTRRs
 212 * 10. Enable all range registers
 213 * 11. Flush all TLBs and caches again
 214 * 12. Enter normal cache mode and reenable caching
 215 * 13. Set PGE
 216 * 14. Wait for buddies to catch up
 217 * 15. Enable interrupts.
 218 *
 219 * What does that mean for us? Well, stop_machine() will ensure that
 220 * the rendezvous handler is started on each CPU. And in lockstep they
 221 * do the state transition of disabling interrupts, updating MTRR's
 222 * (the CPU vendors may each do it differently, so we call mtrr_if->set()
 223 * callback and let them take care of it.) and enabling interrupts.
 224 *
 225 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 226 * becomes nops.
 227 */
 228static void
 229set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 230{
 231        struct set_mtrr_data data = { .smp_reg = reg,
 232                                      .smp_base = base,
 233                                      .smp_size = size,
 234                                      .smp_type = type
 235                                    };
 236
 237        stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
 238}
 239
 240static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
 241                                      unsigned long size, mtrr_type type)
 242{
 243        struct set_mtrr_data data = { .smp_reg = reg,
 244                                      .smp_base = base,
 245                                      .smp_size = size,
 246                                      .smp_type = type
 247                                    };
 248
 249        stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
 250                                       cpu_callout_mask);
 251}
 252
 253/**
 254 * mtrr_add_page - Add a memory type region
 255 * @base: Physical base address of region in pages (in units of 4 kB!)
 256 * @size: Physical size of region in pages (4 kB)
 257 * @type: Type of MTRR desired
 258 * @increment: If this is true do usage counting on the region
 259 *
 260 * Memory type region registers control the caching on newer Intel and
 261 * non-Intel processors. This function allows drivers to request that an
 262 * MTRR be added. The details and hardware specifics of each processor's
 263 * implementation are hidden from the caller, but nevertheless the
 264 * caller should expect to need to provide a power of two size on an
 265 * equivalent power of two boundary.
 266 *
 267 * If the region cannot be added either because all regions are in use
 268 * or the CPU cannot support it a negative value is returned. On success
 269 * the register number for this entry is returned, but should be treated
 270 * as a cookie only.
 271 *
 272 * On a multiprocessor machine the changes are made to all processors.
 273 * This is required on x86 by the Intel processors.
 274 *
 275 * The available types are
 276 *
 277 * %MTRR_TYPE_UNCACHABLE - No caching
 278 *
 279 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 280 *
 281 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 282 *
 283 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 284 *
 285 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 286 * failures and do not wish system log messages to be sent.
 287 */
 288int mtrr_add_page(unsigned long base, unsigned long size,
 289                  unsigned int type, bool increment)
 290{
 291        unsigned long lbase, lsize;
 292        int i, replace, error;
 293        mtrr_type ltype;
 294
 295        if (!mtrr_enabled())
 296                return -ENXIO;
 297
 298        error = mtrr_if->validate_add_page(base, size, type);
 299        if (error)
 300                return error;
 301
 302        if (type >= MTRR_NUM_TYPES) {
 303                pr_warn("mtrr: type: %u invalid\n", type);
 304                return -EINVAL;
 305        }
 306
 307        /* If the type is WC, check that this processor supports it */
 308        if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
 309                pr_warn("mtrr: your processor doesn't support write-combining\n");
 310                return -ENOSYS;
 311        }
 312
 313        if (!size) {
 314                pr_warn("mtrr: zero sized request\n");
 315                return -EINVAL;
 316        }
 317
 318        if ((base | (base + size - 1)) >>
 319            (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
 320                pr_warn("mtrr: base or size exceeds the MTRR width\n");
 321                return -EINVAL;
 322        }
 323
 324        error = -EINVAL;
 325        replace = -1;
 326
 327        /* No CPU hotplug when we change MTRR entries */
 328        get_online_cpus();
 329
 330        /* Search for existing MTRR  */
 331        mutex_lock(&mtrr_mutex);
 332        for (i = 0; i < num_var_ranges; ++i) {
 333                mtrr_if->get(i, &lbase, &lsize, &ltype);
 334                if (!lsize || base > lbase + lsize - 1 ||
 335                    base + size - 1 < lbase)
 336                        continue;
 337                /*
 338                 * At this point we know there is some kind of
 339                 * overlap/enclosure
 340                 */
 341                if (base < lbase || base + size - 1 > lbase + lsize - 1) {
 342                        if (base <= lbase &&
 343                            base + size - 1 >= lbase + lsize - 1) {
 344                                /*  New region encloses an existing region  */
 345                                if (type == ltype) {
 346                                        replace = replace == -1 ? i : -2;
 347                                        continue;
 348                                } else if (types_compatible(type, ltype))
 349                                        continue;
 350                        }
 351                        pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing"
 352                                " 0x%lx000,0x%lx000\n", base, size, lbase,
 353                                lsize);
 354                        goto out;
 355                }
 356                /* New region is enclosed by an existing region */
 357                if (ltype != type) {
 358                        if (types_compatible(type, ltype))
 359                                continue;
 360                        pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
 361                                base, size, mtrr_attrib_to_str(ltype),
 362                                mtrr_attrib_to_str(type));
 363                        goto out;
 364                }
 365                if (increment)
 366                        ++mtrr_usage_table[i];
 367                error = i;
 368                goto out;
 369        }
 370        /* Search for an empty MTRR */
 371        i = mtrr_if->get_free_region(base, size, replace);
 372        if (i >= 0) {
 373                set_mtrr(i, base, size, type);
 374                if (likely(replace < 0)) {
 375                        mtrr_usage_table[i] = 1;
 376                } else {
 377                        mtrr_usage_table[i] = mtrr_usage_table[replace];
 378                        if (increment)
 379                                mtrr_usage_table[i]++;
 380                        if (unlikely(replace != i)) {
 381                                set_mtrr(replace, 0, 0, 0);
 382                                mtrr_usage_table[replace] = 0;
 383                        }
 384                }
 385        } else {
 386                pr_info("mtrr: no more MTRRs available\n");
 387        }
 388        error = i;
 389 out:
 390        mutex_unlock(&mtrr_mutex);
 391        put_online_cpus();
 392        return error;
 393}
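
/*
 * A minimal usage sketch (editorial illustration only; the base, size and
 * flow are hypothetical): callers work in 4 kB page units and treat the
 * returned register number purely as a cookie for mtrr_del_page().
 *
 *	int reg = mtrr_add_page(0xd0000000UL >> PAGE_SHIFT,
 *				(4UL << 20) >> PAGE_SHIFT,
 *				MTRR_TYPE_WRCOMB, true);
 *	if (reg < 0)
 *		pr_warn("no WC MTRR: %d\n", reg);
 *	...
 *	if (reg >= 0)
 *		mtrr_del_page(reg, 0, 0);
 */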
 394
 395static int mtrr_check(unsigned long base, unsigned long size)
 396{
 397        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
 398                pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
 399                pr_debug("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
 400                dump_stack();
 401                return -1;
 402        }
 403        return 0;
 404}
 405
 406/**
 407 * mtrr_add - Add a memory type region
 408 * @base: Physical base address of region
 409 * @size: Physical size of region
 410 * @type: Type of MTRR desired
 411 * @increment: If this is true do usage counting on the region
 412 *
 413 * Memory type region registers control the caching on newer Intel and
 414 * non-Intel processors. This function allows drivers to request that an
 415 * MTRR be added. The details and hardware specifics of each processor's
 416 * implementation are hidden from the caller, but nevertheless the
 417 * caller should expect to need to provide a power of two size on an
 418 * equivalent power of two boundary.
 419 *
 420 * If the region cannot be added either because all regions are in use
 421 * or the CPU cannot support it a negative value is returned. On success
 422 * the register number for this entry is returned, but should be treated
 423 * as a cookie only.
 424 *
 425 * On a multiprocessor machine the changes are made to all processors.
 426 * This is required on x86 by the Intel processors.
 427 *
 428 * The available types are
 429 *
 430 * %MTRR_TYPE_UNCACHABLE - No caching
 431 *
 432 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 433 *
 434 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 435 *
 436 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 437 *
 438 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 439 * failures and do not wish system log messages to be sent.
 440 */
 441int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
 442             bool increment)
 443{
 444        if (!mtrr_enabled())
 445                return -ENODEV;
 446        if (mtrr_check(base, size))
 447                return -EINVAL;
 448        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
 449                             increment);
 450}
 451
 452/**
 453 * mtrr_del_page - delete a memory type region
 454 * @reg: Register returned by mtrr_add
 455 * @base: Physical base address
 456 * @size: Size of region
 457 *
 458 * If register is supplied then base and size are ignored. This is
 459 * how drivers should call it.
 460 *
 461 * Releases an MTRR region. If the usage count drops to zero the
 462 * register is freed and the region returns to default state.
 463 * On success the register is returned, on failure a negative error
 464 * code.
 465 */
 466int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 467{
 468        int i, max;
 469        mtrr_type ltype;
 470        unsigned long lbase, lsize;
 471        int error = -EINVAL;
 472
 473        if (!mtrr_enabled())
 474                return -ENODEV;
 475
 476        max = num_var_ranges;
 477        /* No CPU hotplug when we change MTRR entries */
 478        get_online_cpus();
 479        mutex_lock(&mtrr_mutex);
 480        if (reg < 0) {
 481                /*  Search for existing MTRR  */
 482                for (i = 0; i < max; ++i) {
 483                        mtrr_if->get(i, &lbase, &lsize, &ltype);
 484                        if (lbase == base && lsize == size) {
 485                                reg = i;
 486                                break;
 487                        }
 488                }
 489                if (reg < 0) {
 490                        pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
 491                                 base, size);
 492                        goto out;
 493                }
 494        }
 495        if (reg >= max) {
 496                pr_warn("mtrr: register: %d too big\n", reg);
 497                goto out;
 498        }
 499        mtrr_if->get(reg, &lbase, &lsize, &ltype);
 500        if (lsize < 1) {
 501                pr_warn("mtrr: MTRR %d not used\n", reg);
 502                goto out;
 503        }
 504        if (mtrr_usage_table[reg] < 1) {
 505                pr_warn("mtrr: reg: %d has count=0\n", reg);
 506                goto out;
 507        }
 508        if (--mtrr_usage_table[reg] < 1)
 509                set_mtrr(reg, 0, 0, 0);
 510        error = reg;
 511 out:
 512        mutex_unlock(&mtrr_mutex);
 513        put_online_cpus();
 514        return error;
 515}
 516
 517/**
 518 * mtrr_del - delete a memory type region
 519 * @reg: Register returned by mtrr_add
 520 * @base: Physical base address
 521 * @size: Size of region
 522 *
 523 * If register is supplied then base and size are ignored. This is
 524 * how drivers should call it.
 525 *
 526 * Releases an MTRR region. If the usage count drops to zero the
 527 * register is freed and the region returns to default state.
 528 * On success the register is returned, on failure a negative error
 529 * code.
 530 */
 531int mtrr_del(int reg, unsigned long base, unsigned long size)
 532{
 533        if (!mtrr_enabled())
 534                return -ENODEV;
 535        if (mtrr_check(base, size))
 536                return -EINVAL;
 537        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 538}
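
/*
 * A byte-addressed pairing sketch (editorial illustration; the 8 MB region
 * at 0xe0000000 is hypothetical): mtrr_add() checks 4 kB alignment, shifts
 * to page units and returns the same cookie that mtrr_del() expects back;
 * base and size are ignored when the cookie is supplied.
 *
 *	int cookie = mtrr_add(0xe0000000UL, 8UL << 20,
 *			      MTRR_TYPE_WRCOMB, true);
 *	...
 *	if (cookie >= 0)
 *		mtrr_del(cookie, 0, 0);
 */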
 539
 540/**
 541 * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
 542 * @base: Physical base address
 543 * @size: Size of region
 544 *
 545 * If PAT is available, this does nothing.  If PAT is unavailable, it
 546 * attempts to add a WC MTRR covering size bytes starting at base and
 547 * logs an error if this fails.
 548 *
 549 * The caller should provide a power of two size on an equivalent
 550 * power of two boundary.
 551 *
 552 * Drivers must store the return value to pass to arch_phys_wc_del(),
 553 * but drivers should not try to interpret that return value.
 554 */
 555int arch_phys_wc_add(unsigned long base, unsigned long size)
 556{
 557        int ret;
 558
 559        if (pat_enabled() || !mtrr_enabled())
 560                return 0;  /* Success!  (We don't need to do anything.) */
 561
 562        ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
 563        if (ret < 0) {
 564                pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
 565                        (void *)base, (void *)(base + size - 1));
 566                return ret;
 567        }
 568        return ret + MTRR_TO_PHYS_WC_OFFSET;
 569}
 570EXPORT_SYMBOL(arch_phys_wc_add);
 571
 572/*
 573 * arch_phys_wc_del - undoes arch_phys_wc_add
 574 * @handle: Return value from arch_phys_wc_add
 575 *
 576 * This cleans up after arch_phys_wc_add().
 577 *
 578 * The API guarantees that arch_phys_wc_del(error code) and
 579 * arch_phys_wc_del(0) do nothing.
 580 */
 581void arch_phys_wc_del(int handle)
 582{
 583        if (handle >= 1) {
 584                WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
 585                mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
 586        }
 587}
 588EXPORT_SYMBOL(arch_phys_wc_del);
 589
 590/*
 591 * arch_phys_wc_index - translates arch_phys_wc_add's return value
 592 * @handle: Return value from arch_phys_wc_add
 593 *
 594 * This will turn the return value from arch_phys_wc_add into an mtrr
 595 * index suitable for debugging.
 596 *
 597 * Note: There is no legitimate use for this function, except possibly
 598 * in a printk line.  Alas, there is an illegitimate use in some ancient
 599 * drm ioctls.
 600 */
 601int arch_phys_wc_index(int handle)
 602{
 603        if (handle < MTRR_TO_PHYS_WC_OFFSET)
 604                return -1;
 605        else
 606                return handle - MTRR_TO_PHYS_WC_OFFSET;
 607}
 608EXPORT_SYMBOL_GPL(arch_phys_wc_index);
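
/*
 * A sketch of the arch_phys_wc_* pattern (editorial illustration; fb_base
 * and fb_size stand for some device aperture): the handle is opaque, is 0
 * when PAT already provides write-combining, and both 0 and negative
 * values may be passed back to arch_phys_wc_del() safely.
 *
 *	int wc = arch_phys_wc_add(fb_base, fb_size);
 *
 *	pr_debug("WC MTRR index: %d\n", arch_phys_wc_index(wc));
 *	...
 *	arch_phys_wc_del(wc);
 */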
 609
 610/*
 611 * HACK ALERT!
 612 * These should be called implicitly, but we can't yet until all the initcall
 613 * stuff is done...
 614 */
 615static void __init init_ifs(void)
 616{
 617#ifndef CONFIG_X86_64
 618        amd_init_mtrr();
 619        cyrix_init_mtrr();
 620        centaur_init_mtrr();
 621#endif
 622}
 623
 624/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 625 * generic MTRR driver don't require this.
 626 */
 627struct mtrr_value {
 628        mtrr_type       ltype;
 629        unsigned long   lbase;
 630        unsigned long   lsize;
 631};
 632
 633static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 634
 635static int mtrr_save(void)
 636{
 637        int i;
 638
 639        for (i = 0; i < num_var_ranges; i++) {
 640                mtrr_if->get(i, &mtrr_value[i].lbase,
 641                                &mtrr_value[i].lsize,
 642                                &mtrr_value[i].ltype);
 643        }
 644        return 0;
 645}
 646
 647static void mtrr_restore(void)
 648{
 649        int i;
 650
 651        for (i = 0; i < num_var_ranges; i++) {
 652                if (mtrr_value[i].lsize) {
 653                        set_mtrr(i, mtrr_value[i].lbase,
 654                                    mtrr_value[i].lsize,
 655                                    mtrr_value[i].ltype);
 656                }
 657        }
 658}
 659
 660
 661
 662static struct syscore_ops mtrr_syscore_ops = {
 663        .suspend        = mtrr_save,
 664        .resume         = mtrr_restore,
 665};
 666
 667int __initdata changed_by_mtrr_cleanup;
 668
 669#define SIZE_OR_MASK_BITS(n)  (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
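
/*
 * Worked example (editorial note, assuming PAGE_SHIFT == 12): for a 36-bit
 * physical address space, SIZE_OR_MASK_BITS(36) is ~((1ULL << 24) - 1) ==
 * 0xffffffffff000000; the low 24 bits cover every possible page frame
 * number, and all higher mask bits are forced on, matching the
 * size_or_mask value set below.
 */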
 670/**
 671 * mtrr_bp_init - initialize mtrrs on the boot CPU
 672 *
 673 * This needs to be called early; before any of the other CPUs are
 674 * initialized (i.e. before smp_init()).
 675 *
 676 */
 677void __init mtrr_bp_init(void)
 678{
 679        u32 phys_addr;
 680
 681        init_ifs();
 682
 683        phys_addr = 32;
 684
 685        if (boot_cpu_has(X86_FEATURE_MTRR)) {
 686                mtrr_if = &generic_mtrr_ops;
 687                size_or_mask = SIZE_OR_MASK_BITS(36);
 688                size_and_mask = 0x00f00000;
 689                phys_addr = 36;
 690
 691                /*
 692                 * This is an AMD-specific CPUID leaf, but we assume (hope?) that
 693                 * Intel will implement it too when they extend the address
 694                 * bus of the Xeon.
 695                 */
 696                if (cpuid_eax(0x80000000) >= 0x80000008) {
 697                        phys_addr = cpuid_eax(0x80000008) & 0xff;
 698                        /* CPUID workaround for Intel 0F33/0F34 CPU */
 699                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 700                            boot_cpu_data.x86 == 0xF &&
 701                            boot_cpu_data.x86_model == 0x3 &&
 702                            (boot_cpu_data.x86_mask == 0x3 ||
 703                             boot_cpu_data.x86_mask == 0x4))
 704                                phys_addr = 36;
 705
 706                        size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
 707                        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 708                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 709                           boot_cpu_data.x86 == 6) {
 710                        /*
 711                         * The VIA C* family has Intel-style MTRRs,
 712                         * but doesn't support PAE.
 713                         */
 714                        size_or_mask = SIZE_OR_MASK_BITS(32);
 715                        size_and_mask = 0;
 716                        phys_addr = 32;
 717                }
 718        } else {
 719                switch (boot_cpu_data.x86_vendor) {
 720                case X86_VENDOR_AMD:
 721                        if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
 722                                /* Pre-Athlon (K6) AMD CPU MTRRs */
 723                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
 724                                size_or_mask = SIZE_OR_MASK_BITS(32);
 725                                size_and_mask = 0;
 726                        }
 727                        break;
 728                case X86_VENDOR_CENTAUR:
 729                        if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
 730                                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
 731                                size_or_mask = SIZE_OR_MASK_BITS(32);
 732                                size_and_mask = 0;
 733                        }
 734                        break;
 735                case X86_VENDOR_CYRIX:
 736                        if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
 737                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
 738                                size_or_mask = SIZE_OR_MASK_BITS(32);
 739                                size_and_mask = 0;
 740                        }
 741                        break;
 742                default:
 743                        break;
 744                }
 745        }
 746
 747        if (mtrr_if) {
 748                __mtrr_enabled = true;
 749                set_num_var_ranges();
 750                init_table();
 751                if (use_intel()) {
 752                        /* BIOS may override */
 753                        __mtrr_enabled = get_mtrr_state();
 754
 755                        if (mtrr_cleanup(phys_addr)) {
 756                                changed_by_mtrr_cleanup = 1;
 757                                mtrr_if->set_all();
 758                        }
 759                }
 760        }
 761
 762        if (!mtrr_enabled())
 763                pr_info("MTRR: Disabled\n");
 764}
 765
 766void mtrr_ap_init(void)
 767{
 768        if (!mtrr_enabled())
 769                return;
 770
 771        if (!use_intel() || mtrr_aps_delayed_init)
 772                return;
 773        /*
 774         * Ideally we should hold mtrr_mutex here to prevent MTRR entries
 775         * from being changed, but this routine is called at CPU boot time,
 776         * and holding the lock there would break things.
 777         *
 778         * This routine is called in two cases:
 779         *
 780         *   1. very early during software resume, when there are absolutely
 781         *      no MTRR entry changes;
 782         *
 783         *   2. CPU hot-add time. We let mtrr_add/del_page take the CPU
 784         *      hotplug lock to prevent MTRR entry changes.
 785         */
 786        set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
 787}
 788
 789/**
 790 * mtrr_save_state - Save current fixed-range MTRR state of the first cpu in cpu_online_mask.
 791 */
 792void mtrr_save_state(void)
 793{
 794        int first_cpu;
 795
 796        if (!mtrr_enabled())
 797                return;
 798
 799        get_online_cpus();
 800        first_cpu = cpumask_first(cpu_online_mask);
 801        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 802        put_online_cpus();
 803}
 804
 805void set_mtrr_aps_delayed_init(void)
 806{
 807        if (!mtrr_enabled())
 808                return;
 809        if (!use_intel())
 810                return;
 811
 812        mtrr_aps_delayed_init = true;
 813}
 814
 815/*
 816 * Delayed MTRR initialization for all APs
 817 */
 818void mtrr_aps_init(void)
 819{
 820        if (!use_intel() || !mtrr_enabled())
 821                return;
 822
 823        /*
 824         * Check if someone has requested the delay of AP MTRR initialization,
 825         * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
 826         * then we are done.
 827         */
 828        if (!mtrr_aps_delayed_init)
 829                return;
 830
 831        set_mtrr(~0U, 0, 0, 0);
 832        mtrr_aps_delayed_init = false;
 833}
 834
 835void mtrr_bp_restore(void)
 836{
 837        if (!use_intel() || !mtrr_enabled())
 838                return;
 839
 840        mtrr_if->set_all();
 841}
 842
 843static int __init mtrr_init_finialize(void)
 844{
 845        if (!mtrr_enabled())
 846                return 0;
 847
 848        if (use_intel()) {
 849                if (!changed_by_mtrr_cleanup)
 850                        mtrr_state_warn();
 851                return 0;
 852        }
 853
 854        /*
 855         * The CPU has no MTRRs and does not appear to support SMP. Such
 856         * CPUs have vendor-specific drivers, so we use a tricky method to
 857         * support suspend/resume for them.
 858         *
 859         * TBD: is there any system with such CPU which supports
 860         * suspend/resume? If no, we should remove the code.
 861         */
 862        register_syscore_ops(&mtrr_syscore_ops);
 863
 864        return 0;
 865}
 866subsys_initcall(mtrr_init_finialize);
 867