/* linux/arch/x86/kernel/cpu/mtrr/main.c */
/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
  33
#define DEBUG

#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"
  54
/* Number of variable MTRRs this CPU supports (set once at boot). */
u32 num_var_ranges;

/* Per-register reference counts maintained by mtrr_add()/mtrr_del(). */
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
/* Serializes all MTRR table modifications. */
static DEFINE_MUTEX(mtrr_mutex);

/* Masks derived from the physical address width in mtrr_bp_init(). */
u64 size_or_mask, size_and_mask;
/* When true, AP MTRR programming is deferred until mtrr_aps_init(). */
static bool mtrr_aps_delayed_init;

/* Vendor-specific MTRR drivers, indexed by X86_VENDOR_*. */
static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];

/* Driver in use on this system; NULL if MTRRs are unsupported. */
struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);
  70void set_mtrr_ops(struct mtrr_ops *ops)
  71{
  72        if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
  73                mtrr_ops[ops->vendor] = ops;
  74}
  75
  76/*  Returns non-zero if we have the write-combining memory type  */
  77static int have_wrcomb(void)
  78{
  79        struct pci_dev *dev;
  80        u8 rev;
  81
  82        dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
  83        if (dev != NULL) {
  84                /*
  85                 * ServerWorks LE chipsets < rev 6 have problems with
  86                 * write-combining. Don't allow it and leave room for other
  87                 * chipsets to be tagged
  88                 */
  89                if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
  90                    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
  91                        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
  92                        if (rev <= 5) {
  93                                pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
  94                                pci_dev_put(dev);
  95                                return 0;
  96                        }
  97                }
  98                /*
  99                 * Intel 450NX errata # 23. Non ascending cacheline evictions to
 100                 * write combining memory may resulting in data corruption
 101                 */
 102                if (dev->vendor == PCI_VENDOR_ID_INTEL &&
 103                    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
 104                        pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
 105                        pci_dev_put(dev);
 106                        return 0;
 107                }
 108                pci_dev_put(dev);
 109        }
 110        return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
 111}
 112
 113/*  This function returns the number of variable MTRRs  */
 114static void __init set_num_var_ranges(void)
 115{
 116        unsigned long config = 0, dummy;
 117
 118        if (use_intel())
 119                rdmsr(MSR_MTRRcap, config, dummy);
 120        else if (is_cpu(AMD))
 121                config = 2;
 122        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 123                config = 8;
 124
 125        num_var_ranges = config & 0xff;
 126}
 127
 128static void __init init_table(void)
 129{
 130        int i, max;
 131
 132        max = num_var_ranges;
 133        for (i = 0; i < max; i++)
 134                mtrr_usage_table[i] = 1;
 135}
 136
/*
 * Shared arguments for the set_mtrr() / ipi_handler() rendezvous.
 * @count and @gate implement the barrier handshake between the master
 * CPU and all the others.
 */
struct set_mtrr_data {
	atomic_t	count;		/* CPUs yet to pass the current phase */
	atomic_t	gate;		/* toggled by the master to release CPUs */
	unsigned long	smp_base;	/* MTRR base to program */
	unsigned long	smp_size;	/* MTRR size to program */
	unsigned int	smp_reg;	/* register index, or ~0U for set_all */
	mtrr_type	smp_type;	/* memory type to program */
};
 145
/**
 * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
 * @info: pointer to the shared struct set_mtrr_data
 *
 * Runs the three-phase rendezvous driven by set_mtrr(): disable
 * interrupts, wait for the gate to open, program the MTRR(s), then
 * wait for the gate to close before re-enabling interrupts.
 *
 * Returns nothing.
 */
static void ipi_handler(void *info)
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	/* Phase 1: report "interrupts disabled", spin until released */
	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init) {
		/*
		 * Initialize the MTRRs in addition to the synchronisation.
		 */
		mtrr_if->set_all();
	}

	/* Phase 2: report completion, wait for the master to close the gate */
	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	/* Phase 3: final handshake so the master may release 'data' */
	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}
 182
 183static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 184{
 185        return type1 == MTRR_TYPE_UNCACHABLE ||
 186               type2 == MTRR_TYPE_UNCACHABLE ||
 187               (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
 188               (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
 189}
 190
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently,
 * so we call mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate
 * to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
 * Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	/* Phase 1: wait for every other CPU to disable interrupts */
	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate across all the APs.
	 * If we're doing that @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Phase 2: wait for the others to finish programming their MTRRs */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
 292
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request an
 * MTRR is added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_if)
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warning("mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warning("mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		pr_warning("mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		pr_warning("mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/* Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		/* Skip unused registers and regions that don't overlap */
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					/* candidate for replacement; -2 means
					 * more than one enclosed region */
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
				" 0x%lx000,0x%lx000\n", base, size, lbase,
				lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
				base, size, mtrr_attrib_to_str(ltype),
				mtrr_attrib_to_str(type));
			goto out;
		}
		/* Already covered by a matching register: bump its refcount */
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			/* Carry the replaced register's usage count over */
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("mtrr: no more MTRRs available\n");
	}
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
 433
 434static int mtrr_check(unsigned long base, unsigned long size)
 435{
 436        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
 437                pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
 438                pr_debug("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
 439                dump_stack();
 440                return -1;
 441        }
 442        return 0;
 443}
 444
 445/**
 446 * mtrr_add - Add a memory type region
 447 * @base: Physical base address of region
 448 * @size: Physical size of region
 449 * @type: Type of MTRR desired
 450 * @increment: If this is true do usage counting on the region
 451 *
 452 * Memory type region registers control the caching on newer Intel and
 453 * non Intel processors. This function allows drivers to request an
 454 * MTRR is added. The details and hardware specifics of each processor's
 455 * implementation are hidden from the caller, but nevertheless the
 456 * caller should expect to need to provide a power of two size on an
 457 * equivalent power of two boundary.
 458 *
 459 * If the region cannot be added either because all regions are in use
 460 * or the CPU cannot support it a negative value is returned. On success
 461 * the register number for this entry is returned, but should be treated
 462 * as a cookie only.
 463 *
 464 * On a multiprocessor machine the changes are made to all processors.
 465 * This is required on x86 by the Intel processors.
 466 *
 467 * The available types are
 468 *
 469 * %MTRR_TYPE_UNCACHABLE - No caching
 470 *
 471 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 472 *
 473 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 474 *
 475 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 476 *
 477 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 478 * failures and do not wish system log messages to be sent.
 479 */
 480int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
 481             bool increment)
 482{
 483        if (mtrr_check(base, size))
 484                return -EINVAL;
 485        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
 486                             increment);
 487}
 488EXPORT_SYMBOL(mtrr_add);
 489
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warning("mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	/* lsize == 0 marks an unused register */
	if (lsize < 1) {
		pr_warning("mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	/* Disable the register only when the last user is gone */
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
 554
 555/**
 556 * mtrr_del - delete a memory type region
 557 * @reg: Register returned by mtrr_add
 558 * @base: Physical base address
 559 * @size: Size of region
 560 *
 561 * If register is supplied then base and size are ignored. This is
 562 * how drivers should call it.
 563 *
 564 * Releases an MTRR region. If the usage count drops to zero the
 565 * register is freed and the region returns to default state.
 566 * On success the register is returned, on failure a negative error
 567 * code.
 568 */
 569int mtrr_del(int reg, unsigned long base, unsigned long size)
 570{
 571        if (mtrr_check(base, size))
 572                return -EINVAL;
 573        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 574}
 575EXPORT_SYMBOL(mtrr_del);
 576
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
	/* 64-bit CPUs always have the generic (Intel-style) interface, so
	 * the legacy vendor drivers are only registered on 32-bit. */
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
 590
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
 * MTRR driver doesn't require this
 */
struct mtrr_value {
	mtrr_type	ltype;		/* saved memory type */
	unsigned long	lbase;		/* saved base, in pages */
	unsigned long	lsize;		/* saved size, in pages; 0 = unused */
};

/* Snapshot of all variable MTRRs taken by mtrr_save() across suspend. */
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 601
 602static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
 603{
 604        int i;
 605
 606        for (i = 0; i < num_var_ranges; i++) {
 607                mtrr_if->get(i, &mtrr_value[i].lbase,
 608                                &mtrr_value[i].lsize,
 609                                &mtrr_value[i].ltype);
 610        }
 611        return 0;
 612}
 613
 614static int mtrr_restore(struct sys_device *sysdev)
 615{
 616        int i;
 617
 618        for (i = 0; i < num_var_ranges; i++) {
 619                if (mtrr_value[i].lsize) {
 620                        set_mtrr(i, mtrr_value[i].lbase,
 621                                    mtrr_value[i].lsize,
 622                                    mtrr_value[i].ltype);
 623                }
 624        }
 625        return 0;
 626}
 627
 628
 629
/* Suspend/resume hooks; used only when a non-generic driver is active. */
static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

/* Set when mtrr_cleanup() rewrote the firmware's MTRR layout at boot. */
int __initdata changed_by_mtrr_cleanup;
 636
 637/**
 638 * mtrr_bp_init - initialize mtrrs on the boot CPU
 639 *
 640 * This needs to be called early; before any of the other CPUs are
 641 * initialized (i.e. before smp_init()).
 642 *
 643 */
 644void __init mtrr_bp_init(void)
 645{
 646        u32 phys_addr;
 647
 648        init_ifs();
 649
 650        phys_addr = 32;
 651
 652        if (cpu_has_mtrr) {
 653                mtrr_if = &generic_mtrr_ops;
 654                size_or_mask = 0xff000000;                      /* 36 bits */
 655                size_and_mask = 0x00f00000;
 656                phys_addr = 36;
 657
 658                /*
 659                 * This is an AMD specific MSR, but we assume(hope?) that
 660                 * Intel will implement it to when they extend the address
 661                 * bus of the Xeon.
 662                 */
 663                if (cpuid_eax(0x80000000) >= 0x80000008) {
 664                        phys_addr = cpuid_eax(0x80000008) & 0xff;
 665                        /* CPUID workaround for Intel 0F33/0F34 CPU */
 666                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 667                            boot_cpu_data.x86 == 0xF &&
 668                            boot_cpu_data.x86_model == 0x3 &&
 669                            (boot_cpu_data.x86_mask == 0x3 ||
 670                             boot_cpu_data.x86_mask == 0x4))
 671                                phys_addr = 36;
 672
 673                        size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
 674                        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 675                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 676                           boot_cpu_data.x86 == 6) {
 677                        /*
 678                         * VIA C* family have Intel style MTRRs,
 679                         * but don't support PAE
 680                         */
 681                        size_or_mask = 0xfff00000;              /* 32 bits */
 682                        size_and_mask = 0;
 683                        phys_addr = 32;
 684                }
 685        } else {
 686                switch (boot_cpu_data.x86_vendor) {
 687                case X86_VENDOR_AMD:
 688                        if (cpu_has_k6_mtrr) {
 689                                /* Pre-Athlon (K6) AMD CPU MTRRs */
 690                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
 691                                size_or_mask = 0xfff00000;      /* 32 bits */
 692                                size_and_mask = 0;
 693                        }
 694                        break;
 695                case X86_VENDOR_CENTAUR:
 696                        if (cpu_has_centaur_mcr) {
 697                                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
 698                                size_or_mask = 0xfff00000;      /* 32 bits */
 699                                size_and_mask = 0;
 700                        }
 701                        break;
 702                case X86_VENDOR_CYRIX:
 703                        if (cpu_has_cyrix_arr) {
 704                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
 705                                size_or_mask = 0xfff00000;      /* 32 bits */
 706                                size_and_mask = 0;
 707                        }
 708                        break;
 709                default:
 710                        break;
 711                }
 712        }
 713
 714        if (mtrr_if) {
 715                set_num_var_ranges();
 716                init_table();
 717                if (use_intel()) {
 718                        get_mtrr_state();
 719
 720                        if (mtrr_cleanup(phys_addr)) {
 721                                changed_by_mtrr_cleanup = 1;
 722                                mtrr_if->set_all();
 723                        }
 724                }
 725        }
 726}
 727
/*
 * mtrr_ap_init - bring a freshly booted AP's MTRRs in sync with the BSP.
 *
 * No-op unless the generic driver is in use, and skipped while AP
 * initialization is being delayed (see set_mtrr_aps_delayed_init()).
 */
void mtrr_ap_init(void)
{
	if (!use_intel() || mtrr_aps_delayed_init)
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
	 * changed, but this routine will be called in cpu boot time,
	 * holding the lock breaks it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early time of software resume, when there absolutely
	 *      isn't mtrr entry changes;
	 *
	 *   2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
	 *      lock to prevent mtrr entry changes
	 */
	/* ~0U means "replicate all saved MTRRs" rather than one register */
	set_mtrr(~0U, 0, 0, 0);
}
 747
/**
 * mtrr_save_state - save current fixed-range MTRR state of the BSP
 *
 * Runs mtrr_save_fixed_ranges() synchronously on CPU 0 so the boot
 * CPU's fixed-range state can later be replicated to other CPUs.
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
 755
 756void set_mtrr_aps_delayed_init(void)
 757{
 758        if (!use_intel())
 759                return;
 760
 761        mtrr_aps_delayed_init = true;
 762}
 763
/*
 * MTRR initialization for all AP's
 *
 * Replays the boot CPU's MTRR state on all CPUs via the set_mtrr()
 * rendezvous, then clears the delayed-init flag.
 */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	/* ~0U selects "replicate all MTRRs" rather than a single register */
	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}
 775
 776void mtrr_bp_restore(void)
 777{
 778        if (!use_intel())
 779                return;
 780
 781        mtrr_if->set_all();
 782}
 783
 784static int __init mtrr_init_finialize(void)
 785{
 786        if (!mtrr_if)
 787                return 0;
 788
 789        if (use_intel()) {
 790                if (!changed_by_mtrr_cleanup)
 791                        mtrr_state_warn();
 792                return 0;
 793        }
 794
 795        /*
 796         * The CPU has no MTRR and seems to not support SMP. They have
 797         * specific drivers, we use a tricky method to support
 798         * suspend/resume for them.
 799         *
 800         * TBD: is there any system with such CPU which supports
 801         * suspend/resume? If no, we should remove the code.
 802         */
 803        sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
 804
 805        return 0;
 806}
 807subsys_initcall(mtrr_init_finialize);
 808