linux/arch/x86/kernel/cpu/mtrr/generic.c
/*
 * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
 */
#define DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

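/*
 * The fixed-range MTRRs cover the first 1 MiB of physical address space:
 * one MSR of eight 64 KiB ranges (0x00000-0x7FFFF), two MSRs of eight
 * 16 KiB ranges each (0x80000-0xBFFFF) and eight MSRs of eight 4 KiB
 * ranges each (0xC0000-0xFFFFF).  Every MSR packs eight one-byte memory
 * types, for 88 ranges in 11 MSRs altogether.
 */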
static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/* Get the size of a contiguous MTRR range */
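/*
 * PHYSMASK has 1s in every address bit that must match PHYSBASE for a
 * hit.  After shifting to page units and OR-ing in size_or_mask (which
 * covers the bits above the CPU's physical address width), the mask is
 * a contiguous run of 1s, so negating it yields the range size in pages,
 * which is then shifted back up to bytes.
 */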
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}

/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}

/*
 * Error/Semi-error returns:
 * 0xFF - when MTRR is not enabled
 * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
 *		corresponds only to [start:*partial_end].
 *		Caller has to lookup again for [*partial_end:end].
 */
static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive, instead of exclusive */
	end--;

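	/*
	 * fixed_ranges[] is a flat array of 88 byte-sized types: entries
	 * 0-7 are the 64K ranges, 8-23 the 16K ranges and 24-87 the 4K
	 * ranges, which is where the 1*8 and 3*8 offsets below come from.
	 */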
	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in the variable ranges.  Multiple ranges may match this
	 * address; pick the type as per MTRR precedence.
	 */
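	/*
	 * mtrr_state.enabled mirrors MTRRdefType bits 11:10, so bit 1 is
	 * the E (MTRR enable) flag and bit 0 is FE (fixed-range enable).
	 */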
	if (!(mtrr_state.enabled & 2))
		return mtrr_state.def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

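		/*
		 * Bit 11 of PHYSMASK is the valid (V) bit; a range matches
		 * when the masked address bits equal the masked base.
		 */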
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));

		if (start_state != end_state) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into
			 * either
			 * (start:mtrr_end) (mtrr_end:end)
			 * or
			 * (start:mtrr_start) (mtrr_start:end)
			 * depending on kind of overlap.
			 * Return the type for first region and a pointer to
			 * the start of second region so that caller will
			 * lookup again on the second region.
			 * Note: This way we handle multiple overlaps as well.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

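	/*
	 * On AMD, memory between 4 GiB and TOM2 (top of memory 2) can be
	 * forced to write-back via SYSCFG; mtrr_tom2 is non-zero only when
	 * amd_special_default_mtrr() detected that setup in get_mtrr_state().
	 */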
	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

/*
 * Returns the effective MTRR type for the region
 * Error return:
 * 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	u8 type, prev_type;
	int repeat;
	u64 partial_end;

	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR range. Do repeated lookups for that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

		if (check_type_overlap(&prev_type, &type))
			return type;
	}

	return type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

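	/*
	 * Each fixed-range MSR packs eight one-byte memory types; read
	 * them as two 32-bit halves into consecutive slots of frs[].
	 */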
	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug("  %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & 2 ? "en" : "dis");
	if (size_or_mask & 0xffffffffUL)
		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
	else
		high_width = ffs(size_or_mask>>32) + 32 - 1;
	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}

/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned long flags;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

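	/* MTRRdefType: bits 7:0 default type, bit 10 FE, bit 11 E */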
	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it; the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *		     differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *		      differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

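	/*
	 * Compare only the architecturally meaningful bits: for PHYSBASE
	 * that is the type field (7:0) plus address bits 31:12 (0xfffff0ff
	 * skips the reserved bits 11:8); for PHYSMASK it is the valid bit
	 * (11) plus address bits 31:12 (0xfffff800).  The high halves are
	 * trimmed to the CPU's physical address width via size_and_mask.
	 */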
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts;
 * they would run extremely slowly and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
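	/* (0xcff covers the default type field plus the FE and E enable bits) */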
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
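		/*
		 * base and size are in pages; the low byte of PHYSBASE
		 * holds the memory type and 0x800 sets the valid bit
		 * (bit 11) in PHYSMASK.
		 */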
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7, the base must be 4 MiB aligned
	 * and must not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
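	/*
	 * Example: base 0x40000 and size 0x40000 (in pages) give
	 * last = 0x7ffff; the loop strips the common low bits until
	 * lbase == last == 1, so the region is size-aligned and accepted.
	 */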
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
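	/* MTRRcap bit 10 (WC) indicates write-combining support */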
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};
