linux/arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
                write_c0_diag(LOONGSON_DIAG_ITLB);
                break;
        case CPU_LOONGSON3:
                write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
                break;
        default:
                break;
        }
}

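/* Flush the micro TLB for a VMA only if it may contain executable mappings. */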
static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_micro_tlb();
}

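/*
 * Invalidate the entire TLB on this CPU.  Wired entries are preserved:
 * with no wired entries and tlbinv support, the VTLB and each FTLB set
 * are invalidated with tlbinvf; otherwise every entry from the first
 * non-wired index upwards is overwritten with a unique, unmapped VPN2.
 */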
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry, ftlbhighset;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        htw_stop();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = num_wired_entries();

        /*
         * Blast 'em all away.
         * If there are any wired entries, fall back to iterating
         */
        if (cpu_has_tlbinv && !entry) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate VTLB */
                }
                ftlbhighset = current_cpu_data.tlbsizevtlb +
                        current_cpu_data.tlbsizeftlbsets;
                for (entry = current_cpu_data.tlbsizevtlb;
                     entry < ftlbhighset;
                     entry++) {
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate one FTLB set */
                }
        } else {
                while (entry < current_cpu_data.tlbsize) {
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                        entry++;
                }
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

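/*
 * Flush the user address range [start, end) belonging to the VMA's mm.
 * Small ranges are probed and invalidated one even/odd page pair at a
 * time; large ranges simply get a new ASID via drop_mmu_context().
 */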
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                local_irq_save(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= (current_cpu_data.tlbsizeftlbsets ?
                             current_cpu_data.tlbsize / 8 :
                             current_cpu_data.tlbsize / 2)) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        htw_stop();
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                        htw_start();
                } else {
                        drop_mmu_context(mm, cpu);
                }
                flush_micro_tlb();
                local_irq_restore(flags);
        }
}

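/*
 * Flush a range of kernel addresses.  As above, small ranges are probed
 * page pair by page pair, while large ranges fall back to a full
 * local_flush_tlb_all().
 */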
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);
                htw_stop();

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
                htw_start();
        } else {
                local_flush_tlb_all();
        }
        flush_micro_tlb();
        local_irq_restore(flags);
}

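/*
 * Flush the TLB entry mapping a single user page, but only if the mm
 * owning the VMA has a live ASID on this CPU.
 */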
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi();
                htw_stop();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                htw_start();
                flush_micro_tlb_vm(vma);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        htw_stop();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        htw_stop();
        pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page  */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

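                /*
                 * Write the even/odd PTE pair covering this double-page
                 * region into EntryLo0/EntryLo1; the XPA variants also
                 * load the upper physical-address bits.
                 */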
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
                write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
                ptep++;
                write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#endif
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        htw_start();
        flush_micro_tlb_vm(vma);
        local_irq_restore(flags);
}

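/*
 * Add a permanently wired TLB entry with the given EntryLo/EntryHi/
 * PageMask values.  The wired count is bumped so the entry survives
 * later TLB flushes.
 */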
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
        panic("Broken for XPA kernels");
#else
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        htw_stop();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        htw_start();
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

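/*
 * Probe whether the MMU accepts the huge-page PageMask value by writing
 * it and reading it back; the result is cached after the first call.
 */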
int has_transparent_hugepage(void)
{
        static unsigned int mask = -1;

        if (mask == -1) {       /* first call comes during __init */
                unsigned long flags;

                local_irq_save(flags);
                write_c0_pagemask(PM_HUGE_MASK);
                back_to_back_c0_hazard();
                mask = read_c0_pagemask();
                write_c0_pagemask(PM_DEFAULT_MASK);
                local_irq_restore(flags);
        }
        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        htw_stop();
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
        htw_start();
out:
        local_irq_restore(flags);
        return ret;
}

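/*
 * "ntlb=" kernel command line parameter: restrict the TLB to the given
 * number of entries by wiring the remainder (see tlb_init() below).
 */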
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        back_to_back_c0_hazard();
        if (read_c0_pagemask() != PM_DEFAULT_MASK)
                panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000 ||
            current_cpu_type() == CPU_R16000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large physical
                 * address.
                 */
#ifdef CONFIG_64BIT
                set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
                set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
        }

        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */
}

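/*
 * Per-CPU TLB initialisation: configure the MMU, honour any "ntlb="
 * restriction and install the TLB refill handler.
 */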
void tlb_init(void)
{
        r4k_tlb_configure();

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}

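/*
 * CPU power management notifier: reconfigure the TLB when a core exits
 * a low-power state (or fails to enter one), since its MMU state may
 * have been lost.
 */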
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
                               void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                r4k_tlb_configure();
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
        .notifier_call = r4k_tlb_pm_notifier,
};

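/* Register the CPU PM notifier above during arch initialisation. */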
static int __init r4k_tlb_init_pm(void)
{
        return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);