linux/arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  Writing duplicate VPN2 values into the
 * TLB can trigger a machine check (TLB shutdown) on MIPS32 and other
 * cores, so every entry we invalidate is pointed at a unique address in
 * CKSEG0, which is never translated through the TLB.
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

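/*
 * Note: ENTER_CRITICAL opens a block and keeps the local "mvpflags"
 * variable live until the matching EXIT_CRITICAL closes it, so the two
 * macros must always be used as a lexically paired set within one
 * function.
 */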
#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB.
 * Unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif

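/*
 * Flush the whole TLB on the local CPU: every non-wired entry is pointed
 * at a unique, untranslated CKSEG0 address with both EntryLo registers
 * zeroed (i.e. invalid), then written back by index.  The hazard barriers
 * separate the CP0 register updates, the TLB write instructions and later
 * uses of the new entries on cores that need explicit hazard handling.
 */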
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

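/*
 * Flush a user address range.  Each TLB entry maps an even/odd pair of
 * pages, so the range is first widened to a whole number of such pairs.
 * If the range covers at most half of the TLB the affected entries are
 * probed and invalidated one pair at a time under the mm's current ASID;
 * otherwise it is cheaper to drop the whole context and let a fresh ASID
 * be assigned.
 */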
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                ENTER_CRITICAL(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                FLUSH_ITLB;
                EXIT_CRITICAL(flags);
        }
}

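/*
 * Flush a kernel address range.  Kernel mappings carry the global bit,
 * so no ASID is folded into EntryHi while probing.  Small ranges are
 * invalidated entry by entry; anything larger than half the TLB falls
 * back to local_flush_tlb_all().
 */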
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}

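/*
 * Flush a single user page: probe for the even/odd pair containing the
 * page under the mm's current ASID and, if it is present, overwrite the
 * matching entry with an invalid one.  A miss (negative index from the
 * probe) leaves the TLB untouched.
 */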
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                FLUSH_ITLB_VM(vma);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(): one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed to
 * work around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
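                /*
                 * One TLB entry maps the whole huge page as an even/odd
                 * pair: EntryLo0 covers the first half and EntryLo1 the
                 * second.  The EntryLo PFN field encodes the physical
                 * address shifted right by 6, so advancing by half a huge
                 * page corresponds to adding HPAGE_SIZE >> 7 to the
                 * EntryLo value.
                 */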
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        FLUSH_ITLB_VM(vma);
        EXIT_CRITICAL(flags);
}

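/*
 * Install a fixed translation in a wired TLB slot.  The wired register is
 * bumped so that tlbwr never replaces this entry, and the previous
 * EntryHi/PageMask contents are restored afterwards; the final
 * local_flush_tlb_all() then invalidates all remaining non-wired entries.
 */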
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

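/*
 * Probe whether the TLB accepts a huge page size: write PM_HUGE_MASK to
 * c0_pagemask and read it back.  Cores that cannot represent the huge
 * page mask return a different value, in which case transparent
 * hugepages are not usable.
 */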
int __init has_transparent_hugepage(void)
{
        unsigned int mask;
        unsigned long flags;

        ENTER_CRITICAL(flags);
        write_c0_pagemask(PM_HUGE_MASK);
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);

        EXIT_CRITICAL(flags);

        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

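/*
 * "ntlb=" on the kernel command line restricts the number of TLB entries
 * the kernel will use (see tlb_init() below); the remaining entries are
 * marked wired so random replacement never allocates them.  Mostly useful
 * for debugging.
 */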
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

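/*
 * Per-CPU TLB initialization: program the default page mask, clear the
 * wired count, enable RI/XI and large physical addressing where the core
 * supports them, flush whatever the firmware left in the TLB, apply any
 * "ntlb=" restriction and finally build the TLB refill handler.
 */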
void tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and on 64-bit kernels
                 * enable large physical addresses.
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}