linux/arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different,
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
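/*
 * Illustrative values (assuming PAGE_SHIFT == 12, i.e. 4K pages): index 0
 * maps to CKSEG0 + 0x0000, index 1 to CKSEG0 + 0x2000, index 2 to
 * CKSEG0 + 0x4000, and so on.  The stride is 2 * PAGE_SIZE because each
 * TLB entry spans an even/odd pair of pages, so no two indices alias.
 */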

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
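
/*
 * Note that on SMTC the two macros above open and close a C block: the
 * '{' in ENTER_CRITICAL() matches the '}' in EXIT_CRITICAL(), and
 * mvpflags is local to that block.  Every ENTER_CRITICAL() must therefore
 * be paired with an EXIT_CRITICAL() in the same lexical scope.
 */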

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry itlb which is a subset of the dtlb;
 * unfortunately, the itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
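
/*
 * FLUSH_ITLB_VM(vma) skips the itlb flush for mappings without VM_EXEC:
 * pages that were never executable cannot have been fetched through the
 * itlb, so only executable mappings pay for the write_c0_diag().
 */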

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();        /* Start past the wired (pinned) entries. */

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}

/*
 * All entries common to a mm share an ASID.  To effectively flush these
 * entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}
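
/*
 * Note that drop_mmu_context() hands the mm a new ASID (or forces a full
 * flush on ASID wraparound), so stale entries tagged with the old ASID
 * can never match again and are recycled by normal replacement.
 */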

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                ENTER_CRITICAL(flags);
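                /*
                 * Each TLB entry maps an even/odd pair of pages through
                 * EntryLo0/EntryLo1, so work in double-page units: halve
                 * the rounded-up page count and compare it against half
                 * the TLB size before probing entry by entry.
                 */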
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

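                                /*
                                 * Probe for this pair; a negative index
                                 * means it is not in the TLB.  EntryLo is
                                 * cleared first so the indexed write below
                                 * leaves an invalid entry at a unique VPN2.
                                 */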
                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                FLUSH_ITLB;
                EXIT_CRITICAL(flags);
        }
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
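        /*
         * Same double-page-pair arithmetic as local_flush_tlb_range().
         * Kernel mappings carry the global bit, so the probe below uses
         * the bare address with no ASID or'ed into EntryHi.
         */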
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                FLUSH_ITLB_VM(vma);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
        /* this could be a huge page */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
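                /*
                 * One entry maps the whole huge page: EntryLo0 covers the
                 * first half and EntryLo1 the second.  With the usual R4k
                 * EntryLo layout (PFN field at bit 6, i.e. physaddr >> 6),
                 * half a huge page is HPAGE_SIZE >> 7 in EntryLo units.
                 */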
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
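                /*
                 * With 64-bit physical addresses on a 32-bit CPU each pte
                 * is split across two words; pte_high already holds an
                 * EntryLo-formatted value, so it can be written out as-is.
                 */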
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        FLUSH_ITLB_VM(vma);
        EXIT_CRITICAL(flags);
}

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}
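
/*
 * Illustrative call (hypothetical values): board setup code might pin a
 * double-page pair with
 *
 *      add_wired_entry(lo0, lo1, vaddr & (PAGE_MASK << 1), PM_DEFAULT_MASK);
 *
 * where lo0/lo1 hold the EntryLo bits (PFN, cache attribute, valid and
 * dirty) for the even and odd page respectively.
 */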

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        /* Cast keeps the bounds check signed if temp_tlb_entry goes negative. */
        if (--temp_tlb_entry < (long)wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
out:
        EXIT_CRITICAL(flags);
        return ret;
}
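
/*
 * Temporary entries are handed out from the top of the TLB downwards and
 * are not marked wired, so any later local_flush_tlb_all() wipes them
 * out; that is what makes them temporary.
 */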

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);
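
/*
 * "ntlb=N" restricts random replacement to the top N entries: on a
 * 64-entry TLB, for example, ntlb=16 wires indices 0..47 so that tlbwr
 * only ever picks indices 48..63.  See the handling in tlb_init() below.
 */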

void __cpuinit tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (kernel_uses_smartmips_rixi) {
                /*
                 * Enable the no-read and no-exec bits, and enable the
                 * large physical address extension (ELPA).
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}