linux/arch/mips/mm/tlb-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
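
/*
 * For illustration (values assume 4K pages, PAGE_SHIFT == 12): the
 * macro yields a distinct, unmapped CKSEG0 VPN2 per index:
 *
 *   UNIQUE_ENTRYHI(0) == CKSEG0 + 0x0000
 *   UNIQUE_ENTRYHI(1) == CKSEG0 + 0x2000
 *   UNIQUE_ENTRYHI(2) == CKSEG0 + 0x4000
 *
 * The shift is PAGE_SHIFT + 1 because one EntryHi/VPN2 covers an
 * even/odd pair of pages; CKSEG0 is unmapped, so these values can
 * never match a real translation.
 */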

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags);\
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
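
/*
 * Usage sketch (illustrative only): the pair brackets any sequence of
 * TLB register accesses, so that neither a local interrupt nor, under
 * SMTC, another TC on the same core can clobber the CP0 staging
 * registers mid-update:
 *
 *   unsigned long flags;
 *
 *   ENTER_CRITICAL(flags);
 *   ... touch c0_entryhi, c0_entrylo0/1, c0_index ...
 *   EXIT_CRITICAL(flags);
 *
 * Note that the SMTC variant opens a compound block in ENTER_CRITICAL
 * and closes it in EXIT_CRITICAL, so the two must always appear as a
 * matched pair in the same scope.
 */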

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
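
/*
 * Note: on Loongson2 the write of 4 to the c0 diagnostic register is
 * understood to set the ITLB-clear bit, forcing the micro ITLB to
 * refill from the main TLB.  FLUSH_ITLB_VM() skips the flush for VMAs
 * without VM_EXEC, since only instruction fetches go through the ITLB.
 */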

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}
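
/*
 * Sketch of the invalidation pattern used above: each non-wired entry
 * is rewritten in place with zeroed EntryLo values (V bit clear) and a
 * unique, unmapped VPN2:
 *
 *   write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 *   write_c0_index(entry);
 *   mtc0_tlbw_hazard();
 *   tlb_write_indexed();
 *
 * Zero EntryLo alone would already make the entry invalid; the unique
 * EntryHi additionally guarantees no two entries share a VPN2, which
 * some MIPS32 cores punish with a machine check.
 */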

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}
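
/*
 * How the ASID trick works (summary): entries for an mm are tagged
 * with its ASID in EntryHi, so rather than probing for each one,
 * drop_mmu_context() simply assigns the mm a fresh ASID on this CPU.
 * The stale entries stay in the TLB but can never match again and are
 * recycled by normal replacement.
 */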

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;
                int huge = is_vm_hugetlb_page(vma);

                ENTER_CRITICAL(flags);
                if (huge) {
                        start = round_down(start, HPAGE_SIZE);
                        end = round_up(end, HPAGE_SIZE);
                        size = (end - start) >> HPAGE_SHIFT;
                } else {
                        start = round_down(start, PAGE_SIZE << 1);
                        end = round_up(end, PAGE_SIZE << 1);
                        size = (end - start) >> (PAGE_SHIFT + 1);
                }
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                if (huge)
                                        start += HPAGE_SIZE;
                                else
                                        start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                FLUSH_ITLB;
                EXIT_CRITICAL(flags);
        }
}
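
/*
 * Condensed sketch of the per-page invalidation above (hazard
 * barriers omitted):
 *
 *   write_c0_entryhi(start | newpid);
 *   tlb_probe();
 *   idx = read_c0_index();          (negative on miss)
 *   write_c0_entrylo0(0);
 *   write_c0_entrylo1(0);
 *   if (idx >= 0) {
 *           write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 *           tlb_write_indexed();
 *   }
 *
 * Once the range spans more than half the TLB, probing page by page
 * costs more than simply dropping the whole context.
 */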

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}
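
/*
 * Unlike the user-range variant, no ASID is ORed into EntryHi here:
 * kernel mappings carry the global (G) bit, so the probe matches on
 * VPN2 alone, whatever the current ASID.  As an example of the size
 * cut-off: on a 48-entry TLB, any range wider than 24 even/odd page
 * pairs falls back to local_flush_tlb_all().
 */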

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                FLUSH_ITLB_VM(vma);
                EXIT_CRITICAL(flags);
        }
}
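
/*
 * Typical calling context (illustrative): the generic mm code reaches
 * this via flush_tlb_page() after changing a user PTE, e.g.
 *
 *   ptep_get_and_clear(mm, addr, ptep);
 *   flush_tlb_page(vma, addr);
 *
 * The (PAGE_MASK << 1) masking rounds the address down to the even/odd
 * page pair covered by a single VPN2, so passing either page of the
 * pair flushes the shared entry.
 */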

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
}
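
/*
 * Since the caller guarantees the page is global, the G bit makes the
 * ASID compare irrelevant and the probe can use the bare, pair-aligned
 * virtual address; oldpid is saved and restored only to keep the
 * current context intact.
 */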

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the necessary fixup.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
        /* this could be a huge page */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        FLUSH_ITLB_VM(vma);
        EXIT_CRITICAL(flags);
}
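
/*
 * Sketch of the huge-page EntryLo pairing above: one VPN2 covers an
 * even/odd pair, so both halves are derived from the same PMD-level
 * PTE, the second offset by half the huge page:
 *
 *   lo = pte_to_entrylo(pte_val(*ptep));
 *   write_c0_entrylo0(lo);
 *   write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 *
 * HPAGE_SIZE >> 7 is ((HPAGE_SIZE / 2) >> 12) << 6: half the huge page
 * expressed as a page frame number, shifted into the EntryLo PFN
 * field.
 */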

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}
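
/*
 * Hypothetical usage sketch (the values and the MY_ENTRYLO() helper
 * are made up for illustration): board code could pin a 32M uncached
 * I/O window with a single wired pair of 16M pages, e.g.
 *
 *   add_wired_entry(MY_ENTRYLO(0x1c000000), MY_ENTRYLO(0x1d000000),
 *                   0xe0000000, PM_16M);
 *
 * where MY_ENTRYLO() would pack (paddr >> 12) << 6 together with the
 * D, V and G bits and an uncached cache attribute.  Note the final
 * local_flush_tlb_all() above, which clears out any stale entry that
 * might otherwise conflict with the new wired one.
 */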

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);
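
/*
 * Boot-time usage: passing "ntlb=32" on the kernel command line makes
 * tlb_init() below wire off all but 32 entries, so that only 32 take
 * part in random replacement -- useful for shaking out replacement
 * bugs on large-TLB CPUs.
 */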

void __cpuinit tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (kernel_uses_smartmips_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large
                 * virtual address support.
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}