linux/arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2/3 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

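/*
 * Only executable mappings can be cached in the ITLB, so a flush is only
 * needed when invalidating entries for a VM_EXEC vma.
 */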
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

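/*
 * Flush every TLB entry on the local CPU.  Cores with the TLB invalidate
 * instruction (tlbinvf) flush the VTLB and each FTLB set directly;
 * otherwise every entry is overwritten with a unique, impossible VPN2.
 */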
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an ASID.  To effectively flush
 * these entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

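/*
 * Flush the user mappings in [start, end) belonging to vma's mm on the
 * local CPU.  Small ranges are flushed one even/odd page pair at a time by
 * probing the TLB; ranges covering more than a fraction of the TLB simply
 * get a new ASID via drop_mmu_context().
 */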
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}

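/*
 * Flush kernel (global) mappings in [start, end) on the local CPU.  As with
 * the user variant, large ranges fall back to a full local_flush_tlb_all().
 */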
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}

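/*
 * Flush the TLB entry covering a single user page: probe for the page's
 * (VPN2 | ASID) pair and, if a matching entry exists, overwrite it with a
 * unique invalid one.
 */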
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}

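/*
 * Install a permanent wired TLB entry: raise the wired count by one and
 * write the given EntryHi/EntryLo pair at the newly reserved index.
 */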
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

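/*
 * Probe whether the CPU supports the huge page size: write PM_HUGE_MASK to
 * c0_pagemask and check that it reads back unchanged.
 */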
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

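/* "ntlb=" command line option: restrict the number of usable TLB entries. */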
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and on 64-bit kernels
		 * enable large physical addressing (ELPA).
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

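/*
 * Per-CPU TLB initialisation: configure the TLB, apply any "ntlb="
 * restriction by raising the wired count, and install the TLB refill
 * handler.
 */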
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

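/*
 * CPU power-management notifier: reconfigure the TLB when a CPU leaves a
 * low-power state (or fails to enter one), since its TLB setup may have
 * been lost while powered down.
 */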
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);