linux/arch/mips/mm/tlb-r3k.c
// SPDX-License-Identifier: GPL-2.0
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002  Ralf Baechle
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

#undef DEBUG_TLB

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER                         \
        __asm__ __volatile__(           \
                ".set   push\n\t"       \
                ".set   noreorder\n\t"  \
                "nop\n\t"               \
                ".set   pop\n\t")
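/*
 * Most call sites below avoid the explicit nop: where another C statement
 * already sits between the coprocessor 0 write and the dependent tlbp/tlbwi,
 * that statement is simply tagged with a BARRIER comment and serves as the
 * hazard slot itself.
 */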

int r3k_have_wired_reg;                 /* Should be in cpu_data? */

/* TLB operations. */
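/*
 * Invalidate all TLB entries from index "entry" upwards.  The R3000 Index
 * register holds the entry number in bits 13:8, hence the "<< 8".  Each
 * invalidated slot gets EntryLo0 = 0 (not valid) and a distinct VPN in
 * KSEG0, which is unmapped, so the stale entries can never match and no
 * two slots end up with the same virtual address.
 */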
static void local_flush_tlb_from(int entry)
{
        unsigned long old_ctx;

        old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
        write_c0_entrylo0(0);
        while (entry < current_cpu_data.tlbsize) {
                write_c0_index(entry << 8);
                write_c0_entryhi((entry | 0x80000) << 12);
                entry++;                                /* BARRIER */
                tlb_write_indexed();
        }
        write_c0_entryhi(old_ctx);
}

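/*
 * Flush everything except the wired entries.  The TX39 variants have a
 * real Wired register, so the current wired count is read back; a plain
 * R3000 has none, but its Random register never selects entries 0-7, so
 * those eight slots are reserved for wired mappings by convention and the
 * flush starts at index 8.
 */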
void local_flush_tlb_all(void)
{
        unsigned long flags;

#ifdef DEBUG_TLB
        printk("[tlball]");
#endif
        local_irq_save(flags);
        local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
        local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
                printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
                drop_mmu_context(mm, cpu);
        }
}

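/*
 * Flush a user address range.  If the range covers no more pages than the
 * TLB has entries, each page is probed under the mm's ASID and invalidated
 * on a hit; otherwise it is cheaper to drop the whole context and let a
 * new ASID be assigned.
 */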
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

#ifdef DEBUG_TLB
                printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
                        cpu_context(cpu, mm) & asid_mask, start, end);
#endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= current_cpu_data.tlbsize) {
                        int oldpid = read_c0_entryhi() & asid_mask;
                        int newpid = cpu_context(cpu, mm) & asid_mask;

                        start &= PAGE_MASK;
                        end += PAGE_SIZE - 1;
                        end &= PAGE_MASK;
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += PAGE_SIZE;     /* BARRIER */
                                tlb_probe();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entryhi(KSEG0);
                                if (idx < 0)            /* BARRIER */
                                        continue;
                                tlb_write_indexed();
                        }
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}

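/*
 * Same probe-and-invalidate walk for a kernel virtual range, except that
 * no ASID is folded into the probe address.  Ranges larger than the TLB
 * fall back to a full flush.
 */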
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

#ifdef DEBUG_TLB
        printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size <= current_cpu_data.tlbsize) {
                int pid = read_c0_entryhi();

                start &= PAGE_MASK;
                end += PAGE_SIZE - 1;
                end &= PAGE_MASK;

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += PAGE_SIZE;             /* BARRIER */
                        tlb_probe();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entryhi(KSEG0);
                        if (idx < 0)                    /* BARRIER */
                                continue;
                        tlb_write_indexed();
                }
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        local_irq_restore(flags);
}

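/*
 * Drop a single user page: probe for it under the mm's current ASID and,
 * if it is resident, overwrite the slot with an invalid KSEG0 entry.
 */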
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

#ifdef DEBUG_TLB
                printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
                newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
                page &= PAGE_MASK;
                local_irq_save(flags);
                oldpid = read_c0_entryhi() & asid_mask;
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entryhi(KSEG0);
                if (idx < 0)                            /* BARRIER */
                        goto finish;
                tlb_write_indexed();

finish:
                write_c0_entryhi(oldpid);
                local_irq_restore(flags);
        }
}

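/*
 * Install or refresh the entry for a page whose PTE has just been set up
 * or changed.  If a stale translation is already in the TLB it is updated
 * in place via tlbwi; otherwise tlbwr drops the new one into a random slot.
 */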
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        unsigned long flags;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & asid_mask;

#ifdef DEBUG_TLB
        if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & asid_mask)) ||
            (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                       cpu_context(smp_processor_id(), vma->vm_mm), pid);
        }
#endif

        local_irq_save(flags);
        address &= PAGE_MASK;
        write_c0_entryhi(address | pid);
        BARRIER;
        tlb_probe();
        idx = read_c0_index();
        write_c0_entrylo0(pte_val(pte));
        write_c0_entryhi(address | pid);
        if (idx < 0) {                                  /* BARRIER */
                tlb_write_random();
        } else {
                tlb_write_indexed();
        }
        write_c0_entryhi(pid);
        local_irq_restore(flags);
}

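/*
 * Pin a translation that must never be evicted.  TX39 parts have a real
 * Wired register: the entry goes in at the current wired index, the wired
 * count is bumped and local_flush_tlb_all() is then called so no stale
 * translation survives.  Plain R3000 parts have no Wired register, so up
 * to eight entries are placed at the fixed indexes 0-7 that the Random
 * register never selects.
 *
 * Callers pass pre-encoded R3000 register images: entrylo0 carries the
 * PFN plus the N/D/V/G bits, entryhi the VPN and ASID, entrylo1 is unused
 * on these CPUs, and pagemask is only honoured on the TX39 path.  A
 * hypothetical use, wiring one uncached page of I/O space, might look
 * like:
 *
 *      add_wired_entry(io_phys | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8),
 *                      0, io_virt, 0);
 */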
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        unsigned long flags;
        unsigned long old_ctx;
        static unsigned long wired = 0;

        if (r3k_have_wired_reg) {                       /* TX39XX */
                unsigned long old_pagemask;
                unsigned long w;

#ifdef DEBUG_TLB
                printk("[tlbwired<entry lo0 %8lx, hi %8lx, pagemask %8lx>]\n",
                       entrylo0, entryhi, pagemask);
#endif

                local_irq_save(flags);
                /* Save old context and create impossible VPN2 value */
                old_ctx = read_c0_entryhi() & asid_mask;
                old_pagemask = read_c0_pagemask();
                w = read_c0_wired();
                write_c0_wired(w + 1);
                write_c0_index(w << 8);
                write_c0_pagemask(pagemask);
                write_c0_entryhi(entryhi);
                write_c0_entrylo0(entrylo0);
                BARRIER;
                tlb_write_indexed();

                write_c0_entryhi(old_ctx);
                write_c0_pagemask(old_pagemask);
                local_flush_tlb_all();
                local_irq_restore(flags);

        } else if (wired < 8) {
#ifdef DEBUG_TLB
                printk("[tlbwired<entry lo0 %8lx, hi %8lx>]\n",
                       entrylo0, entryhi);
#endif

                local_irq_save(flags);
                old_ctx = read_c0_entryhi() & asid_mask;
                write_c0_entrylo0(entrylo0);
                write_c0_entryhi(entryhi);
                write_c0_index(wired);
                wired++;                                /* BARRIER */
                tlb_write_indexed();
                write_c0_entryhi(old_ctx);
                local_flush_tlb_all();
                local_irq_restore(flags);
        }
}

void tlb_init(void)
{
        switch (current_cpu_type()) {
        case CPU_TX3922:
        case CPU_TX3927:
                r3k_have_wired_reg = 1;
                write_c0_wired(0);              /* Set to 8 on reset... */
                break;
        }
        local_flush_tlb_from(0);
        build_tlb_refill_handler();
}