linux/arch/sh/mm/tlb-sh5.c
/*
 * arch/sh/mm/tlb-sh5.c
 *
 * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
 * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

/**
 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
 */
int __cpuinit sh64_tlb_init(void)
{
        /* Assign some sane DTLB defaults */
        cpu_data->dtlb.entries  = 64;
        cpu_data->dtlb.step     = 0x10;

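        /*
         * Slot 0 of the fixed region (DTLB_FIXED itself) is excluded
         * from the range managed here: 'first' begins at the second
         * slot and 'last' is the final one, so neither demand refills
         * nor wired allocations will disturb whatever the boot code
         * may already have loaded into the first slot.
         */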
        cpu_data->dtlb.first    = DTLB_FIXED | cpu_data->dtlb.step;
        cpu_data->dtlb.next     = cpu_data->dtlb.first;

        cpu_data->dtlb.last     = DTLB_FIXED |
                                  ((cpu_data->dtlb.entries - 1) *
                                   cpu_data->dtlb.step);

        /* And again for the ITLB */
        cpu_data->itlb.entries  = 64;
        cpu_data->itlb.step     = 0x10;

        cpu_data->itlb.first    = ITLB_FIXED | cpu_data->itlb.step;
        cpu_data->itlb.next     = cpu_data->itlb.first;
        cpu_data->itlb.last     = ITLB_FIXED |
                                  ((cpu_data->itlb.entries - 1) *
                                   cpu_data->itlb.step);

        return 0;
}

/**
 * sh64_next_free_dtlb_entry - Find the next available DTLB entry
 */
unsigned long long sh64_next_free_dtlb_entry(void)
{
        return cpu_data->dtlb.next;
}

/**
 * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
 */
unsigned long long sh64_get_wired_dtlb_entry(void)
{
        unsigned long long entry = sh64_next_free_dtlb_entry();

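        /*
         * Shrink the range available to __update_tlb()'s round-robin
         * refill by raising 'first' (its wrap-around point), and advance
         * 'next' so the following caller gets a fresh slot.
         */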
        cpu_data->dtlb.first += cpu_data->dtlb.step;
        cpu_data->dtlb.next  += cpu_data->dtlb.step;

        return entry;
}

/**
 * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
 *
 * @entry:      Address of TLB slot.
 *
 * Works like a stack; the last entry allocated must be the first one freed.
 */
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
        __flush_tlb_slot(entry);

        /*
         * We don't do any particularly useful tracking of wired entries,
         * so this approach works like a stack .. last one to be allocated
         * has to be the first one to be freed.
         *
         * We could potentially load wired entries into a list and work on
         * rebalancing the list periodically (which also entails moving the
         * contents of a TLB entry) .. though I have a feeling that this is
         * more trouble than it's worth.
         */

        /*
         * Entry must be valid .. we don't want any ITLB addresses!
         */
        if (entry <= DTLB_FIXED)
                return -EINVAL;

        /*
         * Next, check that this entry is actually the last one handed
         * out (i.e. the entry immediately beneath the first 'free' slot).
         */
        if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
                return -EINVAL;

        /* It is, so bring this entry back into the allocatable range */
        cpu_data->dtlb.first    -= cpu_data->dtlb.step;
        cpu_data->dtlb.next     = entry;

        return 0;
}
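
/*
 * Illustrative sketch (not code from this file) of the stack discipline
 * enforced above:
 *
 *      e1 = sh64_get_wired_dtlb_entry();
 *      e2 = sh64_get_wired_dtlb_entry();
 *
 *      sh64_put_wired_dtlb_entry(e2);  -- most recent allocation: OK
 *      sh64_put_wired_dtlb_entry(e1);  -- now the most recent: OK
 *
 * Releasing e1 while e2 is still wired would fail the range check above
 * and return -EINVAL, since e1 would not sit immediately beneath 'first'.
 */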

/**
 * sh64_setup_tlb_slot - Load up a translation in a wired slot.
 *
 * @config_addr:        Address of TLB slot.
 * @eaddr:              Virtual address.
 * @asid:               Address Space Identifier.
 * @paddr:              Physical address.
 *
 * Load up a virtual<->physical translation for @eaddr<->@paddr in the
 * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
 */
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
                         unsigned long asid, unsigned long paddr)
{
        unsigned long long pteh, ptel;

        pteh = neff_sign_extend(eaddr);
        pteh &= PAGE_MASK;
        pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        ptel = neff_sign_extend(paddr);
        ptel &= PAGE_MASK;
        ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);

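        /*
         * Write the PTEL half (config offset 1) before the PTEH half
         * (config offset 0); PTEH carries the valid bit, so the slot
         * only becomes live once both halves are in place.
         */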
        asm volatile("putcfg %0, 1, %1\n\t"
                     "putcfg %0, 0, %2\n"
                     : : "r" (config_addr), "r" (ptel), "r" (pteh));
}

/**
 * sh64_teardown_tlb_slot - Tear down a translation.
 *
 * @config_addr:        Address of TLB slot.
 *
 * Tear down any existing mapping in the TLB slot @config_addr.
 */
void sh64_teardown_tlb_slot(unsigned long long config_addr)
        __attribute__ ((alias("__flush_tlb_slot")));

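/*
 * Book-keeping for tlb_wire_entry()/tlb_unwire_entry() below: slots
 * handed out by sh64_get_wired_dtlb_entry() are remembered in
 * dtlb_entries[] and must be released in reverse order, matching the
 * LIFO behaviour of sh64_put_wired_dtlb_entry().
 */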
static int dtlb_entry;
static unsigned long long dtlb_entries[64];

void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long long entry;
        unsigned long paddr, flags;

        BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));

        local_irq_save(flags);

        entry = sh64_get_wired_dtlb_entry();
        dtlb_entries[dtlb_entry++] = entry;

        /* Keep only the page-aligned physical address from the PTE */
        paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
        paddr &= PAGE_MASK;

        sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);

        local_irq_restore(flags);
}

void tlb_unwire_entry(void)
{
        unsigned long long entry;
        unsigned long flags;

        BUG_ON(!dtlb_entry);

        local_irq_save(flags);
        /* Pop the most recently wired slot */
        entry = dtlb_entries[--dtlb_entry];

        sh64_teardown_tlb_slot(entry);
        sh64_put_wired_dtlb_entry(entry);

        local_irq_restore(flags);
}

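/*
 * Demand refill: load the translation for @address/@pte into the next
 * replacement slot of the ITLB or DTLB, selected by the recorded fault
 * code.  This is typically reached via update_mmu_cache() once the page
 * tables have been fixed up after a TLB miss.
 */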
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long long ptel;
        unsigned long long pteh = 0;
        struct tlb_info *tlbp;
        unsigned long long next;
        unsigned int fault_code = get_thread_fault_code();

        /* Get PTEL first */
        ptel = pte.pte_low;

        /*
         * Set PTEH register
         */
        pteh = neff_sign_extend(address & MMU_VPN_MASK);

        /* Set the ASID. */
        pteh |= get_asid() << PTEH_ASID_SHIFT;
        pteh |= PTEH_VALID;

        /* Set PTEL register, set_pte has performed the sign extension */
        ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

        if (fault_code & FAULT_CODE_ITLB)
                tlbp = &cpu_data->itlb;
        else
                tlbp = &cpu_data->dtlb;

        next = tlbp->next;
        __flush_tlb_slot(next);
        asm volatile ("putcfg %0,1,%2\n\t"
                      "putcfg %0,0,%1\n"
                      : : "r" (next), "r" (pteh), "r" (ptel));

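        /*
         * Round-robin replacement: move on to the next slot, wrapping
         * back to 'first' rather than the base of the fixed region so
         * that slots reserved by sh64_get_wired_dtlb_entry() (which
         * raises 'first') are never reused by the refill path.
         */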
        next += TLB_STEP;
        if (next > tlbp->last)
                next = tlbp->first;
        tlbp->next = next;
}