/*
 * The SH64 TLB miss.
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES :
 * The do_fast_page_fault function is called from a context in entry.S
 * where very few registers have been saved.  In particular, the code in
 * this file must be compiled not to use ANY caller-save registers that
 * are not part of the restricted save set.  Also, it means that code in
 * this file must not make calls to functions elsewhere in the kernel, or
 * else the excepting context will see corruption in its caller-save
 * registers.  Plus, the entry.S save area is non-reentrant, so this code
 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
 * on any exception.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

/*
 * Walk the page tables for 'address' and, if a present PTE with the
 * required protection bits exists, refill the TLB from it.
 *
 * @protection_flags: PTE bits (PRR/PRW/PRX/...) that must all be set in
 *                    the PTE for this kind of fault to be serviced here.
 * @address:          faulting virtual address.
 *
 * Returns 0 if the TLB was refilled, 1 if the fault must be punted to
 * the general fault handler in fault.c (no mm, out-of-range address,
 * missing table entry, or insufficient protection bits).
 *
 * NOTE(review): per the file header this runs with SR.BL==1 and a
 * restricted register save set -- it must not call out into the wider
 * kernel.
 */
static int handle_tlbmiss(unsigned long long protection_flags,
                          unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* vmalloc addresses live in the kernel (init) page tables;
	   everything else goes through the current task's mm. */
	if (is_vmalloc_addr((void *)address)) {
		pgd = pgd_offset_k(address);
	} else {
		/* No user mm (e.g. kernel thread) or address beyond the
		   user range: cannot service the fault here. */
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 1;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return 1;

	/*
	 * If the page doesn't have sufficient protection bits set to
	 * service the kind of fault being handled, there's not much
	 * point doing the TLB refill.  Punt the fault to the general
	 * handler.
	 */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 1;

	/* Load the PTE into the TLB (no vma needed for the refill). */
	update_mmu_cache(NULL, address, pte);

	return 0;
}
  87
  88/*
  89 * Put all this information into one structure so that everything is just
  90 * arithmetic relative to a single base address.  This reduces the number
  91 * of movi/shori pairs needed just to load addresses of static data.
  92 */
  93struct expevt_lookup {
  94        unsigned short protection_flags[8];
  95        unsigned char  is_text_access[8];
  96        unsigned char  is_write_access[8];
  97};
  98
  99#define PRU (1<<9)
 100#define PRW (1<<8)
 101#define PRX (1<<7)
 102#define PRR (1<<6)
 103
 104/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
 105   the fault happened in user mode or privileged mode. */
 106static struct expevt_lookup expevt_lookup_table = {
 107        .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
 108        .is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
 109};
 110
 111static inline unsigned int
 112expevt_to_fault_code(unsigned long expevt)
 113{
 114        if (expevt == 0xa40)
 115                return FAULT_CODE_ITLB;
 116        else if (expevt == 0x060)
 117                return FAULT_CODE_WRITE;
 118
 119        return 0;
 120}
 121
/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry.  (This case represents a very
   large majority of page faults.) Return 0 if the fault was successfully
   handled.  Return non-zero if the fault could not be handled.  (This leads
   into the general fault handling in fault.c which deals with mapping
   file-backed pages, stack growth, segmentation faults, swapping etc etc)
 */
 130asmlinkage int __kprobes
 131do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
 132                   unsigned long address)
 133{
 134        unsigned long long protection_flags;
 135        unsigned long long index;
 136        unsigned long long expevt4;
 137        unsigned int fault_code;
 138
 139        /* The next few lines implement a way of hashing EXPEVT into a
 140         * small array index which can be used to lookup parameters
 141         * specific to the type of TLBMISS being handled.
 142         *
 143         * Note:
 144         *      ITLBMISS has EXPEVT==0xa40
 145         *      RTLBMISS has EXPEVT==0x040
 146         *      WTLBMISS has EXPEVT==0x060
 147         */
 148        expevt4 = (expevt >> 4);
 149        /* TODO : xor ssr_md into this expression too. Then we can check
 150         * that PRU is set when it needs to be. */
 151        index = expevt4 ^ (expevt4 >> 5);
 152        index &= 7;
 153
 154        fault_code = expevt_to_fault_code(expevt);
 155
 156        protection_flags = expevt_lookup_table.protection_flags[index];
 157
 158        if (expevt_lookup_table.is_text_access[index])
 159                fault_code |= FAULT_CODE_ITLB;
 160        if (!ssr_md)
 161                fault_code |= FAULT_CODE_USER;
 162
 163        set_thread_fault_code(fault_code);
 164
 165        return handle_tlbmiss(protection_flags, address);
 166}
 167