linux/arch/sh/include/asm/tlb.h
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        unsigned long           start, end;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

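/*
 * Reset the range that will need flushing.  For a partial unmap the
 * range starts out empty (start > end) and is grown by
 * tlb_remove_tlb_entry(); a full-mm teardown simply covers the whole
 * of user space.
 */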
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

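/*
 * Grab the per-CPU mmu_gather and set it up for a new unmap
 * operation.  The generic unmap path drives this API roughly as:
 *
 *      tlb = tlb_gather_mmu(mm, full_mm_flush);
 *      tlb_start_vma(tlb, vma);
 *      ...     tlb_remove_tlb_entry(tlb, ptep, addr);
 *      ...     tlb_remove_page(tlb, page);
 *      tlb_end_vma(tlb, vma);
 *      tlb_finish_mmu(tlb, start, end);
 */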
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        init_tlb_gather(tlb);

        return tlb;
}

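/*
 * Finish the gather: a full-mm teardown flushes the entire TLB for
 * the mm here, the page table cache is trimmed, and the per-CPU
 * mmu_gather is released.  Partial flushes have already been issued
 * per-VMA by tlb_end_vma().
 */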
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

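/*
 * Note that a PTE at @address is going away by widening the range
 * that tlb_end_vma() will hand to flush_tlb_range().
 */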
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap, the vmas
 * are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}

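/*
 * There is no batching: pages and page table pages are freed
 * immediately as they are removed.
 */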
#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)          do { } while (0)

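/*
 * TLB entry wiring is only implemented for SH-4 and SH64 parts;
 * calling these anywhere else is a bug.
 */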
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        BUG();
}

static inline void tlb_unwire_entry(void)
{
        BUG();
}
#endif

#else /* CONFIG_MMU */

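/*
 * Without an MMU there is nothing to flush: provide the empty hooks
 * that asm-generic/tlb.h expects and let it supply the rest.
 */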
#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)       do { } while (0)
#define tlb_flush(tlb)                                  do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */