linux/arch/sh/include/asm/tlb.h
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        unsigned long           start, end;
};
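
/*
 * The generic mm code drives this interface roughly as follows during an
 * unmap (a sketch of the typical call sequence, not a verbatim call chain):
 *
 *        tlb = tlb_gather_mmu(mm, fullmm);
 *        for each vma being torn down:
 *                tlb_start_vma(tlb, vma);
 *                ... tlb_remove_tlb_entry() / tlb_remove_page() per page ...
 *                tlb_end_vma(tlb, vma);
 *        tlb_finish_mmu(tlb, start, end);
 */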

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

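/*
 * Reset the gathered flush range to empty (start > end) so it can be
 * grown per-PTE; a full-mm teardown instead covers the whole user
 * address space up front.
 */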
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

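/*
 * Grab the per-CPU gather structure and note whether this is a full
 * address space flush.  get_cpu_var() disables preemption; the matching
 * put_cpu_var() happens in tlb_finish_mmu().
 */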
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        init_tlb_gather(tlb);

        return tlb;
}

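/*
 * Finish the unmap: a full-mm teardown flushes the whole TLB for the mm
 * in one go, while partial unmaps have already been flushed per-VMA by
 * tlb_end_vma().  The start/end arguments are unused here.
 */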
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

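/*
 * Record that the PTE at @address was unmapped by widening the range
 * that tlb_end_vma() will hand to flush_tlb_range().
 */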
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

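/*
 * Flush whatever range was gathered for this VMA, then reset the range
 * for the next one.  Skipped when nothing was unmapped (tlb->end == 0)
 * or for a full-mm teardown, which tlb_finish_mmu() handles in one go.
 */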
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}

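/*
 * Pages and page table pages are freed immediately rather than batched:
 * tlb_remove_page() drops the page (and any swap cache entry) on the
 * spot, and the *_free_tlb() helpers hand page table pages straight
 * back to the allocator.
 */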
#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

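/* Nothing to do when a task is migrated to another CPU. */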
#define tlb_migrate_finish(mm)          do { } while (0)

#else /* CONFIG_MMU */

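/*
 * Without an MMU there is nothing to flush: stub out the hooks and let
 * asm-generic/tlb.h provide the rest of the mmu_gather machinery.
 */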
#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)       do { } while (0)
#define tlb_flush(tlb)                                  do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */