linux/arch/arm/include/asm/tlb.h
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

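/* No MMU, hence no TLB to flush: fall back to the generic mmu_gather. */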
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        unsigned long           range_start;
        unsigned long           range_end;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
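
/*
 * A rough sketch (illustrative only, not lifted from the generic mm code)
 * of how a caller is expected to drive these hooks when tearing down a
 * range of user mappings; everything other than the tlb_* hooks defined
 * in this file (mm, vma, ptep, page, addr, start, end) is assumed context:
 *
 *      struct mmu_gather *tlb = tlb_gather_mmu(mm, 0); // 0: not a full-mm teardown
 *      tlb_start_vma(tlb, vma);                        // flush caches, reset the range
 *      tlb_remove_tlb_entry(tlb, ptep, addr);          // for each PTE cleared
 *      tlb_remove_page(tlb, page);                     // hand back the backing page
 *      tlb_end_vma(tlb, vma);                          // flush the memorized range
 *      tlb_finish_mmu(tlb, start, end);                // full-mm flush + release
 */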

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        return tlb;
}

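/*
 * End the gather: a full-mm teardown invalidates the whole TLB in one go
 * (see the StrongARM note at the top of this file); partial unmaps have
 * already been flushed range-by-range in tlb_end_vma().  put_cpu_var()
 * drops the per-CPU mmu_gather (and the preempt_disable) taken by
 * get_cpu_var() in tlb_gather_mmu().
 */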
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

/*
 * Memorize the range for the TLB flush.
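 *
 * For example (a worked illustration, assuming the usual 4K PAGE_SIZE):
 * clearing PTEs at 0x40008000 and 0x4000a000 grows the memorized range to
 * [0x40008000, 0x4000b000), which tlb_end_vma() then hands to a single
 * flush_tlb_range() call.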
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
        if (!tlb->fullmm) {
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
                if (addr + PAGE_SIZE > tlb->range_end)
                        tlb->range_end = addr + PAGE_SIZE;
        }
}

/*
 * In the tlb vma handling, we can optimise these flushes away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are adjusted
 * to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}

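/*
 * range_end is still 0 (as reset in tlb_start_vma()) if no entries were
 * memorized for this vma, so an empty range never triggers a flush.
 */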
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->range_end > 0)
                flush_tlb_range(vma, tlb->range_start, tlb->range_end);
}

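/*
 * Unlike the asm-generic mmu_gather, this version does not batch up pages
 * to free: pages and page tables are handed back immediately, so the
 * gather only needs to track the address range to flush.
 */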
#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)          do { } while (0)

#endif /* CONFIG_MMU */
#endif