linux/arch/um/include/asm/tlb.h
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            need_flush; /* Really unmapped some ptes? */
        unsigned long           start;
        unsigned long           end;
        unsigned int            fullmm; /* non-zero means full mm flush */
};
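
/*
 * Usage sketch (illustrative only, not mandated by this header): generic
 * mm code drives the gather roughly as follows, with the loop in between
 * growing the [start, end) range recorded above:
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm, start, end);
 *      for each pte in [start, end) {
 *              tlb_remove_tlb_entry(&tlb, ptep, addr);
 *              tlb_remove_page(&tlb, page);
 *      }
 *      tlb_finish_mmu(&tlb, start, end);
 */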

/* Grow the pending flush range to cover the page at @address. */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                          unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

/* Reset the gather to an empty range; a full-mm flush starts out covering
 * the whole address space instead.
 */
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->need_flush = 0;

        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->start = start;
        tlb->end = end;
        /* start == 0 and end == -1 together encode a full-mm flush */
        tlb->fullmm = !(start | (end + 1));

        /* note: init_tlb_gather() re-derives start/end from fullmm */
        init_tlb_gather(tlb);
}
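
/*
 * Worked example (illustrative, not part of the original header): a
 * whole-address-space teardown such as exit_mmap() passes start == 0 and
 * end == -1, so
 *
 *      !(0UL | ((unsigned long)-1 + 1)) == !0UL == 1
 *
 * and fullmm is set; any bounded [start, end) range leaves it clear.
 */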

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end);

/* Flush the TLB for the range accumulated so far. */
static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

/* Nothing is batched on UML, so "freeing" just resets the gather. */
static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
        if (!tlb->need_flush)
                return;

        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.  The start/end arguments are ignored here; the
 *      gather has already tracked the range it touched.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}

/* tlb_remove_page
 *      Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *      while handling the additional races in SMP caused by other CPUs
 *      caching valid mappings in their TLBs.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;
        free_page_and_swap_cache(page);
        return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        __tlb_remove_page(tlb, page);
}

/* Pages are freed immediately above, so the size- and PTE-specific
 * variants simply forward to __tlb_remove_page().
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
                                         struct page *page)
{
        return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        tlb_remove_page(tlb, page);
}
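
/*
 * Illustrative note (not part of the original header): generic mm code
 * drains a full gather with a pattern roughly like
 *
 *      if (__tlb_remove_page(tlb, page))
 *              tlb_flush_mmu(tlb);
 *
 * Because every variant above frees the page immediately and returns
 * false, that drain never fires on UML.
 */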

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                (tlb)->need_flush = 1;                          \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
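
/*
 * Illustrative sketch (not part of the original header), loosely modelled
 * on the generic zap_pte_range(): clear the PTE first, then record it so
 * the eventual flush covers it:
 *
 *      pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *
 *      tlb_remove_tlb_entry(tlb, ptep, addr);
 *      if (pte_present(pte))
 *              tlb_remove_page(tlb, pte_page(pte));
 */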

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do { } while (0)

#endif /* __UM_TLB_H */