linux/arch/um/include/asm/tlb.h
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

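/*
 * UML tracks the flush range itself (see struct mmu_gather below), so no
 * per-vma start/end notification is needed; a full flush of an mm simply
 * falls back to flush_tlb_mm().
 */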
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;	/* start of the range to flush */
	unsigned long		end;	/* end of the range to flush */
	unsigned int		fullmm; /* non-zero means full mm flush */
};
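
/*
 * Rough sketch of how the generic mm code drives this interface (the
 * generic tlb_gather_mmu()/tlb_finish_mmu() wrappers end up in the
 * arch_* hooks defined further down in this file):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... for each pte being torn down ...
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *	tlb_finish_mmu(&tlb, start, end);
 */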

/*
 * Grow the gathered [start, end) range so that the eventual flush covers
 * the page whose pte is being removed.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

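/*
 * Reset the gather state.  For a full-mm teardown the flush range is
 * preset to cover the whole address space, since every mapping is going
 * away anyway.
 */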
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

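/*
 * Called from the generic tlb_gather_mmu().  A start/end of 0/-1 is how
 * the generic code asks for a full-mm flush, hence the fullmm test.
 */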
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}

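/* Provided by the UML tlb code; flushes the given range of @mm. */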
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

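/* Do the actual TLB flush for the range gathered so far. */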
static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

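/*
 * Pages are freed immediately in __tlb_remove_page() rather than being
 * batched, so there is nothing left to free here; just reset the gather.
 */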
static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

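/* Flush only if some ptes were actually unmapped since the last reset. */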
static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

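/* The page_size variants ignore the size; UML only tracks an address range. */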
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

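/*
 * Pages are not batched, so a change in page size between removed
 * entries never requires an intermediate flush.
 */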
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

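/* Freeing of page table pages is forwarded directly to the __*_free_tlb() helpers. */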
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

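/* No per-mm TLB state to fix up after a task migrates to another CPU. */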
#define tlb_migrate_finish(mm) do {} while (0)

#endif