linux/arch/um/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
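
/*
 * Per-VMA start/end hooks are no-ops here; when the core code asks for a
 * flush, tlb_flush() simply flushes the whole mm via flush_tlb_mm().
 */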

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};
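
/*
 * Illustrative sketch only (not part of this header): generic mm code
 * drives an mmu_gather roughly like this when tearing down a range; the
 * real call sites live in mm/memory.c.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... walk the page tables, calling tlb_remove_tlb_entry() and
 *	... tlb_remove_page() for each PTE/page being torn down ...
 *	tlb_finish_mmu(&tlb, start, end);
 */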

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
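
/*
 * Worked example (assuming 4 KiB pages): recording the entries at
 * addresses 0x1000 and then 0x5000 grows the window to start == 0x1000,
 * end == 0x6000, so the eventual flush covers both PTEs with a single
 * flush_tlb_mm_range() call.
 */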

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}
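
/*
 * Note on the fullmm test above: a full-mm teardown is conventionally
 * requested with start == 0 and end == ~0UL, so (end + 1) wraps to 0 and
 * !(0 | 0) yields 1; any narrower range leaves a bit set and fullmm
 * stays 0.
 */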

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
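
/*
 * The force path above lets a caller demand a flush of [start, end) even
 * when nothing was gathered, typically because a concurrent unmap may have
 * raced with this one (see tlb_finish_mmu() in mm/memory.c).
 */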

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}
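
/*
 * Unlike arches that batch pages in the gather and free them only after
 * the flush, this frees the page immediately via free_page_and_swap_cache()
 * and returns false unconditionally, so the generic unmap loop never has to
 * call tlb_flush_mmu() early because a batch filled up.
 */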

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
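
/*
 * Minimal usage sketch (hypothetical caller, following the usual unmap
 * walk; the variable names are illustrative):
 *
 *	pte_t pte = ptep_get_and_clear(mm, address, ptep);
 *
 *	tlb_remove_tlb_entry(tlb, ptep, address);
 *	if (pte_present(pte))
 *		tlb_remove_page(tlb, pte_page(pte));
 */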

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif