linux/arch/sh/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables and to batch the TLB flushing those removals require.
 */
struct mmu_gather {
        struct mm_struct        *mm;            /* address space being torn down */
        unsigned int            fullmm;         /* non-zero: entire mm goes away */
        unsigned long           start, end;     /* virtual range gathered so far */
};

/*
 * Reset the gathered range to "empty" (start > end means nothing has
 * been unmapped yet).  A full-mm teardown always covers 0..TASK_SIZE.
 */
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->start = start;
        tlb->end = end;
        tlb->fullmm = !(start | (end+1));

        init_tlb_gather(tlb);
}
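
/*
 * Note: !(start | (end + 1)) is non-zero only when start == 0 and
 * end == ~0UL, i.e. when the caller is tearing down the entire address
 * space; init_tlb_gather() then widens the range to 0..TASK_SIZE.
 */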

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end, bool force)
{
        if (tlb->fullmm || force)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}
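
#if 0
/*
 * Illustrative sketch only (not compiled): roughly how the generic mm
 * code drives the two hooks above when tearing down a range.  The exact
 * call sites live in mm/memory.c and vary between kernel versions;
 * 'mm', 'start' and 'end' are hypothetical caller-owned values.
 */
        struct mmu_gather tlb;

        arch_tlb_gather_mmu(&tlb, mm, start, end);
        /* per VMA: tlb_start_vma(), then tlb_remove_tlb_entry()/
         * tlb_remove_page() for each PTE, then tlb_end_vma() */
        arch_tlb_finish_mmu(&tlb, start, end, false);
#endif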

static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
        tlb_remove_tlb_entry(tlb, ptep, address)

/*
 * For VMA handling we can optimise these away when doing a full MM
 * flush, since arch_tlb_finish_mmu() will flush the whole mm anyway.
 * For a munmap, the VMAs are adjusted to cover only the region being
 * torn down, so the ranged flush in tlb_end_vma() suffices.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}
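
/*
 * Example (hypothetical addresses): unmapping pages at 0x1000 and
 * 0x3000 leaves tlb->start == 0x1000 and tlb->end == 0x3000 + PAGE_SIZE,
 * so tlb_end_vma() issues a single flush_tlb_range() over the whole
 * span instead of one flush per page.
 */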

/*
 * Flushing is done eagerly in tlb_end_vma()/arch_tlb_finish_mmu()
 * above, so the intermediate flush hooks are no-ops here.
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
        return false; /* avoid calling tlb_flush_mmu */
}
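
/*
 * Note: unlike the generic mmu_gather, pages are freed immediately
 * rather than batched, which is why __tlb_remove_page() never asks
 * its caller to flush first.
 */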

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        tlb_remove_page(tlb, page);
}

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)          do { } while (0)

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        BUG();
}

static inline void tlb_unwire_entry(void)
{
        BUG();
}
#endif
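
#if 0
/*
 * Illustrative sketch only (not compiled): wiring an entry pins a
 * translation in the TLB so it cannot be evicted, e.g. around code
 * that must not take a TLB miss.  'vma', 'addr' and 'pte' here are
 * hypothetical caller-owned values.
 */
        tlb_wire_entry(vma, addr, pte);
        /* ... touch addr without risking a TLB miss ... */
        tlb_unwire_entry();
#endif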

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)       do { } while (0)
#define tlb_flush(tlb)                                  do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */