   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *  arch/arm/include/asm/tlb.h
   4 *
   5 *  Copyright (C) 2002 Russell King
   6 *
   7 *  Experimentation shows that on a StrongARM, it appears to be faster
   8 *  to use the "invalidate whole tlb" rather than "invalidate single
   9 *  tlb" for this.
  10 *
  11 *  This appears true for both the process fork+exit case, as well as
  12 *  the munmap-large-area case.
  13 */
  14#ifndef __ASMARM_TLB_H
  15#define __ASMARM_TLB_H
  16
  17#include <asm/cacheflush.h>
  18
  19#ifndef CONFIG_MMU
  20
  21#include <linux/pagemap.h>
  22
  23#define tlb_flush(tlb)  ((void) tlb)
  24
  25#include <asm-generic/tlb.h>
  26
  27#else /* !CONFIG_MMU */
  28
  29#include <linux/swap.h>
  30#include <asm/pgalloc.h>
  31#include <asm/tlbflush.h>
  32
  33static inline void __tlb_remove_table(void *_table)
  34{
  35        free_page_and_swap_cache((struct page *)_table);
  36}
  37
  38#include <asm-generic/tlb.h>
  39
  40#ifndef CONFIG_HAVE_RCU_TABLE_FREE
  41#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
  42#endif
  43
  44static inline void
  45__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
  46{
  47        pgtable_page_dtor(pte);
  48
  49#ifndef CONFIG_ARM_LPAE
  50        /*
  51         * With the classic ARM MMU, a pte page has two corresponding pmd
  52         * entries, each covering 1MB.
  53         */
  54        addr = (addr & PMD_MASK) + SZ_1M;
  55        __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
  56#endif
  57
  58        tlb_remove_table(tlb, pte);
  59}
  60
  61static inline void
  62__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
  63{
  64#ifdef CONFIG_ARM_LPAE
  65        struct page *page = virt_to_page(pmdp);
  66
  67        tlb_remove_table(tlb, page);
  68#endif
  69}
  70
  71#endif /* CONFIG_MMU */
  72#endif
  73