linux/arch/arm64/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

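/*
 * Free a page-table page that was previously queued with
 * tlb_remove_table().  The generic mmu_gather code calls back in here
 * once it has made sure no CPU can still be walking the old table.
 */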
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

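/*
 * Defining the tlb_flush macro before including <asm-generic/tlb.h>
 * tells the generic header that this architecture supplies its own
 * tlb_flush() implementation instead of the default one.
 */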
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

/*
 * Return the TTL (translation table level) hint to use for a TLBI
 * operation on arm64.  Returns 0 (no hint) if more than one of the
 * cleared_* fields is set, or if none of them is set.
 * arm64 does not currently use a separate p4d level.
 */
static inline int tlb_get_level(struct mmu_gather *tlb)
{
	/* The TTL field is only valid for the leaf entry. */
	if (tlb->freed_tables)
		return 0;

	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 3;

	if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 2;

	if (tlb->cleared_puds && !(tlb->cleared_ptes ||
				   tlb->cleared_pmds ||
				   tlb->cleared_p4ds))
		return 1;

	return 0;
}

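/*
 * Flush the range covered by this mmu_gather.  When no table pages were
 * freed we can use a last-level-only invalidation, and the level from
 * tlb_get_level() is passed down as a TTL hint for the TLBI.
 */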
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);
	int tlb_level = tlb_get_level(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
			  last_level, tlb_level);
}

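/*
 * Tear down a PTE page: run the page-table page destructor (undoing
 * the constructor's accounting and split-lock setup), then hand the
 * page to tlb_remove_table() so it is only freed once the TLB flush
 * has completed.
 */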
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}

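/* PMD pages have their own destructor, which handles the split PMD lock. */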
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

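/* PUD pages carry no destructor state; just defer the free. */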
#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif