linux/arch/arm64/include/asm/tlbflush.h
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 *      TLB Management
 *      ==============
 *
 *      The TLB-specific code is expected to perform whatever tests it needs
 *      to determine if it should invalidate the TLB for each call.  Start
 *      addresses are inclusive and end addresses are exclusive; it is safe to
 *      round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(vma,start,end)
 *
 *              Invalidate a range of TLB entries in the specified address
 *              space.
 *              - vma   - vm_area_struct describing address range
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vma,vaddr)
 *
 *              Invalidate the specified page in the specified address space.
 *              - vma   - vm_area_struct describing address range
 *              - vaddr - virtual address (may not be aligned)
 *
 *      flush_tlb_kernel_range(start,end)
 *
 *              Invalidate the TLB entries covering the specified range of
 *              kernel virtual addresses.
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 */
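/*
 * Flush the entire TLB on the local CPU only (no broadcast).  The
 * non-shareable barriers are sufficient here: DSB NSHST makes prior
 * page-table updates visible to the local walker before the TLBI, and
 * DSB NSH followed by ISB ensures the invalidation has completed before
 * any subsequent translations on this CPU.
 */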
static inline void local_flush_tlb_all(void)
{
        dsb(nshst);
        asm("tlbi       vmalle1");
        dsb(nsh);
        isb();
}

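/*
 * Flush the entire TLB across the Inner Shareable domain (all CPUs).
 * DSB ISHST orders prior page-table updates before the broadcast TLBI;
 * DSB ISH waits for the invalidation to complete on all CPUs and ISB
 * synchronises the local context.
 */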
static inline void flush_tlb_all(void)
{
        dsb(ishst);
        asm("tlbi       vmalle1is");
        dsb(ish);
        isb();
}

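/*
 * Flush all entries tagged with this address space's ASID, on all CPUs.
 * TLBI ASIDE1IS takes the ASID in bits [63:48] of its operand, hence the
 * shift.
 */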
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long asid = ASID(mm) << 48;

        dsb(ishst);
        asm("tlbi       aside1is, %0" : : "r" (asid));
        dsb(ish);
}

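/*
 * Flush a single user page.  The TLBI operand packs VA[55:12] into bits
 * [43:0] and the ASID into bits [63:48].  VALE1IS only invalidates
 * last-level (leaf) entries, which is sufficient here because the
 * intermediate table entries are unchanged.
 */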
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr)
{
        unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

        dsb(ishst);
        asm("tlbi       vale1is, %0" : : "r" (addr));
        dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges, not
 * necessarily to improve performance.
 */
#define MAX_TLB_RANGE   (1024UL << PAGE_SHIFT)
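/* i.e. 1024 pages: 4MiB of address space with 4KiB pages, 64MiB with 64KiB pages */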
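/*
 * Flush a user address range page by page.  start/end are converted to
 * the TLBI VA+ASID operand format up front, so the loop stride is one
 * page expressed in that format (PAGE_SIZE >> 12).  When last_level is
 * true, only leaf entries are invalidated (TLBI VALE1IS); otherwise
 * entries at all levels are invalidated (TLBI VAE1IS).  Ranges larger
 * than MAX_TLB_RANGE fall back to a full ASID flush.
 */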
static inline void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     bool last_level)
{
        unsigned long asid = ASID(vma->vm_mm) << 48;
        unsigned long addr;

        if ((end - start) > MAX_TLB_RANGE) {
                flush_tlb_mm(vma->vm_mm);
                return;
        }

        start = asid | (start >> 12);
        end = asid | (end >> 12);

        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
                if (last_level)
                        asm("tlbi vale1is, %0" : : "r"(addr));
                else
                        asm("tlbi vae1is, %0" : : "r"(addr));
        }
        dsb(ish);
}

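/*
 * Conservative wrapper around __flush_tlb_range(): flush at all levels,
 * since the caller may have modified intermediate table entries as well
 * as leaf entries.
 */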
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __flush_tlb_range(vma, start, end, false);
}

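/*
 * Flush a range of kernel addresses.  Kernel (global) mappings are not
 * tagged with an ASID, so TLBI VAAE1IS is used to match the VA in any
 * ASID.  The trailing ISB ensures subsequent instructions on this CPU
 * use the updated translations.
 */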
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        if ((end - start) > MAX_TLB_RANGE) {
                flush_tlb_all();
                return;
        }

        start >>= 12;
        end >>= 12;

        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                asm("tlbi vaae1is, %0" : : "r"(addr));
        dsb(ish);
        isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
                                       unsigned long uaddr)
{
        unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

        asm("tlbi       vae1is, %0" : : "r" (addr));
        dsb(ish);
}
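
/*
 * Illustrative sketch (not part of this header): a typical caller clears
 * or updates a PTE and then invalidates the stale translation, e.g.
 *
 *      static inline void example_clear_user_pte(struct vm_area_struct *vma,
 *                                                unsigned long addr,
 *                                                pte_t *ptep)
 *      {
 *              pte_clear(vma->vm_mm, addr, ptep);
 *              flush_tlb_page(vma, addr);
 *      }
 *
 * example_clear_user_pte() is a made-up helper for illustration only; the
 * real callers live in the core mm code and in arch/arm64/mm/.
 */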

#endif /* __ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */