linux/arch/arm64/include/asm/tlbflush.h
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

extern struct cpu_tlb_fns cpu_tlb;

/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm64/mm/tlb.S file implements these methods.
 *
 *	The TLB-specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start	- start address (inclusive)
 *		- end	- end address (exclusive)
 */
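/*
 * Example (illustrative only, not part of the original interface notes):
 * because start addresses are inclusive and end addresses exclusive, a
 * caller invalidating a single page at 'addr' would issue
 *
 *	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
 *
 * and rounding 'addr' down to a page boundary is safe.
 */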
static inline void flush_tlb_all(void)
{
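	/*
	 * The leading DSB makes any prior page-table updates visible to the
	 * table walkers; TLBI VMALLE1IS invalidates all stage 1 EL1&0 TLB
	 * entries in the Inner Shareable domain; the trailing DSB waits for
	 * the invalidation to complete and the ISB resynchronizes the
	 * instruction stream.
	 */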
	dsb();
	asm("tlbi	vmalle1is");
	dsb();
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = (unsigned long)ASID(mm) << 48;

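	/*
	 * TLBI ASIDE1IS takes the ASID in bits [63:48] of its register
	 * operand and invalidates every entry tagged with that ASID in the
	 * Inner Shareable domain.
	 */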
	dsb();
	asm("tlbi	aside1is, %0" : : "r" (asid));
	dsb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
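	/*
	 * Build the TLBI VAE1IS operand: VA[55:12] of the address in the low
	 * bits and the ASID in bits [63:48]; this invalidates the single
	 * entry for that (ASID, VA) pair in the Inner Shareable domain.
	 */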
	unsigned long addr = uaddr >> 12 |
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb();
	asm("tlbi	vae1is, %0" : : "r" (addr));
	dsb();
}

/*
 * Convert calls to our calling convention.
 */
#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
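
/*
 * Illustrative sketch (an assumption, not the actual implementation): the
 * assembly routine behind flush_tlb_range() conceptually walks the range a
 * page at a time and issues a per-page, per-ASID invalidate, bracketed by
 * the same barriers as the helpers above.  The helper name below is
 * hypothetical, PAGE_SIZE/PAGE_MASK are assumed to be in scope, and the
 * loop omits whatever tests the real code performs.
 */
static inline void __example_flush_user_tlb_range(unsigned long start,
						  unsigned long end,
						  struct vm_area_struct *vma)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;

	dsb();
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		asm("tlbi	vae1is, %0" : : "r" (asid | addr >> 12));
	dsb();
}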

/*
 * On AArch64, cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * set_pte() does not have a DSB, so make sure that the page table
	 * write is visible.
	 */
	dsb();
}
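/*
 * Illustrative call site (an assumption, not part of this header): the
 * generic fault path establishes the PTE first and then notifies the
 * architecture, e.g.
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	update_mmu_cache(vma, addr, ptep);
 *
 * so the DSB above publishes the page-table write before the faulting
 * thread can re-execute the access.
 */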

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif	/* !__ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */