/* linux/arch/powerpc/include/asm/book3s/64/tlbflush.h */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

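/*
 * Context ID value marking an mm for which no hardware MMU context
 * has been allocated.
 */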
#define MMU_NO_CONTEXT  ~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

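/*
 * Each helper below dispatches at runtime to the radix or hash backend
 * via radix_enabled(). All of these functions return void; the
 * "return radix__...()" form just keeps each branch to a single line.
 */

/*
 * Telling generic mm code we have our own PMD-range flush avoids its
 * fallback to flush_tlb_range() for huge-page-sized ranges.
 */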
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_pmd_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

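/*
 * Flush a range mapped by hugetlb pages. The hash backend has no
 * hugetlb-specific variant and reuses hash__flush_tlb_range().
 */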
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        if (radix_enabled())
                return radix__flush_hugetlb_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

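/* Flush user TLB entries for a range of addresses within a VMA. */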
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

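/* Flush TLB entries covering a range of kernel virtual addresses. */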
static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_kernel_range(start, end);
        return hash__flush_tlb_kernel_range(start, end);
}

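/* Flush the current CPU's TLB entries for the given address space. */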
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_tlb_mm(mm);
        return hash__local_flush_tlb_mm(mm);
}

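/* Flush a single user page from the current CPU's TLB. */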
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_tlb_page(vma, vmaddr);
        return hash__local_flush_tlb_page(vma, vmaddr);
}

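/*
 * Perform the flush accumulated in an mmu_gather batch; called by the
 * generic mmu_gather code when a batch of unmaps completes.
 */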
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (radix_enabled())
                return radix__tlb_flush(tlb);
        return hash__tlb_flush(tlb);
}

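/*
 * The SMP variants must invalidate entries on every CPU that may be
 * caching translations for the address space, not just the local one.
 */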
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_tlb_mm(mm);
        return hash__flush_tlb_mm(mm);
}

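/* Flush a single user page from the TLBs of all CPUs. */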
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
}
#else
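/* Without SMP, the global flushes reduce to their local counterparts. */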
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)       local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
        /*
         * Flush the page table walk cache on freeing a page table. The
         * upper/higher-level page table entry has already been marked
         * none by this point, so it is safe to flush the PWC here.
         * Only radix has a page walk cache; there is nothing to do for
         * hash.
         */
        if (!radix_enabled())
                return;

        radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */