linux/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

/* Number of invalidations a per-CPU batch can hold before it must be flushed */
#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;		/* batching enabled (lazy MMU mode) */
	unsigned long		index;		/* number of entries queued below */
	struct mm_struct	*mm;		/* address space the entries belong to */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];	/* real PTEs to invalidate */
	unsigned long		vpn[PPC64_TLB_BATCH_NR];	/* matching virtual page numbers */
	unsigned int		psize;		/* page size of the queued entries */
	int			ssize;		/* segment size of the queued entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

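/*
 * Illustrative sketch only, not part of the original header: assuming
 * the usual batching scheme, callers queue (vpn, pte) pairs into the
 * per-CPU batch while it is active and drain it once it fills up.
 * The real queueing is done by the hash TLB code (hpte_need_flush());
 * the helper name below is made up for illustration.
 */
static inline void example_hash_batch_add(struct mm_struct *mm,
					  unsigned long vpn, real_pte_t rpte,
					  unsigned int psize, int ssize)
{
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long i = batch->index;

	/* All entries in one batch share the same mm, psize and ssize */
	batch->mm = mm;
	batch->psize = psize;
	batch->ssize = ssize;
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;

	/* Drain once the batch is full */
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
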
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	/* Batching is a hash-MMU optimisation; radix flushes directly */
	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	/* Drain anything queued while the batch was active */
	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
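
/*
 * Illustrative sketch only, not part of the original header: the
 * generic mm code brackets batched PTE updates with the hooks above.
 * While the batch is active, hash invalidations are queued in
 * ppc64_tlb_batch instead of being issued one at a time; leaving lazy
 * mode drains whatever is still pending.
 */
static inline void example_lazy_mmu_section(void)
{
	arch_enter_lazy_mmu_mode();	/* start queueing into ppc64_tlb_batch */

	/* ... set_pte_at()/pte_clear() style updates would go here ... */

	arch_leave_lazy_mmu_mode();	/* drains via __flush_tlb_pending() */
}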

/*
 * Flush individual entries out of the hash page table (and hence the
 * hardware TLB).
 */
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
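
/*
 * Illustrative sketch only, not part of the original header: draining
 * a batch conceptually takes the single-page path for one entry and
 * the range path otherwise, roughly what __flush_tlb_pending() does.
 * The flags argument and the local-vs-global decision are simplified
 * away here.
 */
static inline void example_drain_batch(struct ppc64_tlb_batch *batch, int local)
{
	unsigned long i = batch->index;

	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, 0 /* flags */);
	else if (i > 1)
		flush_hash_range(i, local);
	batch->index = 0;
}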

/*
 * With the hash MMU, translations are invalidated when the
 * corresponding entries are removed from the hash page table, so the
 * generic mm/page/range flush hooks below have nothing left to do.
 */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}

struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */