linux/arch/sparc/include/asm/tlb_64.h

#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H

#include <linux/swap.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

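/*
 * Capacity of the per-cpu batch of virtual addresses awaiting a TLB
 * flush; once a batch fills, flush_tlb_pending() pushes it to the
 * hardware (the batching itself lives in arch/sparc/mm/, not here).
 */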
#define TLB_BATCH_NR	192

/*
 * On UP no other CPU can hold a stale TLB entry for a page we are
 * about to free, so we need not batch page freeing behind the TLB
 * flush: pages are released immediately ("fast mode") and the batch
 * size collapses to a single page.
 */
#ifdef CONFIG_SMP
  #define FREE_PTE_NR	506
  #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(bp) 1
#endif

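/*
 * Per-cpu TLB teardown state: the mm being torn down, a batch of
 * pages whose mappings are gone but which cannot be freed until the
 * matching TLB entries are, and a batch of virtual addresses still
 * awaiting a TLB flush.
 */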
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int pages_nr;		/* pages[] fill level; ~0U => fast mode */
	unsigned int need_flush;	/* nonzero when pages[] holds entries */
	unsigned int fullmm;		/* tearing down the entire mm */
	unsigned int tlb_nr;		/* vaddrs[] fill level */
	unsigned long vaddrs[TLB_BATCH_NR];
	struct page *pages[FREE_PTE_NR];
};

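/* One gather per cpu; claiming it via get_cpu_var() disables preemption. */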
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP
extern void smp_flush_tlb_pending(struct mm_struct *,
				  unsigned long, unsigned long *);
#endif

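/*
 * __flush_tlb_pending() is the low-level primitive taking a context,
 * an entry count, and a vaddr array; flush_tlb_pending() drains the
 * current cpu's batch.  Both are implemented in arch/sparc/mm/.
 */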
extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
extern void flush_tlb_pending(void);

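/*
 * Begin an unmap/teardown operation: claim this cpu's mmu_gather
 * (get_cpu_var() pins us to the cpu).  With only one online cpu the
 * ~0U sentinel in pages_nr enables fast mode, so pages are freed
 * immediately instead of being batched.
 */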
static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

	BUG_ON(mp->tlb_nr);	/* a previous batch was never flushed */

	mp->mm = mm;
	mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;	/* ~0U == fast mode */
	mp->fullmm = full_mm_flush;

	return mp;
}

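/*
 * Flush any batched TLB entries, then free the pages queued behind
 * them.  A full-mm teardown skips the per-vaddr flush: the context is
 * being retired, so no cpu will use those translations again.
 */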
static inline void tlb_flush_mmu(struct mmu_gather *mp)
{
	if (!mp->fullmm)
		flush_tlb_pending();
	if (mp->need_flush) {
		free_pages_and_swap_cache(mp->pages, mp->pages_nr);
		mp->pages_nr = 0;
		mp->need_flush = 0;
	}
}

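/*
 * Flush every translation for an mm: a cross-call on SMP, a local
 * flush of the mm's hardware context on UP.
 */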
#ifdef CONFIG_SMP
extern void smp_flush_tlb_mm(struct mm_struct *mm);
#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#else
#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
#endif

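/*
 * End the teardown: perform the final flush and page free, trim the
 * page-table cache, and release the per-cpu gather.
 */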
static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(mp);

	if (mp->fullmm)
		mp->fullmm = 0;	/* reset for the gather's next user */

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);	/* re-enables preemption */
}

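/*
 * Queue a no-longer-mapped page for freeing.  In fast mode the page
 * is released immediately; otherwise it is held in pages[] until the
 * TLB flush in tlb_flush_mmu() makes freeing it safe.
 */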
static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
{
	if (tlb_fast_mode(mp)) {
		free_page_and_swap_cache(page);
		return;
	}
	mp->need_flush = 1;
	mp->pages[mp->pages_nr++] = page;
	if (mp->pages_nr >= FREE_PTE_NR)
		tlb_flush_mmu(mp);
}

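/*
 * Page-table pages go straight back to the allocator; per-entry TLB
 * bookkeeping is a no-op here because sparc64 batches the virtual
 * addresses from the pte update paths instead.
 */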
#define tlb_remove_tlb_entry(mp, ptep, addr) do { } while (0)
#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(mp, pudp, addr) __pud_free_tlb(mp, pudp, addr)

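/*
 * No per-vma start/end work is needed either: flushing is driven by
 * the batched vaddrs and by do_flush_tlb_mm(), not by vma ranges.
 */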
#define tlb_migrate_finish(mm)	do { } while (0)
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

#endif /* _SPARC64_TLB_H */