#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/mmu_context.h>

/* Maximum number of virtual addresses queued in one struct tlb_batch. */
#define TLB_BATCH_NR 192
11struct tlb_batch {
12 struct mm_struct *mm;
13 unsigned long tlb_nr;
14 unsigned long active;
15 unsigned long vaddrs[TLB_BATCH_NR];
16};
17
extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/*
 * flush_tlb_mm - generic mm-wide TLB flush hook; intentionally a no-op.
 *
 * NOTE(review): sparc64 appears to defer user TLB invalidation to the
 * batched path (struct tlb_batch / flush_tlb_pending) rather than
 * flushing eagerly here -- confirm against the arch's mm/tlb.c.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
27
/*
 * flush_tlb_page - generic single-page TLB flush hook; intentionally a
 * no-op (see note on flush_tlb_mm: flushing is presumably handled by
 * the pending-flush batch -- TODO confirm).
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}
32
/*
 * flush_tlb_range - generic range TLB flush hook; intentionally a
 * no-op (see note on flush_tlb_mm: flushing is presumably handled by
 * the pending-flush batch -- TODO confirm).
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
37
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

extern void flush_tlb_pending(void);
extern void arch_enter_lazy_mmu_mode(void);
extern void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)

/* Low-level flush primitives implemented in arch assembly/C. */
extern void __flush_tlb_all(void);
extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
49
#ifndef CONFIG_SMP

#define flush_tlb_kernel_range(start,end) \
do {	flush_tsb_kernel_range(start,end); \
	__flush_tlb_kernel_range(start,end); \
} while (0)

57static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
58{
59 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
60}

#else

extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define flush_tlb_kernel_range(start, end) \
do {	flush_tsb_kernel_range(start,end); \
	smp_flush_tlb_kernel_range(start, end); \
} while (0)

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */