1#ifndef _PARISC_TLBFLUSH_H
2#define _PARISC_TLBFLUSH_H
3
4
5
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <asm/mmu_context.h>
9
10
11
12
13
14
15
16
/* Global lock serializing software TLB purge sequences (defined in
 * arch code elsewhere). */
extern spinlock_t pa_tlb_lock;

/* Bracket a TLB purge sequence: take pa_tlb_lock with local interrupts
 * disabled, so a purge cannot be interleaved with another CPU's purge
 * or with one issued from an interrupt handler. */
#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)

/* Flush the entire TLB; the _local variant operates on the calling CPU
 * only (takes an unused void * so it can be used as an IPI callback --
 * NOTE(review): inferred from the signature, confirm against caller). */
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/*
 * Flush all TLB entries belonging to a user address space.
 *
 * NOTE(review): the "1 ||" in the conditional below forces the
 * flush_tlb_all() fallback even on UP kernels, making the sid-recycling
 * branch under #else dead code -- presumably because recycling a space
 * id without a global flush is unsafe; confirm before re-enabling.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	/* The kernel's own mm must never be flushed through this path. */
	BUG_ON(mm == &init_mm);

#if 1 || defined(CONFIG_SMP)
	/* Safe fallback: flush the whole TLB. */
	flush_tlb_all();
#else
	/* Disabled: instead of purging entries one by one, retire the
	 * mm's current space id and assign a fresh one; stale
	 * translations tagged with the old sid become unreachable. */
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		/* If this mm is live on this CPU, load the new space id
		 * into the hardware context immediately. */
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}
60
/*
 * Purge the TLB entries for a single user page.  Both the data and
 * instruction TLBs are purged unconditionally -- for one page this is
 * cheaper than checking whether the CPU has a split TLB.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	unsigned long flags;

	/* Make prior memory updates visible before the purge. */
	mb();
	/* Select the mm's address space by loading its space id into
	 * space register 1, so the purges below hit the right space. */
	mtsp(vma->vm_mm->context,1);
	/* Purge insns must not interleave with another CPU's purge:
	 * hold pa_tlb_lock with interrupts off for the sequence. */
	purge_tlb_start(flags);
	pdtlb(addr);	/* purge data TLB entry */
	pitlb(addr);	/* purge instruction TLB entry */
	purge_tlb_end(flags);
}
75
/* Flush all TLB entries for [start, end) in the address space
 * identified by sid; implemented out of line in arch code. */
void __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

/* Range flush for a user vma: tag the purge with the owning mm's
 * space id. */
#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)

/* Kernel mappings use space id 0. */
#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
82
83#endif
84