1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_TLBFLUSH_H
12#define _ASM_TLBFLUSH_H
13
14#include <linux/mm.h>
15#include <asm/processor.h>
16
/*
 * Per-CPU TLB bookkeeping: which mm's translations this CPU's TLB currently
 * holds, plus a state word used by the lazy-TLB tracking code.
 */
struct tlb_state {
	struct mm_struct *active_mm;	/* mm whose mappings are live in this CPU's TLB */
	int state;			/* lazy-TLB state — NOTE(review): value set defined by users of cpu_tlbstate; confirm against callers */
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
22
23
24
25
/**
 * local_flush_tlb - flush the entire TLB on the current CPU only
 *
 * Performs a read-modify-write of the MMU control register: load MMUCTR,
 * OR in MMUCTR_IIV|MMUCTR_DIV, store it back.  Going by the macro names
 * these are the instruction- and data-TLB invalidate bits, so this drops
 * every entry from both TLBs — TODO confirm against the MN10300 MMU manual.
 *
 * The "memory" clobber forces the compiler to treat this as a full memory
 * barrier so no memory access is reordered across the flush; "cc" covers
 * the flags modified by the OR.
 */
static inline void local_flush_tlb(void)
{
	int w;	/* scratch register for the read-modify-write */
	asm volatile(
		" mov %1,%0 \n"
		" or %2,%0 \n"
		" mov %0,%1 \n"
		: "=d"(w)
		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
		: "cc", "memory");
}
37
38
39
40
/**
 * local_flush_tlb_all - flush all translations from the current CPU's TLB
 *
 * Thin alias for local_flush_tlb(); on this CPU a full flush is the only
 * whole-TLB primitive available, so "all" and the plain flush are the same
 * operation.
 */
static inline void local_flush_tlb_all(void)
{
	local_flush_tlb();
}
45
46
47
48
/**
 * local_flush_tlb_one - flush the translation for one address on this CPU
 * @addr: virtual address whose mapping should be dropped (currently unused)
 *
 * Deliberately over-approximates: rather than evicting the single entry,
 * it flushes the whole local TLB.  That is always correct (stale entries
 * for @addr cannot survive), just broader than strictly necessary.
 */
static inline void local_flush_tlb_one(unsigned long addr)
{
	local_flush_tlb();
}
53
54
55
56
57
58
/**
 * local_flush_tlb_page - evict one page's translation from this CPU's TLB
 * @mm:   address space the page belongs to
 * @addr: virtual address within the page to be flushed
 *
 * Rounds @addr down to its page, then pokes the TLB probe registers with
 * IRQs disabled so the probe/clear sequence cannot be interrupted.
 *
 * With CONFIG_MN10300_TLB_USE_PIDR the lookup is qualified by the mm's
 * per-CPU TLB PID; a PID of 0 means this mm has no context on this CPU,
 * so there is nothing to flush and the body is skipped.  Without PIDR
 * support, cnx stays 1 and the flush always proceeds.
 */
static inline
void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pteu, flags, cnx;

	addr &= PAGE_MASK;	/* flush operates on whole pages */

	local_irq_save(flags);

	cnx = 1;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
	cnx = mm->context.tlbpid[smp_processor_id()];
#endif
	if (cnx) {
		pteu = addr;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
		pteu |= cnx & xPTEU_PID;	/* tag the probe with the mm's TLB PID */
#endif
		/*
		 * Writing the PTEU registers presumably makes the TLB look up
		 * this address; if the matching PTEL entry is valid, zeroing
		 * it invalidates the entry.  Done separately for the
		 * instruction and data TLBs.  NOTE(review): exact register
		 * semantics taken on trust — confirm against the MN10300
		 * MMU documentation.
		 */
		IPTEU = pteu;
		DPTEU = pteu;
		if (IPTEL & xPTEL_V)
			IPTEL = 0;
		if (DPTEL & xPTEL_V)
			DPTEL = 0;
	}
	local_irq_restore(flags);
}
86
87
88
89
90
91
92
93
94
95
96
97#ifdef CONFIG_SMP
98
99#include <asm/smp.h>
100
/*
 * SMP: these flush operations must reach every CPU's TLB, which needs
 * cross-CPU IPIs, so they are real functions implemented out of line.
 */
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()
107
/*
 * SMP range flush: over-approximated by flushing the whole mm on all CPUs
 * rather than iterating the [start, end) pages — correct, just broad.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
113
114#else
115
/*
 * UP: only this CPU has a TLB, so the global flush is just the local one.
 * Preemption is disabled so we stay on the CPU whose TLB we are flushing.
 */
static inline void flush_tlb_all(void)
{
	preempt_disable();
	local_flush_tlb_all();
	preempt_enable();
}
122
/*
 * UP mm flush: @mm is intentionally ignored — the whole local TLB is
 * flushed, which necessarily removes all of @mm's entries too.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();
	local_flush_tlb_all();
	preempt_enable();
}
129
/*
 * UP range flush: @vma/@start/@end are ignored; a full local flush is a
 * correct (if coarse) superset of flushing just the requested range.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	preempt_disable();
	local_flush_tlb_all();
	preempt_enable();
}
137
/* UP: single-page flush only needs the local targeted primitive. */
#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
#define flush_tlb() flush_tlb_all()
140
141#endif
142
/*
 * Kernel-address range flush (SMP and UP): no finer-grained kernel-range
 * primitive exists here, so flush everything.  @start/@end are unused.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	flush_tlb_all();
}
148
/*
 * Intentional no-op: nothing needs doing here when page tables for
 * [start, end) in @mm are torn down — presumably any needed invalidation
 * is covered by the other flush hooks.  NOTE(review): confirm against the
 * generic TLB-flushing interface documentation.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
}
153
154#endif
155