#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>
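/*
 * One block of L1 scratchpad SRAM can be shared by several tasks as a
 * stack area.  l1_stack_base/l1_stack_len describe that block,
 * nr_l1stack_tasks counts its current users, and current_l1_stack_save
 * points at the save area backing the stack image now resident in L1.
 */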
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void *);
extern void *l1sram_alloc_max(void *);

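/*
 * Drop one user of the shared L1 stack block; release the SRAM back to
 * the allocator when the last user goes away.
 */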
static inline void free_l1stack(void)
{
        nr_l1stack_tasks--;
        if (nr_l1stack_tasks == 0) {
                l1sram_free(l1_stack_base);
                l1_stack_base = NULL;
                l1_stack_len = 0;
        }
}

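/*
 * Reserve the shared L1 stack block for another task.  The first user
 * grabs the largest free chunk of L1 SRAM; later users share it.
 * Returns the usable length, or 0 if the block is smaller than the
 * requested length.
 */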
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
        if (nr_l1stack_tasks == 0) {
                l1_stack_base = l1sram_alloc_max(&l1_stack_len);
                if (!l1_stack_base)
                        return 0;
        }

        if (l1_stack_len < length) {
                if (nr_l1stack_tasks == 0)
                        l1sram_free(l1_stack_base);
                return 0;
        }
        *stack_base = (unsigned long)l1_stack_base;
        nr_l1stack_tasks++;
        return l1_stack_len;
}

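/*
 * Make the stack image at sp_base the one resident in L1: spill the
 * current occupant back to its save area first, then copy the new
 * task's image into the SRAM block.
 */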
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
        if (current_l1_stack_save)
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
        return 1;
}

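/*
 * No per-mm state needs tearing down on deactivate; activating an mm
 * is just a switch with no task attached.
 */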
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next)	switch_mm(prev, next, NULL)

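/*
 * Core of the context switch: reload the MPU protection masks when the
 * outgoing mm's masks are the ones currently programmed, and swap the
 * L1 stack image if the incoming mm owns one.
 */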
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
                               struct task_struct *tsk)
{
#ifdef CONFIG_MPU
        unsigned int cpu = smp_processor_id();
#endif
        if (prev_mm == next_mm)
                return;
#ifdef CONFIG_MPU
        if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
                flush_switched_cplbs(cpu);
                set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
        }
#endif

#ifdef CONFIG_APP_STACK_L1
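        /* L1 stack switching: nothing to do if the next task has no L1
         * stack or already owns the image that is resident. */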
        if (!next_mm->context.l1_stack_save)
                return;
        if (next_mm->context.l1_stack_save == current_l1_stack_save)
                return;
        if (current_l1_stack_save)
                memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
        current_l1_stack_save = next_mm->context.l1_stack_save;
        memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

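/*
 * With the MPU enabled the switch must not be preempted while the
 * CPLBs and mask pointers are being updated, so wrap it in
 * lock_mm_switch() (hard IRQ protection when the I-pipe is in use).
 */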
#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long flags;

        lock_mm_switch(flags);
        __switch_mm(prev, next, tsk);
        unlock_mm_switch(flags);
}

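/*
 * Record the VM_READ/VM_WRITE/VM_EXEC permissions of one page in the
 * mm's rwx bitmasks.  The mask area holds three bitmaps of
 * page_mask_nelts words each (read, write, execute, in that order);
 * addresses in the async banks are remapped to the slots just above
 * the end of DRAM.
 */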
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
                                unsigned long flags)
{
        unsigned long *mask = mm->context.page_rwx_mask;
        unsigned long page;
        unsigned long idx;
        unsigned long bit;

        if (unlikely(addr >= ASYNC_BANK0_BASE &&
                     addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
                page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
        else
                page = addr >> 12;
        idx = page >> 5;
        bit = 1 << (page & 31);

        if (flags & VM_READ)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_WRITE)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
        mask += page_mask_nelts;
        if (flags & VM_EXEC)
                mask[idx] |= bit;
        else
                mask[idx] &= ~bit;
}

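/*
 * If this mm's masks are the ones currently programmed into the data
 * CPLBs, flush the stale entries and reload so that new permissions
 * take effect immediately.
 */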
static inline void update_protections(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();

        if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
                flush_switched_cplbs(cpu);
                set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
        }
}
#else
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        __switch_mm(prev, next, tsk);
}
#endif /* CONFIG_MPU */

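/* Blackfin has no TLB, so entering lazy TLB mode is a no-op. */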
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

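/*
 * Set up a fresh context at fork/exec time.  With the MPU enabled this
 * allocates the rwx bitmasks, zeroed so that no access is permitted
 * until protect_page() fills them in.
 */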
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
        unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);

        /* Bail out instead of dereferencing a failed allocation.  */
        if (!p)
                return -ENOMEM;
        mm->context.page_rwx_mask = (unsigned long *)p;
        memset(mm->context.page_rwx_mask, 0,
               page_mask_nelts * 3 * sizeof(long));
#endif
        return 0;
}

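/*
 * Tear a context down: detach its L1 stack, free any SRAM chunks still
 * on the mm's sram_list, and release the rwx bitmasks.
 */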
static inline void destroy_context(struct mm_struct *mm)
{
        struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
        unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
        if (current_l1_stack_save == mm->context.l1_stack_save)
                current_l1_stack_save = NULL;
        if (mm->context.l1_stack_save)
                free_l1stack();
#endif

        while ((tmp = mm->context.sram_list)) {
                mm->context.sram_list = tmp->next;
                sram_free(tmp->addr);
                kfree(tmp);
        }
#ifdef CONFIG_MPU
        if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
                current_rwx_mask[cpu] = NULL;
        free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

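/*
 * Helpers for callers that need an mm switch protected against I-pipe
 * domain preemption; the _cond variants compile down to ordinary (or
 * no) IRQ handling when the I-pipe is not in use.
 */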
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */