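/*
 * SH-5 (sh64) cache management: D-cache purging via a dummy "alloco"
 * buffer and coloured DTLB mappings, and I-cache invalidation with icbi
 * or a whole-cache invalidate through ICCR0.
 */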
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

extern void __weak sh4__flush_region_init(void);

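/* Wired DTLB slot, reserved at init time, used for temporary cache-purge mappings. */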
static unsigned long long dtlb_cache_slot;

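/*
 * Map and unmap a page through the wired DTLB slot.  The mapping is set
 * up with interrupts disabled and interrupts are only re-enabled when
 * the slot is torn down, so the two calls must always be paired.
 */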
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}

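/* Invalidate the whole I-cache by setting the ICI bit in ICCR0 and issuing a synci. */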
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	local_irq_save(flags);

	__asm__ __volatile__ (
		"getcfg %3, 0, %0\n\t"
		"or %0, %2, %0\n\t"
		"putcfg %3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

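/*
 * Invalidate the I-cache for the kernel address range [start, end],
 * one cache line at a time.
 */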
static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

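/*
 * Invalidate one page's worth of I-cache for a user mapping.  If the
 * current ASID differs from the VMA's, temporarily switch to the VMA's
 * ASID (with interrupts disabled) so that the icbi instructions hit
 * through the user mapping.  Each loop iteration covers 128 bytes,
 * i.e. four 32-byte cache lines.
 */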
static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;

	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}

	while (addr < end_addr) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}

	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

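/*
 * Invalidate a range of user addresses from the I-cache.  For 64 pages
 * or more it is cheaper to invalidate the whole I-cache; otherwise walk
 * the VMAs in the range under the mm's ASID and invalidate only the
 * executable pages.
 */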
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;

			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start < vma->vm_start)) {
				/*
				 * find_vma() returns the first VMA ending
				 * above aligned_start, so an address below
				 * vm_start lies in an unmapped hole; step
				 * over it a page at a time.
				 */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end;
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

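/*
 * Invalidate a small range of the current process's address space from
 * the I-cache using the natural user addresses.  No ASID switch is
 * performed, so this relies on being called in the context of the
 * process that owns the range.
 */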
static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}

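/*
 * Dummy buffer used for purging the D-cache by displacement: issuing
 * alloco on addresses inside this buffer evicts (writing back if dirty)
 * whatever currently occupies the corresponding cache sets.  It is
 * sized to cover every set in every way, plus alignment slack.
 */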
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

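/*
 * Purge n_sets D-cache sets starting at sets_to_purge_base, by touching
 * one dummy-buffer line per way in each set.
 */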
static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Issue one alloco per way so that every line of the
		 * target set gets displaced (written back if dirty).
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco");
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
			 cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * alloco does not displace lines when the cache is
			 * in write-through mode, so force the line in with
			 * a read instead.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}
}

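/*
 * Purge the entire contents of the D-cache by displacing every set
 * through the dummy buffer.
 */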
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

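/*
 * Base of an otherwise unused kernel virtual region in which temporary,
 * correctly coloured mappings are created (via the wired DTLB slot) for
 * purging pages by physical address.
 */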
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

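/*
 * Purge one page of physical memory from the D-cache at the cache
 * colour implied by eaddr: map it through the wired DTLB slot at a
 * magic address of the same colour, then sweep it with ocbp one line
 * at a time.
 */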
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

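/*
 * Purge one physical page from the D-cache at every possible cache
 * colour, since the page may have been mapped at any synonym address.
 */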
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

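/*
 * Purge the user pages in [addr, end) by walking the page tables and
 * purging each present page at the colour of its user virtual address.
 * The caller guarantees the range does not cross a pmd boundary.
 */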
static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return;

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

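/*
 * Purge a user address range from the D-cache.  For 64 pages or more,
 * or for ranges crossing a pmd boundary, purging the whole cache is
 * cheaper and simpler; otherwise purge page by page through the
 * coloured magic mapping.
 */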
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		start &= PAGE_MASK;
		end = PAGE_ALIGN(end);
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}

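/*
 * Invalidate the entire contents of both caches, after writing back any
 * dirty D-cache data.
 */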
static void sh5_flush_cache_all(void *unused)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

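/*
 * Flush an entire user address space from the D-cache.  There is no
 * cheap way to pick out only the lines belonging to a particular mm,
 * so the whole cache is purged.
 */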
static void sh5_flush_cache_mm(void *unused)
{
	sh64_dcache_purge_all();
}

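/*
 * Invalidate (from both caches) the range [start, end) of user virtual
 * addresses belonging to vma->vm_mm, writing back any dirty D-cache
 * data first.
 */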
static void sh5_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

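/*
 * Invalidate one user page from both caches: the D-cache by physical
 * address (covering every colour), and the I-cache through the user
 * mapping when the VMA is executable.
 */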
static void sh5_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long eaddr, pfn;

	vma = data->vma;
	eaddr = data->addr1;
	pfn = data->addr2;

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

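/* Write back and invalidate one page's worth of D-cache, by physical address. */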
static void sh5_flush_dcache_page(void *page)
{
	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
	wmb();
}

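/*
 * Make the range [start, end) of kernel instructions visible to fetch:
 * write back and invalidate the D-cache for the range, then invalidate
 * the matching I-cache lines.
 */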
static void sh5_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	/* __flush_purge_region() takes a length, not an end address. */
	__flush_purge_region((void *)start, end - start);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

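/*
 * Flush a signal trampoline from the caches: write back one cache
 * line's worth of D-cache at vaddr and invalidate the corresponding
 * I-cache range for the current process.
 */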
static void sh5_flush_cache_sigtramp(void *vaddr)
{
	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

	__flush_wback_region(vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}

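/* Hook up the SH-5 implementations of the local cache flushing operations. */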
void __init sh5_cache_init(void)
{
	local_flush_cache_all		= sh5_flush_cache_all;
	local_flush_cache_mm		= sh5_flush_cache_mm;
	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
	local_flush_cache_page		= sh5_flush_cache_page;
	local_flush_cache_range		= sh5_flush_cache_range;
	local_flush_dcache_page		= sh5_flush_dcache_page;
	local_flush_icache_range	= sh5_flush_icache_range;
	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;

	/* Reserve a wired DTLB slot for the cache purge routines above. */
	dtlb_cache_slot			= sh64_get_wired_dtlb_entry();

	sh4__flush_region_init();
}