/*
 * ARC700 (VIPT cache) management routines: boot-time probe/setup plus the
 * line/entire-cache invalidate and writeback primitives used by the
 * kernel's cache-flush API.
 *
 * NOTE(review): the original file-header comment block was lost (only bare
 * line numbers remained); this header is a reconstruction — restore the
 * authoritative header (authorship/license) from version control.
 */
64#include <linux/module.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/cache.h>
68#include <linux/mmu_context.h>
69#include <linux/syscalls.h>
70#include <linux/uaccess.h>
71#include <linux/pagemap.h>
72#include <asm/cacheflush.h>
73#include <asm/cachectl.h>
74#include <asm/setup.h>
75
76
/* Instruction cache auxiliary registers */
#define ARC_REG_IC_BCR 0x77	/* Build Configuration Register (geometry) */
#define ARC_REG_IC_IVIC 0x10	/* Invalidate entire I$ */
#define ARC_REG_IC_CTRL 0x11	/* I$ control (enable/disable) */
#define ARC_REG_IC_IVIL 0x19	/* Invalidate one I$ line */
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG 0x1E	/* Physical tag for line ops (MMU v3+) */
#endif

/* Bit in ARC_REG_IC_CTRL */
#define IC_CTRL_CACHE_DISABLE 0x1

/* Data cache auxiliary registers */
#define ARC_REG_DC_BCR 0x72	/* Build Configuration Register (geometry) */
#define ARC_REG_DC_IVDC 0x47	/* Invalidate entire D$ */
#define ARC_REG_DC_CTRL 0x48	/* D$ control */
#define ARC_REG_DC_IVDL 0x4A	/* Invalidate one D$ line */
#define ARC_REG_DC_FLSH 0x4B	/* Flush (writeback) entire D$ */
#define ARC_REG_DC_FLDL 0x4C	/* Flush one D$ line */
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG 0x5C	/* Physical tag for line ops (MMU v3+) */
#endif

/* Bits in ARC_REG_DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH 0x40	/* "invalidate" ops also writeback */
#define DC_CTRL_FLUSH_STATUS 0x100	/* flush in progress (poll until clear) */
102
/*
 * Format a human-readable summary of the I$/D$ geometry into @buf
 * (at most @len bytes) and return @buf.
 *
 * NOTE(review): @cpu_id is ignored — the info printed is always for the
 * *current* CPU (smp_processor_id()); confirm that is intended for the
 * callers passing an explicit id.
 */
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

	/*
	 * Print one cache's line: "N/A" if hardware reports no cache
	 * (ver == 0), else size/ways/line-length, with a marker when the
	 * kernel config leaves the present cache unused.
	 */
#define PR_CACHE(p, enb, str) \
{ \
	if (!(p)->ver) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
	else \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
			enb ? "" : "DISABLED (kernel-build)"); \
}

	PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
		"I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
		"D-Cache");

	return buf;
}
126
127
128
129
130
131
132void read_decode_cache_bcr(void)
133{
134 struct cpuinfo_arc_cache *p_ic, *p_dc;
135 unsigned int cpu = smp_processor_id();
136 struct bcr_cache {
137#ifdef CONFIG_CPU_BIG_ENDIAN
138 unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
139#else
140 unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
141#endif
142 } ibcr, dbcr;
143
144 p_ic = &cpuinfo_arc700[cpu].icache;
145 READ_BCR(ARC_REG_IC_BCR, ibcr);
146
147 BUG_ON(ibcr.config != 3);
148 p_ic->assoc = 2;
149 p_ic->line_len = 8 << ibcr.line_len;
150 p_ic->sz = 0x200 << ibcr.sz;
151 p_ic->ver = ibcr.ver;
152
153 p_dc = &cpuinfo_arc700[cpu].dcache;
154 READ_BCR(ARC_REG_DC_BCR, dbcr);
155
156 BUG_ON(dbcr.config != 2);
157 p_dc->assoc = 4;
158 p_dc->line_len = 16 << dbcr.line_len;
159 p_dc->sz = 0x200 << dbcr.sz;
160 p_dc->ver = dbcr.ver;
161}
162
163
164
165
166
167
168
169
170void arc_cache_init(void)
171{
172 unsigned int cpu = smp_processor_id();
173 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
174 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
175 unsigned int dcache_does_alias, temp;
176 char str[256];
177
178 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
179
180 if (!ic->ver)
181 goto chk_dc;
182
183#ifdef CONFIG_ARC_HAS_ICACHE
184
185 if (ic->line_len != ARC_ICACHE_LINE_LEN)
186 panic("Cache H/W doesn't match kernel Config");
187
188 if (ic->ver != CONFIG_ARC_MMU_VER)
189 panic("Cache ver doesn't match MMU ver\n");
190#endif
191
192
193 temp = read_aux_reg(ARC_REG_IC_CTRL);
194
195#ifdef CONFIG_ARC_HAS_ICACHE
196 temp &= ~IC_CTRL_CACHE_DISABLE;
197#else
198 temp |= IC_CTRL_CACHE_DISABLE;
199#endif
200
201 write_aux_reg(ARC_REG_IC_CTRL, temp);
202
203chk_dc:
204 if (!dc->ver)
205 return;
206
207#ifdef CONFIG_ARC_HAS_DCACHE
208 if (dc->line_len != ARC_DCACHE_LINE_LEN)
209 panic("Cache H/W doesn't match kernel Config");
210
211
212 dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
213
214 if (dcache_does_alias && !cache_is_vipt_aliasing())
215 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
216 else if (!dcache_does_alias && cache_is_vipt_aliasing())
217 panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
218#endif
219
220
221
222
223
224 temp = read_aux_reg(ARC_REG_DC_CTRL);
225 temp &= ~DC_CTRL_INV_MODE_FLUSH;
226
227#ifdef CONFIG_ARC_HAS_DCACHE
228
229 write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
230#else
231
232 write_aux_reg(ARC_REG_DC_FLSH, 0x1);
233
234 write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
235#endif
236
237 return;
238}
239
/* Cache-line operation selectors for the helpers below */
#define OP_INV 0x1		/* invalidate (discard) */
#define OP_FLUSH 0x2		/* writeback only */
#define OP_FLUSH_N_INV 0x3	/* writeback, then invalidate */
243
244#ifdef CONFIG_ARC_HAS_DCACHE
245
246
247
248
249
250static inline void wait_for_flush(void)
251{
252 while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
253 ;
254}
255
256
257
258
259
260
261
/*
 * Operate on the entire D$: OP_INV, OP_FLUSH or OP_FLUSH_N_INV.
 * For flush-n-inv, temporarily set DC_CTRL_INV_MODE_FLUSH so that the
 * single "invalidate all" command also writes dirty lines back first.
 */
static inline void __dc_entire_op(const int cacheop)
{
	/* self-init silences a maybe-uninitialized warning: tmp is written
	 * and read only under the same (cacheop == OP_FLUSH_N_INV) guard */
	unsigned int tmp = tmp;
	int aux;

	if (cacheop == OP_FLUSH_N_INV) {
		/* switch invalidate to flush-before-invalidate mode,
		 * saving the old control value to restore afterwards */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	/* pick the whole-cache command register: invalidate vs flush */
	if (cacheop & OP_INV)
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	/* flushes complete asynchronously; poll until done */
	if (cacheop & OP_FLUSH)
		wait_for_flush();

	/* restore the original invalidate semantics */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}
291
292
293
294
295
296
297
/*
 * Issue a per-line D$ op (via @aux_reg) over [paddr, paddr+sz).
 * @vaddr is the virtual alias used for cache indexing (VIPT).
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/*
	 * Fast path: a compile-time PAGE_SIZE span is already page (hence
	 * line) aligned. Otherwise round the region out to whole cache
	 * lines: grow sz by the start offset, align start addresses down.
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMU v1/v2: fold (some of) the vaddr into paddr's low bits for
	 * indexing — single register carries both.
	 * NOTE(review): assumes bits [4:0] above PAGE_SHIFT suffice for the
	 * index; confirm against the cache geometry. */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMU v3: physical tag and virtual index are programmed
		 * via two separate registers, PTAG first */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
338
339
/* Kernel addresses are identity-mapped here, so use paddr as vaddr too */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
341
342
343
344
/*
 * D$ line-range operation with IRQs disabled (the PTAG/index register
 * pair and the INV_MODE_FLUSH toggle must not be interleaved with another
 * cache op on this CPU).
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	/* tmp = tmp: silence maybe-uninitialized; written/read only under
	 * the same (cacheop == OP_FLUSH_N_INV) guard */
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/* make line-invalidate also write back dirty data first;
		 * save old DC_CTRL to restore below */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	/* per-line command register: invalidate vs flush */
	if (cacheop & OP_INV)
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(paddr, vaddr, sz, aux);

	/* flushes are asynchronous; wait for completion */
	if (cacheop & OP_FLUSH)
		wait_for_flush();

	/* restore plain-invalidate semantics */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
380
#else

/* !CONFIG_ARC_HAS_DCACHE: all D$ maintenance compiles away to nothing */
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif
388
389
390#ifdef CONFIG_ARC_HAS_ICACHE
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
/*
 * Invalidate I$ lines covering [paddr, paddr+sz); @vaddr is the virtual
 * alias used for (VIPT) indexing. IRQs are held off across the loop so
 * the PTAG/index register pair is programmed atomically per line.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Fast path: a compile-time PAGE_SIZE span is already aligned.
	 * Otherwise round the region out to whole cache lines.
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMU v1/v2: fold vaddr index bits into paddr's low bits so a
	 * single register write carries both (see __dc_line_loop) */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMU v3: program physical tag, then virtual index */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
479
/* Invalidate the entire I$; the CTRL readback serializes the command
 * (ensures the invalidate has taken effect before returning) */
static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks until invalidate done */
}
485
#else

/* !CONFIG_ARC_HAS_ICACHE: I$ maintenance compiles away to nothing */
#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
/*
 * Called by generic mm when a page's D$ contents may be stale vs its
 * backing data (e.g. after page-cache write).
 *
 * Non-aliasing VIPT: just mark the page "not clean in D$"; the actual
 * flush is deferred (presumably to the fault/mapping path — the flag is
 * consumed elsewhere, outside this file).
 *
 * Aliasing VIPT: if a userspace mapping exists, the kernel alias must be
 * written back now when it is not cache-congruent with the user alias.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* anon pages etc. have no mapping: nothing to keep coherent here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/* no user mapping yet: defer via the dirty flag;
	 * mapped in userspace: flush the kernel alias if incongruent */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel alias paddr vs user alias derived from file index */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
541
542
/* DMA API: writeback + invalidate a kernel-address range (bidirectional) */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);
548
/* DMA API: invalidate a kernel-address range (device -> memory) */
void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);
554
/* DMA API: writeback a kernel-address range (memory -> device) */
void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
560
561
562
563
564
565
566
567
/*
 * Make [kstart, kend) coherent between I$ and D$ after kernel code
 * modification (module load, kprobes, ...). Kernel addresses only.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* user addresses are not handled here
	 * NOTE(review): BUG_ON("string") passes a non-null pointer, i.e. an
	 * always-true condition — this unconditionally BUGs and the return
	 * is unreachable; likely meant print_once + return, confirm. */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* for large ranges a full-cache flush is cheaper than per-line ops */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* linear-mapped kernel memory: identity mapping, vaddr == paddr */
	if (likely(kstart > PAGE_OFFSET)) {
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * vmalloc space: translation is per page, and the (sub-page) range
	 * may span pages that are not physically contiguous — look up each
	 * page's pfn and sync at most up to the page boundary per step.
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
621
622
623
624
625
626
627
628
629
630
631
/*
 * Make a phys/virt range coherent: invalidate the I$ lines and
 * writeback+invalidate the D$ lines, as one IRQ-off unit on this CPU.
 * (__dc_line_op saves/restores IRQ flags itself; the nesting is benign.)
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	local_irq_restore(flags);
}
641
642
/* Invalidate the I$ lines of one page (given its phys addr + virt alias) */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
647
648
649
650
651
/* Writeback + invalidate the D$ lines of one page; vaddr is masked down
 * to its page base so callers may pass any address within the page */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
656
/* Nuke everything: invalidate the whole I$ and flush+invalidate the whole
 * D$, with IRQs off so the two appear as one atomic step on this CPU */
noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}
669
670#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
671
/* VIPT-aliasing config: per-mm flush falls back to a full cache flush
 * (no cheap way to enumerate just this mm's aliases) */
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}
676
677void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
678 unsigned long pfn)
679{
680 unsigned int paddr = pfn << PAGE_SHIFT;
681
682 u_vaddr &= PAGE_MASK;
683
684 ___flush_dcache_page(paddr, u_vaddr);
685
686 if (vma->vm_flags & VM_EXEC)
687 __inv_icache_page(paddr, u_vaddr);
688}
689
/* VIPT-aliasing config: range flush falls back to a full cache flush */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	flush_cache_all();
}
695
/* VIPT-aliasing config: before the kernel touches an anon page also mapped
 * in userspace, write back both the user alias and the kernel alias */
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
	unsigned long u_vaddr)
{
	/* user alias first, then the kernel's own congruent alias */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));

}
704
705#endif
706
/*
 * Copy a user page (COW etc.) while keeping VIPT-aliasing caches coherent:
 * the copy is done through the kernel aliases (kfrom/kto), which may not
 * be cache-congruent with the user address @u_vaddr.
 */
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the source is mapped in userspace at a non-congruent address,
	 * its latest data may sit in the user-alias cache lines: write them
	 * back first so the kernel-alias read below sees current data.
	 * Doing so also leaves the kernel alias needing a cleanup (below).
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * The destination was written via its kernel alias, so the user
	 * alias is stale: mark the page "not clean in D$" — the deferred
	 * flush machinery (keyed off PG_dc_clean) handles it later.
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * Source bookkeeping: if we flushed the user alias above, flush the
	 * kernel alias we just read through too, after which the page is
	 * fully clean; otherwise we can't be sure, so mark it not-clean.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}
750
/* Zero a user page via its kernel alias; mark it not-clean in D$ since the
 * user alias (u_vaddr) may now hold stale lines */
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
756
757
758
759
760
761
/*
 * cacheflush(2) for ARC: the start/sz/flags arguments are accepted but
 * ignored — the whole cache is flushed regardless (simple and always
 * correct, if heavy-handed).
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	flush_cache_all();
	return 0;
}
768