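/*
 * ARC Cache Management
 *
 * Line, page and whole-cache maintenance for the ARC L1 I/D caches and the
 * ARCv2 System Level Cache (SLC).
 */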

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);

	return buf;
}
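
/*
 * Read the cache Build Configuration Registers to discover the geometry
 * (size, associativity, line length) of the per-cpu I/D caches and, on
 * ARCv2, of the shared SLC, and record it in cpuinfo_arc700[].
 */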
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;
		p_dc->vipt = 0;
		p_dc->alias = 0;
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (!is_isa_arcv2())
		return;

	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
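
/*
 * Per-line loop helpers, one flavour per MMU/cache generation.
 *
 * The ARC700 I-cache (and optionally the D-cache) is VIPT: indexed by vaddr
 * but tagged by paddr, so in an aliasing geometry (way size > page size) a
 * line op needs bits of both addresses:
 *
 *  - MMU v1/v2 (__cache_line_loop_v2): the extra vaddr bits above the page
 *    offset are OR'ed into paddr[4:0], which the cache unit otherwise
 *    ignores (they fall inside the line offset), so one aux write per line
 *    suffices.
 *
 *  - MMU v3 (__cache_line_loop_v3): dedicated PTAG aux registers carry the
 *    paddr (tag) while the IVIL/IVDL/FLDL command register is written with
 *    the vaddr (index).
 *
 *  - MMU v4 / ARCv2 (__cache_line_loop_v4): the D-cache is PIPT, so only
 *    the paddr is needed per line.
 */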
static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * A full page is already line aligned; otherwise align the start
	 * address and grow @sz to cover any partially straddled lines.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* fold the vaddr bits above the page offset into paddr[4:0] */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * A full page is already line aligned; otherwise align the start
	 * address and grow @sz to cover any partially straddled lines.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMU v3: paddr (tag) goes in the PTAG reg, vaddr (index) in the
	 * cmd reg. For a full page all lines share the same page-frame tag,
	 * so PTAG only needs to be set up once, outside the loop.
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
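
/*
 * ARCv2 / MMU v4 (HS38): the D-cache is PIPT, so line ops need only the
 * paddr. The I-cache is still VIPT but a non-aliasing configuration is
 * assumed here; an aliasing I-cache falls back to the v3 loop (see
 * arc_cache_init()).
 */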
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE
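
/*
 * The D-cache exposes two commands: FLUSH (writeback) and INV. INV itself
 * has two sub-modes selected by DC_CTRL.IM: plain discard, or flush before
 * invalidate. __before_dc_op()/__after_dc_op() toggle that mode around an
 * operation and wait for a flush to complete.
 */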
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/*
		 * INV sub-mode defaults to plain discard; for flush-n-inv
		 * set DC_CTRL.IM so the INV command writes back dirty lines
		 * before invalidating them.
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush and flush-n-inv both wait for the flush to complete */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* switch back to the default (discard) invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
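
/*
 * Operation on the entire D-cache: invalidate (optionally with writeback,
 * per the mode set up by __before_dc_op()) or plain flush.
 */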
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}
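
/* For kernel mappings the cache index is the same as the paddr */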
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
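
/*
 * D-cache per-line operation on a region: INV (discard or wback-n-discard)
 * or FLUSH (wback), with interrupts disabled around the loop.
 */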
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif

#ifdef CONFIG_ARC_HAS_ICACHE
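
/*
 * Invalidate the entire I-cache; the read-back of IC_CTRL stalls until the
 * invalidate has actually completed.
 */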
static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif

#else

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif
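
/*
 * Region operation on the ARCv2 System Level Cache: program the region
 * START/END registers and let hardware walk the lines. SLC_CTRL_RGN_OP_INV
 * selects invalidate vs flush; SLC_CTRL_IM makes an invalidate write dirty
 * lines back first (flush-n-inv).
 */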
noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * The SLC is shared by all cores, so concurrent aux register
	 * programming from multiple cpus has to be serialized.
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* IM (inv mode): only write back dirty lines if a flush was requested */
	if (!(op & OP_FLUSH))
		ctrl &= ~SLC_CTRL_IM;
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * END has to be set up before START (writing START kicks off the op).
	 * Pad END out by (l2_line_sz - 1) so the last line is covered.
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	/* wait for the busy bit to clear before releasing the lock */
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}

static inline int need_slc_flush(void)
{
	return is_isa_arcv2() && l2_line_sz;
}
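
/***********************************************************
 * Exported APIs
 */

/*
 * Handle D-cache coherency when the kernel writes to/reads from a
 * page-cache page.
 *
 * On a non-aliasing cache the kernel and user mappings are congruent, so the
 * flush can be deferred: just clear PG_dc_clean and let the flush happen
 * later, when a user mapping is actually established (typically via
 * update_mmu_cache()). On an aliasing VIPT cache the kernel mapping is
 * flushed right away if the page is already user mapped at a non-congruent
 * address.
 */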
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	if (!mapping_mapped(mapping)) {
		/* no user mapping yet: just note the K-mapping as dirty */
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {
		/* kernel touching a page that also has a user mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);

	if (need_slc_flush())
		slc_op(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);

	if (need_slc_flush())
		slc_op(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);

	if (need_slc_flush())
		slc_op(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
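
/*
 * Make the I and D caches coherent after kernel code has been modified
 * (module load, kprobes, kgdb, ...). The cache line ops want a paddr: for
 * the linear-mapped kernel (addresses above PAGE_OFFSET) the address can be
 * used as is, while vmalloc'ed module text has to be translated a page at a
 * time.
 */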
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges: just flush everything */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Linear-mapped kernel address: usable directly for the line ops */
	if (likely(kstart > PAGE_OFFSET)) {
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * vmalloc address (module text): the underlying pages are physically
	 * discontiguous, so translate and flush one page (or partial page)
	 * at a time.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
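
/*
 * Bring a region of I and D cache in sync: write back (and invalidate) the
 * D-cache lines, then invalidate the corresponding I-cache lines.
 * @paddr is the physical address, @vaddr the virtual address needed to
 * index the VIPT caches.
 */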
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
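
/*
 * Write back and invalidate the D-cache lines of one page.
 * For a kernel mapping callers pass @vaddr == @paddr.
 */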
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* flush the page via both its user mapping and its kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif
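
/*
 * Copy a user page via its kernel mappings while keeping an aliasing VIPT
 * D-cache coherent: if the source page is already user mapped at an address
 * not congruent with its kernel mapping, the U-mapping is flushed first so
 * the copy reads up-to-date data (and the K-mapping is written back and
 * marked clean afterwards). The destination is marked as having a dirty
 * kernel mapping (PG_dc_clean cleared) so it gets flushed before any user
 * mapping is set up.
 */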
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	clear_bit(PG_dc_clean, &to->flags);

	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
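
/*
 * Explicit cache flush request from user space (the cacheflush syscall),
 * needed e.g. by JITs that generate code on the fly.
 */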
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
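
		/*
		 * An aliasing I-cache on ARCv2 still needs both vaddr and
		 * paddr (cmd reg + PTAG) for line invalidates, which is what
		 * the MMU v3 loop provides, so use it in that case.
		 */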
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* D-cache aliasing only matters on ARCompact; ARCv2 D$ is PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}
}