/*
 * ARC Cache Management
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
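
/*
 * The three __dma_cache_* hooks above are bound exactly once at boot, by
 * arc_cache_init_master() further below, to one of three implementations:
 * L1-only, L1 + SLC, or no-op when hardware IO-Coherency (IOC) snoops DMA
 * traffic. A condensed sketch of that binding (illustrative, see the real
 * selection logic in arc_cache_init_master()):
 *
 *	if (ioc_enable)			__dma_cache_wback = __dma_cache_wback_ioc;
 *	else if (l2_line_sz && slc_enable)
 *					__dma_cache_wback = __dma_cache_wback_slc;
 *	else				__dma_cache_wback = __dma_cache_wback_l1;
 */
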
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}
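
/*
 * Read and decode the Build Configuration Registers (BCRs) that describe
 * the cache geometry. This ARCv2-only probe handles the System Level
 * Cache (SLC), the cluster BCR (whose 'c' bit advertises IO-Coherency
 * hardware) and the uncached/volatile peripheral address range.
 */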
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operations on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
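
/*
 * Background for the three __cache_line_loop_*() flavors below:
 *
 * A VIPT cache is indexed by virtual address but tagged by physical
 * address, so a line op may need both @vaddr (to pick the set) and
 * @paddr (to match the tag). Aliasing arises when the index uses vaddr
 * bits above the page offset. Illustrative arithmetic, mirroring the
 * 'alias' computation in read_decode_cache_bcr() above:
 *
 *	colors = (sz_k / assoc) / TO_KB(PAGE_SIZE)
 *	e.g. 32K 2-way I-cache, 8K page: 32/2/8 = 2 colors -> aliasing
 *	e.g. 32K 4-way D-cache, 8K page: 32/4/8 = 1 color  -> no aliasing
 *
 * The three loop flavors track how successive MMU/cache generations take
 * these addresses: v2 (single address word), v3 (paddr via a PTAG aux
 * reg, vaddr to the cmd reg), v4 (PIPT D-cache: paddr only, plus PAE40
 * high bits via PTAG_HI).
 */
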
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv2 page sized tlb: fold the vaddr-derived index bits into the
	 * op word (the low bits of a line-aligned @paddr are free to carry them)
	 */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
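
/*
 * For ARC700 MMUv3 (and HS38 using the MMUv3 programming model), a line
 * op is a two-register handshake: the physical tag goes into a PTAG aux
 * register while the virtual address (which indexes the cache) goes into
 * the cmd register. When @vaddr and @paddr advance in lockstep across a
 * full page, the PTAG write can be hoisted out of the loop.
 */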
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * see __cache_line_loop_v2() for the rationale.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
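
/*
 * In HS38x (MMUv4) the D-cache is PIPT, so a D-cache line op needs only
 * @paddr; @vaddr below is effectively unused. The I-cache is still VIPT,
 * but in non-aliasing configs vaddr and paddr agree in every index bit,
 * so paddr alone suffices here too (aliasing I-cache configs fall back
 * to the v3 loop, see arc_cache_init_master()). With PAE40, the upper
 * 8 bits of the 40-bit paddr are staged via the PTAG_HI registers.
 */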
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * see __cache_line_loop_v2() for the rationale.
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */
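
/*
 * The DC_CTRL aux register has an "Invalidate Mode" (IM) bit: when set,
 * an invalidate op writes dirty lines back before discarding them; when
 * clear, lines are simply discarded. The pair of helpers below flips IM
 * around a flush-n-inv op and busy-waits for a flush to drain:
 *
 *	__before_dc_op(OP_FLUSH_N_INV);		set IM
 *	__cache_line_loop(...);			issue per-line ops
 *	__after_dc_op(OP_FLUSH_N_INV);		wait, then restore IM
 */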
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait for the flush to drain */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
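
/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback).
 * IRQs are disabled across the loop so the IM mode toggled by
 * __before_dc_op() can't be observed (or clobbered) by an interrupt
 * handler doing its own cache maintenance.
 */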
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else	/* !CONFIG_ARC_HAS_DCACHE */

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif	/* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

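/*
 * Unlike the D-cache ops, an I-cache line invalidate must run on every
 * CPU: each core has a private I-cache and no hardware I-cache coherency
 * is assumed here, so a local-only invalidate would leave stale
 * instructions on the other cores. Hence the on_each_cpu() IPI broadcast
 * in the SMP variant below; UP simply aliases the local version.
 */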
#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
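	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below).
	 */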
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY)
		;
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}
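
/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when
 * kernel writes-to a page.
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, former needs flushing.
 */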
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding the need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
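
/*
 * Exported DMA API: these merely indirect through the function pointers
 * bound at boot (see arc_cache_init_master() below), so the per-call cost
 * on IOC systems is a single no-op call.
 */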
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

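/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */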
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages whose phy pages may not be
	 *     contiguous.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However when called by kprobe for a breakpoint in builtin kernel code,
 *    @vaddr will be paddr only, meaning the cache op will use a paddr to
 *    index the cache (despite VIPT). This is fine since a builtin kernel
 *    page will not have any alternate virtual mappings.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;
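
	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync former to physical page so
	 * that the K-mapping read by copy_page() below sees the right data.
	 *
	 * For a !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0.
	 */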
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other cases
	 * (e.g. read faults which wire in pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
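
/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *
 * 2. Caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All caches need to be disabled when setting up IOC to elide any in-flight
 *    coherency transactions
 *
 * Worked example for the aperture size programmed below: per the decode
 * comment in arc_ioc_setup(), the hardware interprets AP0_SIZE as
 * 2 ^ (SIZE + 2) KB, so for 512 MB of memory:
 *
 *	mem_kb = 512 * 1024 = 2^19 KB
 *	ap_sz  = order_base_2(2^19) - 2 = 19 - 2 = 17 (0x11)
 *	decode check: 2 ^ (17 + 2) KB = 2^19 KB = 512 MB
 */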
noinline void __init arc_ioc_setup(void)
{
	unsigned int ap_sz;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/* IOC Aperture start */
	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);

	/*
	 * IOC Aperture size:
	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
	 */
	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);

	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

	/* Re-enable L1 dcache */
	__dc_enable();
}

void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 is PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_enable)
		arc_ioc_setup();

	if (is_isa_arcv2() && ioc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only master CPU needs to execute rest of function:
	 *  - Assume SMP so all cores will have same cache config so
	 *    any geometry checks will be same for all
	 *  - IOC setup / dma callbacks only need to be done once
	 */
	if (!cpu)
		arc_cache_init_master();
}