#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

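/*
 * Module-scope cache state, decoded from the Build Configuration Registers
 * at boot: SLC (L2) line size, presence of an IO-Coherency unit, the SLC/IOC
 * enable knobs and the uncached peripheral address window.
 * The function pointers below are resolved in arc_cache_init() to the
 * L1-only, L1+SLC or IOC flavours of the DMA cache maintenance helpers.
 */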
static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE;
unsigned long perip_end = 0xFFFFFFFF;

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

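/*
 * Pretty-print the cache geometry (I$/D$/SLC and the peripheral window)
 * of CPU @c into @buf for /proc/cpuinfo and the boot log.
 */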
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}
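/*
 * Read the Cache Build Configuration Registers, decode them and save the
 * geometry (size, associativity, line length, VIPT aliasing) into cpuinfo
 * for later use. No validation is done here, only read/convert of the BCRs.
 */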
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	/* If the cluster has no IO-Coherency unit, don't allow it to be enabled */
	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* Newer HS cores describe the uncached peripheral window in AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* Still newer cores also report the window's upper limit */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2-way set associative */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1, 2, 4, 8 ways */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4-way set associative */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1, 2, 4, 8 ways */
		p_dc->vipt = 0;			/* PIPT D-cache on ARCv2 */
		p_dc->alias = 0;		/* PIPT can't alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}
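/*
 * Cache line operations: the ops below are bit-encoded so OP_FLUSH_N_INV
 * tests true for both OP_INV and OP_FLUSH, while OP_INV_IC selects the
 * I-cache invalidate path.
 */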
#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

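/*
 * Per-line loop helpers, one per MMU/cache generation:
 *
 *  - __cache_line_loop_v2: ARC700 with MMU v1/v2. The VIPT caches are
 *    indexed by vaddr but tagged by paddr; for aliasing configurations the
 *    few vaddr bits needed to pick the right index are stuffed into the
 *    (otherwise ignored) low bits of the paddr written to the aux register.
 *
 *  - __cache_line_loop_v3: MMU v3 introduced the IC_PTAG/DC_PTAG registers;
 *    paddr goes into PTAG while the IVIL/IVDL/FLDL register takes the vaddr
 *    used for indexing.
 *
 *  - __cache_line_loop_v4: ARCv2 (HS38). The D-cache is PIPT, so only paddr
 *    is needed; with PAE40 the upper address bits are written to PTAG_HI.
 *    An aliasing I-cache still uses the v3 helper (see arc_cache_init()).
 */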
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Align the start address to a cache line and round @sz up so that
	 * @num_lines covers the whole request. Page sized/aligned requests
	 * can skip this since they are already line aligned and an integral
	 * number of lines.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* For aliasing VIPT, stuff the index bits of vaddr into paddr[4:0] */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
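/*
 * MMU v3 flavour: the physical tag is programmed into the PTAG aux register
 * while the line command register is written with the vaddr used for
 * indexing.
 */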
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * Align the start address to a cache line and round @sz up; page
	 * sized/aligned requests are already line aligned so can skip this.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * If the vaddr/paddr relationship is constant for the whole loop
	 * (full page op), the physical tag only needs to be programmed once.
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * HS38 aliasing I-cache with PAE40 reuses this v3 programming model:
	 * the upper bits of paddr go into PTAG_HI, and must be written before
	 * the lower 32 bits (hoisted out of the loop).
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
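/*
 * ARCv2 (HS38 / MMU v4) flavour: the D-cache is PIPT and a non-aliasing
 * I-cache can also be operated on with paddr alone, so only the physical
 * address is programmed. With PAE40 the upper address bits go in PTAG_HI.
 * An aliasing I-cache instead uses __cache_line_loop_v3() (vaddr + PTAG).
 */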
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Align the start address to a cache line and round @sz up; page
	 * sized/aligned requests are already line aligned so can skip this.
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * With PAE40 the upper bits of paddr need to be written into the
	 * PTAG_HI register, before the lower 32 bits are written below.
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non-aliasing I-cache on HS38; the aliasing case is
			 * handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

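/*
 * Machine specific helpers for entire D-cache or per-line ops
 */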
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/*
		 * The D-cache aux interface has two commands: FLUSH and INV.
		 * INV in turn has sub-modes: discard, or flush-before-discard,
		 * selected by the IM bit in DC_CTRL. Flush-n-inv is therefore
		 * the INV command with IM set.
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both need to wait for completion */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to the default (discard) invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
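/*
 * Operation on the entire D-cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 */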
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use the same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings, cache op index == paddr, so pass paddr for both */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-cache line ops: per-line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks until the invalidate completes */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif

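/*
 * Operate on a region of the SLC (System Level Cache / L2) using the
 * region-op interface: program the mode bits in SLC_CTRL, then the region
 * END/START registers, and poll until the hardware is done.
 */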
noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * The SLC is shared between all cores and concurrent aux operations
	 * from multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in the
	 * SLC_CTRL_BUSY poll below).
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The region operation is selected via SLC_CTRL: flush by default,
	 * invalidate when RGN_OP_INV is set, and flush-n-invalidate when
	 * RGN_OP_INV is set together with IM.
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on the default value of the IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: discard, no flush before inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * END needs to be set up before START (the latter triggers the
	 * operation), and END must not equal START, hence the l2_line_sz - 1
	 * adjustment; the lower bits are ignored by the hardware.
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	/* Wait for the region op to complete before returning */
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}
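/*
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes to / reads from it.
 *
 * Flushing of the kernel mapping after a write can be deferred if:
 *  - the D-cache is not aliasing, so any U/K mappings are congruent
 *  - no userspace mapping exists yet (finalised in update_mmu_cache())
 *  - the U-mapping is congruent to the K-mapping
 * Otherwise the page is marked dirty (PG_dc_clean cleared) or flushed here.
 */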
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * Pagecache page not yet mapped to userspace:
	 * simply note that the K-mapping is dirty.
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel reading from a page with an existing U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
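/*
 * DMA ops for systems with an L1 cache only:
 * make memory coherent by flushing/invalidating L1 lines.
 */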
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
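/*
 * DMA ops for systems with both L1 and L2 (SLC) caches but no IOC:
 * both L1 and SLC lines need explicit maintenance.
 */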
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}
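/*
 * DMA ops for systems with IOC: the IO-Coherency unit snoops DMA traffic
 * and keeps the caches consistent with memory, so no explicit cache
 * maintenance of DMA buffers is needed and these are intentionally empty.
 */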
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
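/*
 * Exported DMA API: thin wrappers around whichever of the L1-only, L1+SLC
 * or IOC variants arc_cache_init() selected for this system, called from
 * the arch DMA mapping code.
 */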
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
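/*
 * API for making I/D caches consistent when modifying kernel code
 * (loadable modules, kprobes, kgdb...). Called with kernel virtual
 * addresses; since the cache maintenance ops need physical addresses,
 * vmalloc addresses have to be translated page by page.
 */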
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger flush ranges: here we don't care whether this
	 * was a kernel virtual or a physical address.
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: kernel physical address (PAGE_OFFSET onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg, despite being a paddr, will be used to index
		 * the I-cache. This is fine since no alternate virtual
		 * mappings exist for the callers of this case (kprobe/kgdb
		 * on built-in kernel code).
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: kernel vaddr (module/vmalloc space)
	 * (1) The cache maintenance ops only take physical addresses, so the
	 *     vaddr has to be translated via vmalloc_to_pfn().
	 * (2) Despite @tot_sz being <= PAGE_SIZE (bigger ranges were handled
	 *     above), the range may still straddle two virtual pages, hence
	 *     the loop.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
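/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is the physical address of the region.
 * @vaddr is typically a user vaddr (breakpoint) or kernel vaddr (vmalloc);
 * for kprobes on built-in kernel code it may be the paddr itself, which is
 * fine since such pages have no other virtual mappings.
 */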
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to invalidate the I-cache lines of a whole (aligned) page */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
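/*
 * Wrapper to clear out the kernel or userspace mapping of a page.
 * For kernel mappings @vaddr == @paddr.
 */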
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

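/*
 * These VM cache hooks only need to do real work for aliasing VIPT D-cache
 * configurations, hence the CONFIG_ARC_CACHE_VIPT_ALIASING guard.
 */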
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* flush both the user mapping and the kernel mapping of the page */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the SRC page was already mapped in userspace AND its U-mapping
	 * is not congruent with the K-mapping, sync the former to the
	 * physical page so that the copy_page() below sees the right data.
	 *
	 * Note that @u_vaddr refers to the DST page's userspace vaddr, but it
	 * is equally valid for the SRC page as well.
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark the DST page K-mapping as dirty for later finalization by
	 * update_mmu_cache(), which already handles this for other
	 * non-copied user pages (e.g. read faults wiring in a pagecache
	 * page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If SRC was already usermapped and non-congruent to the kernel
	 * mapping, sync the kernel mapping back to the physical page.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
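/*
 * Explicit cache flush request from userspace via syscall,
 * needed by JITs which generate code on the fly.
 */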
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this to flush only the requested range */
	flush_cache_all();
	return 0;
}

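/*
 * Boot time cache setup: sanity check the discovered geometry against the
 * kernel configuration, pick the I-cache line-loop helper, optionally flush
 * and disable the SLC, set up IO-Coherency, and select the DMA cache
 * maintenance callbacks.
 */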
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only the master CPU needs to execute the rest of this function:
	 *  - assuming SMP, all cores have the same cache config, so the
	 *    geometry checks would be identical on every core
	 *  - IOC setup / DMA callbacks only need to be done once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * An aliasing I-cache on ARCv2 (MMU v4) uses the IVIL/PTAG
		 * pair to pass vaddr/paddr respectively, just like MMU v3,
		 * so it reuses the v3 line loop.
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* D-cache aliasing is only possible on ARCompact (VIPT D$) */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	/* SLC present but administratively disabled: flush it, then turn it off */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set: flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for the flush to complete before disabling */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
			;

		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_enable) {
		/*
		 * Program the IO-coherency aperture. The register encodings
		 * below are the values used by the stock platform setup:
		 * base 0x80000 and size 0x11, believed to correspond to a
		 * 512 MB window starting at 0x8000_0000.
		 */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);

		/* Enable partial writes and turn IO-coherency on */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		/* With IOC, DMA buffers need no explicit cache maintenance */
		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}