1
2
3
4
5
6
7
8
9
10
11
12#include <linux/module.h>
13#include <linux/mm.h>
14#include <linux/sched.h>
15#include <linux/cache.h>
16#include <linux/mmu_context.h>
17#include <linux/syscalls.h>
18#include <linux/uaccess.h>
19#include <linux/pagemap.h>
20#include <asm/cacheflush.h>
21#include <asm/cachectl.h>
22#include <asm/setup.h>
23
/*
 * ARCv2 cores provide region-based D-cache maintenance (start/end aux regs)
 * as an alternative to per-line loops; this selects the region variants below.
 */
#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH 1
#endif
27
/* SLC (L2) line size in bytes; stays 0 when no SLC was probed */
static int l2_line_sz;
/* set when the cluster BCR reports IO-Coherency hardware */
static int ioc_exists;
/* runtime knobs; ioc_enable is cleared during probe when no IOC HW exists */
int slc_enable = 1, ioc_enable = 1;
/* uncached peripheral window; refined from AUX_VOL on newer cores */
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE;
unsigned long perip_end = 0xFFFFFFFF;

/* I-cache line-loop implementation, bound per MMU version at init */
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
 unsigned long sz, const int op, const int full_page);

/* DMA maintenance hooks, bound to L1-only / +SLC / IOC no-op variants at init */
void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
40
/*
 * Render a human-readable summary of cpu @c's cache geometry into @buf
 * (capacity @len) and return @buf.  Printed at boot from arc_cache_init().
 */
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

/* one L1 cache line of output; "N/A" when probe found no cache */
#define PR_CACHE(p, cfg, str) \
	if (!(p)->line_len) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
	else \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
			(p)->sz_k, (p)->assoc, (p)->line_len, \
			(p)->vipt ? "VIPT" : "PIPT", \
			(p)->alias ? " aliasing" : "", \
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	/* System Level Cache: reported only if the probe saw a line size */
	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}
72
73
74
75
76
77
/*
 * Probe ARCv2-only cache blocks for @cpu: the System Level Cache (SLC),
 * the IO-Coherency unit, and the uncached peripheral address window.
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	/* SLC build-config register layout */
	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	/* cluster build-config register; bit 'c' flags IO-Coherency HW */
	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	/* AUX_VOL: uncached/volatile region descriptor (newer HS cores) */
	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;


	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		/* sz field encodes capacity as 128K << sz */
		p_slc->sz_k = 128 << slc_cfg.sz;
		/* lsz == 0 encodes 128B lines, otherwise 64B */
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;		/* no IOC hardware: force knob off */

	/*
	 * Peripheral window base is only discoverable on core family > 0x51;
	 * the window end only on family > 0x52 (older cores keep defaults).
	 * NOTE(review): vol.start/limit are 256MB-granular (<< 28) per the
	 * bitfield use here — confirm exact family mapping against the PRM.
	 */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;

		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}
130
/*
 * Decode the L1 I$/D$ build-config registers for the current cpu and
 * fill in cpuinfo; chains to the ARCv2 probe for SLC/IOC.
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	/* common BCR layout shared by I$ and D$ */
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	/* ver == 0 means no I-cache present */
	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		/* older I$ is fixed 2-way; config encoding must agree */
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 ways */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	/* aliasing possible when way-size exceeds a page (colors > 1) */
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		/* older D$ is fixed 4-way VIPT and may alias */
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		/* HS D$ is PIPT: no aliasing by construction */
		p_dc->assoc = 1 << dbcr.config;
		p_dc->vipt = 0;
		p_dc->alias = 0;
	}

	/* note: D$ line encoding differs from I$ (16 << vs 8 <<) */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}
186
187
188
189
190
/*
 * Cache operation selectors; OP_FLUSH_N_INV is the bitwise OR of
 * OP_INV and OP_FLUSH, which the helpers below rely on (op & OP_INV etc).
 */
#define OP_INV 0x1
#define OP_FLUSH 0x2
#define OP_FLUSH_N_INV 0x3
#define OP_INV_IC 0x4
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
/*
 * Per-line cache op loop for MMU v1/v2 cores: the aux command register
 * takes one address per cache line.
 */
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Align start to a line boundary and grow @sz by the clipped-off
	 * head so the rounded-up line count still covers the whole range.
	 * Unconditional alignment is skipped for full-page ops (already
	 * page- and hence line-aligned).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * Fold low bits of the virtual page number into paddr — presumably
	 * the "stuffed address" form these MMU versions use so the cache
	 * indexes by vaddr while tagging by paddr; confirm against the PRM.
	 */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
274
275
276
277
278
279
280
281
282
/*
 * Per-line cache op loop for MMU v3: ops take the vaddr in the command
 * register and the paddr separately in a PTAG register.
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * Align start down to a line and grow @sz by the clipped head so
	 * the line count still covers the whole range (skipped for the
	 * already-aligned full-page case).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For a full page every line shares the same PTAG, so it can be
	 * programmed once outside the loop.
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * PAE40 + aliasing I-cache: the upper paddr bits go into PTAG_HI.
	 * NOTE(review): written once, outside the loop — assumes the range
	 * never crosses a 4GB boundary; and presumably HI must be written
	 * before the low PTAG/cmd writes. Confirm against the PRM.
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		/* non-full-page: PTAG changes per line, rewrite each time */
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
338
339#ifndef USE_RGN_FLSH
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/*
 * Per-line cache op loop for MMU v4 (HS cores), line-by-line flavour:
 * only the physical address is needed in the command register (D$ is
 * PIPT; the non-aliasing I$ case also indexes fine with paddr).
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Align start down to a line and grow @sz by the clipped head so
	 * the rounded-up line count covers the whole range; full-page ops
	 * are already aligned. vaddr is unused in this variant.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * PAE40: upper 8 bits of the 40-bit paddr are loaded via PTAG_HI.
	 * NOTE(review): hoisted outside the loop — assumes the range does
	 * not straddle a 4GB boundary; confirm against the PRM.
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * I$ and D$ have separate HI regs; pick the one
			 * matching the command register used below.
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
402
403#else
404
405
406
407
/*
 * Region-based flavour of the MMU v4 loop (USE_RGN_FLSH): instead of a
 * per-line loop, the hardware operates on a [start, end) range written
 * into dedicated aux registers.
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* select the start/end register pair for I$ vs D$ */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* align start down; grow sz by the clipped-off head */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * Round the end up by almost a line so a partial tail line
		 * is still covered (END appears to be exclusive given the
		 * paddr + sz write below — confirm against the PRM).
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* upper 8 bits of the 40-bit paddr go via the HI regs */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/*
	 * NOTE(review): END is programmed before START; the START write
	 * presumably triggers the operation, so keep this order.
	 */
	write_aux_reg(e, paddr + sz);
	write_aux_reg(s, paddr);

	/* completion is awaited by the caller (__after_dc_op polls FS) */
}
449
450#endif
451
/* bind __cache_line_loop to the variant matching the configured MMU */
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop __cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop __cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop __cache_line_loop_v4
#endif
459
460#ifdef CONFIG_ARC_HAS_DCACHE
461
462
463
464
465
466#ifndef USE_RGN_FLSH
467
468
469
470
/*
 * Pre-op D$ control setup (line-loop flavour).
 * The D$ INV command has two sub-modes selected by DC_CTRL.IM:
 * plain discard vs flush-before-invalidate; combined flush-n-inv is
 * achieved by issuing INV with IM set.
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}
483
484#else
485
/*
 * Pre-op D$ control setup (region-op flavour): besides the IM bit for
 * flush-n-inv, the region operation type itself (flush vs invalidate)
 * is selected via the DC_CTRL RGN_OP field.
 */
static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * RGN_OP selects flush (default) vs invalidate; combined
		 * flush-n-invalidate uses RGN_OP=INV plus IM set above.
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}
506
507#endif
508
509
/*
 * Post-op D$ cleanup: flush commands complete asynchronously, so poll
 * the flush-status bit; then restore IM if flush-n-inv had set it.
 */
static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* spin until hardware clears the flush-in-progress bit */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* undo the IM sub-mode set in __before_dc_op() */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
525
526
527
528
529
530
531
532static inline void __dc_entire_op(const int op)
533{
534 int aux;
535
536 __before_dc_op(op);
537
538 if (op & OP_INV)
539 aux = ARC_REG_DC_IVDC;
540 else
541 aux = ARC_REG_DC_FLSH;
542
543 write_aux_reg(aux, 0x1);
544
545 __after_dc_op(op);
546}
547
/* Flush + invalidate the whole D$, then disable it via the CTRL DIS bit. */
static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}
555
/* Re-enable the D$ by clearing the CTRL DIS bit. */
static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}
562
563
564#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
565
566
567
568
/*
 * D$ maintenance over a line range. IRQs are disabled so the
 * before/loop/after aux-register sequence cannot be interleaved with
 * another cache operation.
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	/* compile-time-constant PAGE_SIZE ops enable loop optimizations */
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}
585
586#else
587
/* !CONFIG_ARC_HAS_DCACHE: all D$ maintenance compiles away to nothing */
#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
593
594#endif
595
596#ifdef CONFIG_ARC_HAS_ICACHE
597
/*
 * Invalidate the entire I-cache. The CTRL read-back after the IVIC
 * write presumably serializes the operation — same pattern as the
 * SLC post-op read-backs below; confirm against the PRM.
 */
static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);
}
603
/*
 * Invalidate I$ lines for (paddr, vaddr, sz) on the local cpu only,
 * via the per-MMU-version loop selected at init; IRQs off around the
 * aux-register sequence.
 */
static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}
615
616#ifndef CONFIG_SMP
617
618#define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
619
620#else
621
/* argument bundle marshalled through on_each_cpu() for SMP I$ inv */
struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};
626
/* IPI callback: unpack args and run the local I$ invalidate */
static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}
633
/*
 * SMP I$ line invalidate: I-caches are per-cpu and not coherent, so the
 * invalidate must run on every cpu (synchronously, last arg = 1).
 */
static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}
645
646#endif
647
648#else
649
/* !CONFIG_ARC_HAS_ICACHE: all I$ maintenance compiles away to nothing */
#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)
652
653#endif
654
/*
 * SLC (L2) maintenance on [paddr, paddr+sz) using the region start/end
 * aux registers. @op is OP_INV / OP_FLUSH / OP_FLUSH_N_INV.
 */
noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * The SLC is shared by all cores, so concurrent aux-register
	 * sequences from different cpus must be serialized — hence a
	 * dedicated spinlock (IRQ-safe, since callers may run in any
	 * context).
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * IM (bit 6): invalidate sub-mode — discard vs flush-before-inv,
	 * so OP_INV clears it and anything involving OP_FLUSH sets it
	 * (mirrors the L1 DC_CTRL.IM handling above).
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * END is extended past the range so a partially covered tail line
	 * is included (low bits presumably ignored by hardware).
	 * NOTE(review): END is programmed before START — the START write
	 * appears to trigger the operation; keep this order.
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* read-back before polling: ensures BUSY reports correct status */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
717
/*
 * SLC (L2) maintenance on [paddr, paddr+sz) using per-line commands
 * (alternative to the region-based slc_op_rgn above).
 */
noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores: serialize concurrent aux
	 * sequences with a dedicated IRQ-safe spinlock (see slc_op_rgn).
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* IM sub-mode: set for flush-class ops, clear for pure invalidate */
	if (!(op & OP_FLUSH))
		ctrl &= ~SLC_CTRL_IM;
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	/* align start down to an SLC line; grow sz by the clipped head */
	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* read-back before polling: ensures BUSY reports correct status */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
767
768#define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
769
770noinline static void slc_entire_op(const int op)
771{
772 unsigned int ctrl, r = ARC_REG_SLC_CTRL;
773
774 ctrl = read_aux_reg(r);
775
776 if (!(op & OP_FLUSH))
777 ctrl &= ~SLC_CTRL_IM;
778 else
779 ctrl |= SLC_CTRL_IM;
780
781 write_aux_reg(r, ctrl);
782
783 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
784
785
786 read_aux_reg(r);
787
788
789 while (read_aux_reg(r) & SLC_CTRL_BUSY);
790}
791
/* Flush + invalidate the whole SLC, then disable it via the DIS bit. */
static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}
799
/* Re-enable the SLC by clearing the DIS bit. */
static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
/*
 * Core-VM hook: a page's D$ state may be stale relative to its user
 * mapping. Non-aliasing caches just mark the page dirty-in-dcache (lazy
 * handling); aliasing VIPT D$ flushes the kernel mapping now when it is
 * not congruent with the user mapping.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* anon pages (no mapping) are not handled here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * Pagecache page not yet mapped to userspace: just note that the
	 * kernel mapping is dirty; otherwise, if user mappings exist,
	 * flush the (non-congruent) kernel alias.
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel alias of a user-mapped page */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
854
855
856
857
858
/* DMA maintenance, L1-only variants (no SLC present / disabled) */

static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
873
874
875
876
877
/* DMA maintenance, L1 + SLC variants (L1 op first, then the shared L2) */

static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}
895
896
897
898
899
900
/* IOC hardware keeps DMA coherent, so software maintenance is a no-op */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
904
905
906
907
/* Exported DMA API entry points: dispatch via the hooks bound at init */

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
925
926
927
928
929
930
931
932
/*
 * Make I$ and D$ coherent for a kernel address range (e.g. after
 * writing code: module load, kprobes). Only kernel addresses are
 * handled — hence the WARN for user vaddrs.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger ranges: entire-cache ops are cheaper than
	 * per-line loops over more than a page.
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Kernel lowmem (above PAGE_OFFSET): identity-ish mapped */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * kstart doubles as the "vaddr" argument here; for the
		 * callers of this case no alternate virtual mappings of
		 * the range exist, so indexing by it is fine.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Kernel vaddr (vmalloc) case: cache ops need the physical
	 * address, so translate page by page. Even with tot_sz <=
	 * PAGE_SIZE the range may straddle two virtual pages, hence
	 * the loop.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		/* clamp to the end of the current virtual page */
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
983
984
985
986
987
988
989
990
991
992
993
/*
 * Make a range coherent between D$ and I$: write back (and invalidate)
 * the D$ lines first so the I$ refill sees up-to-date memory, then
 * invalidate the I$ lines.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}
999
1000
/* Invalidate the I$ footprint of one page (all cpus in SMP). */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
1005
1006
1007
1008
1009
/* Flush-n-invalidate the D$ footprint of one page (vaddr page-aligned). */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
1014
/*
 * Nuke both L1 caches: invalidate the whole I$ and flush-n-invalidate
 * the whole D$, atomically w.r.t. local interrupts.
 */
noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}
1027
1028#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
1029
/* Aliasing VIPT config: no per-mm precision, just flush everything. */
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}
1034
/*
 * Flush one user page's D$ footprint; also invalidate its I$ footprint
 * when the VMA is executable.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	/* NOTE(review): unsigned int truncates a PAE40 paddr — only safe
	 * while this aliasing-VIPT path is limited to 32-bit paddrs. */
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}
1047
/* Aliasing VIPT config: no ranged precision, just flush everything. */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}
1053
1054void flush_anon_page(struct vm_area_struct *vma, struct page *page,
1055 unsigned long u_vaddr)
1056{
1057
1058 __flush_dcache_page(page_address(page), u_vaddr);
1059 __flush_dcache_page(page_address(page), page_address(page));
1060
1061}
1062
1063#endif
1064
/*
 * Copy a user page (COW path etc.) while keeping aliasing-VIPT D$
 * state coherent between the kernel and user mappings of src/dst.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the source page is already mapped in userspace AND its
	 * user mapping is not congruent with the kernel mapping, flush
	 * the kernel alias first so the copy below reads current data.
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * The destination was written via its kernel alias, which the
	 * user alias won't necessarily see on an aliasing D$; mark it
	 * not-clean so the fault path syncs it at map-in time.
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If we flushed the source's kernel alias above it is now clean
	 * relative to memory — flush it back and record that; otherwise
	 * the source's D$ state is unknown, mark it not-clean.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}
1114
/* Zero a user page via its kernel alias; mark its D$ state not-clean. */
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
1120
1121
1122
1123
1124
1125
/*
 * cacheflush(2): userspace cache maintenance (e.g. for JITs).
 * The range/flags arguments are currently ignored — everything is
 * flushed regardless.
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this for the requested range only */
	flush_cache_all();
	return 0;
}
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
/*
 * Program and enable the IO-Coherency unit. Caches must not hold stale
 * data for the aperture while it is reconfigured, so L1 D$ is disabled
 * (after flush-n-inv) around the sequence and the SLC is cleaned too.
 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/* flush + invalidate + disable the L1 D$ */
	__dc_disable();

	/* flush + invalidate the SLC, if one exists */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * The IOC aperture is sized to cover all of system memory; the
	 * hardware requires a power-of-2 size of at least 4KB.
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * Aperture size register encoding: value v means 2^(v+2) KB,
	 * hence order_base_2(KB) - 2 below.
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);

	/* aperture starts at the base of system RAM */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	/* base register is in 4KB units (>> 12) */
	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

	/* re-enable the L1 D$ */
	__dc_enable();
}
1188
1189
1190
1191
1192
1193
1194
1195
/*
 * Boot-cpu cache init: sanity-check probed geometry against the kernel
 * config, pick the I$ line-loop implementation, set up SLC/IOC, and
 * bind the DMA maintenance hooks.
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		/* L1_CACHE_BYTES is compile-time; must match real HW */
		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * An aliasing I$ on ARCv2 (MMU v4) needs the vaddr+PTAG
		 * programming model, i.e. the MMU v3 style loop, even
		 * though the MMU itself is v4.
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* only ARCompact D$ can alias; config must match reality */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/* honor the slc_enable knob: turn off a present-but-unwanted SLC */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_enable)
		arc_ioc_setup();

	/* bind DMA hooks: IOC no-ops > L1+SLC > L1-only */
	if (is_isa_arcv2() && ioc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}
1267
/*
 * Per-cpu cache init entry point: print the geometry, run the one-time
 * master init on the boot cpu, and clear stale PAE40 HI registers.
 */
void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * PAE40 hardware present but not enabled by this kernel: the
	 * *_HI address registers may still hold non-zero values (e.g.
	 * from a bootloader), which would corrupt subsequent 32-bit
	 * cache ops — zero them out defensively.
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}
1299