/*
 * ARC700 VIPT Cache Management
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;

#define PR_CACHE(p, cfg, str) \
	if (!(p)->ver) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
	else \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
			(p)->sz_k, (p)->assoc, (p)->line_len, \
			(p)->vipt ? "VIPT" : "PIPT", \
			(p)->alias ? " aliasing" : "", \
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
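
/*
 * Example of a line emitted by PR_CACHE() above (hypothetical geometry,
 * for illustration only):
 *
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT aliasing
 */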

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCRs.
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	BUG_ON(ibcr.config != 3);	/* ARC700 I$ is always 2-way */
	p_ic->assoc = 2;
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		return;

	BUG_ON(dbcr.config != 2);	/* ARC700 D$ is always 4-way */
	p_dc->assoc = 4;
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_dc->vipt = 1;
	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}
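
/*
 * Worked example of the decode above (hypothetical BCR values, for
 * illustration only): dbcr.sz = 6, dbcr.line_len = 1, with 8K pages:
 *
 *	sz_k     = 1 << (6 - 1) = 32	(32K cache)
 *	line_len = 16 << 1      = 32	(32 byte lines)
 *	alias    = 32/4/8 = 1, not > 1, so a 32K 4-way D-cache with 8K
 *		   pages is non-aliasing (each way spans a single page)
 */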

/*
 * Validate the cache geometry discovered from the BCRs against the
 * compile-time kernel configuration: line length, presence of each cache,
 * and whether D-cache aliasing support is (or isn't) needed.
 * Any mismatch is fatal, hence the panic()s below.
 */
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
		int handled;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-cache aliasing */
		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

		if (dc->alias && !handled)
			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		else if (!dc->alias && handled)
			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	}
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * Common helper for line operations on the I-cache or D-cache:
 * walks [paddr, paddr + sz) one cache line at a time, issuing the
 * requested aux-register command per line.
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_DC_PTAG;
#endif
	}

	/*
	 * Ensure we properly floor/ceil non line-aligned/sized requests:
	 * @paddr is rounded down to a line boundary and @sz grown to match,
	 * giving an integral @num_lines. This can be skipped for full page
	 * ops, since a page-aligned, page-sized request is line aligned by
	 * construction.
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMU v1/v2: needed vaddr bits are stuffed into unused paddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if the vaddr-paddr pairing is constant (full page op), the PTAG
	 * reg only needs to be programmed once, outside the loop
	 */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMU v3: cache ops take the paddr separately, via PTAG */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}
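
/*
 * Worked example of the floor/ceil logic in __cache_line_loop() above
 * (hypothetical values, for illustration): a 70 byte request at paddr
 * 0x8000_1010 with 64 byte lines:
 *
 *	sz    += 0x1010 & ~CACHE_LINE_MASK	-> 70 + 0x10 = 86
 *	paddr &= CACHE_LINE_MASK		-> 0x8000_1000
 *	num_lines = DIV_ROUND_UP(86, 64)	-> 2
 *
 * i.e. both lines touched by [0x8000_1010, 0x8000_1056) are operated on.
 */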

#ifdef CONFIG_ARC_HAS_DCACHE

/*
 * Machine specific helpers for entire D-cache or per-line ops
 */
static unsigned int __before_dc_op(const int op)
{
	unsigned int reg = 0;	/* only meaningful for OP_FLUSH_N_INV */

	if (op == OP_FLUSH_N_INV) {
		/*
		 * The D-cache provides two commands: FLUSH (wback) and INV.
		 * INV in turn has two sub-modes: discard, or flush-before-
		 * discard. flush-n-inv is thus the INV cmd with the IM bit
		 * set; the default (discard) mode is restored in
		 * __after_dc_op().
		 */
		reg = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush and flush-n-inv both need to wait */
		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
			;

	/* switch back to the default INV sub-mode (discard) */
	if (op == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
}
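
/*
 * The two helpers above bracket every D-cache op. A minimal usage sketch
 * (assuming the caller handles IRQ masking, as __dc_line_op() does):
 *
 *	ctrl = __before_dc_op(OP_FLUSH_N_INV);	(sets IM: INV also flushes)
 *	... issue entire-op / per-line ops ...
 *	__after_dc_op(OP_FLUSH_N_INV, ctrl);	(waits, restores INV mode)
 */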

/*
 * Operation on the entire D-cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Since @cacheop is compile-time constant at all call sites, constant
 * propagation elides the dead branch in the generated code.
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned int ctrl_reg;
	int aux;

	ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* INV as well as FLUSH-n-INV use the IV reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

/* For kernel mappings, the cache op index (vaddr) is the same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-cache: per-line INV (discard or wback-n-discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags;
	unsigned int ctrl_reg;

	local_irq_save(flags);

	ctrl_reg = __before_dc_op(cacheop);

	__cache_line_loop(paddr, vaddr, sz, cacheop);

	__after_dc_op(cacheop, ctrl_reg);

	local_irq_restore(flags);
}
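
/*
 * Typical usage (sketch): for kernel linear mappings vaddr == paddr, so
 * callers such as the dma_cache_*() routines below simply do e.g.
 *
 *	__dc_line_op_k(start, sz, OP_FLUSH);	(wback [start, start+sz))
 */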

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-cache aliasing in the ARC700 VIPT cache
 *
 * The I-cache is virtually indexed and physically tagged. For smaller
 * geometries paddr alone suffices to index the cache for line ops, but
 * once a way grows beyond the page size (e.g. a 32K 2-way I-cache with 8K
 * pages) some vaddr bits are needed as well:
 *
 *  - MMU v1/v2: the needed vaddr bits are "stuffed" into paddr bits [4:0],
 *    which line ops otherwise ignore (they are the offset within the
 *    line). See the MMU_VER <= 2 leg of __cache_line_loop().
 *
 *  - MMU v3: the cache ops take vaddr directly, and paddr is programmed
 *    separately via the PTAG aux register.
 */
static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* read back blocks until inv done */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}
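
/*
 * Rationale for the SMP path above: I-cache lines are not snooped across
 * cores, so the invalidate has to be replayed on every CPU. on_each_cpu()
 * runs the helper on all cores (the current one included) and waits for
 * completion.
 */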

#endif /* CONFIG_SMP */

#else

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes-to/reads-from it.
 *
 * The idea is to defer flushing of the kernel mapping after a WRITE, which
 * is possible if:
 *  - the D-cache is not aliasing, so any U/K mappings of the page are
 *    congruent anyway
 *  - no U-mapping exists yet for the page (finalized in update_mmu_cache())
 *
 * The corollary case is the kernel READing from a page with a U-mapping:
 * if the U-mapping is not congruent with the K-mapping, the former needs
 * flushing so the kernel sees the data written through it.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace:
	 * make a note that the K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
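
/*
 * Example of the congruency issue handled above (hypothetical geometry,
 * for illustration): with a 64K 4-way VIPT D-cache and 8K pages, each way
 * spans 16K, so vaddr bit 13 takes part in indexing. K and U mappings of
 * the same physical page that differ in that bit land in different cache
 * "colors" and can each hold a stale copy; such pairs are "not congruent"
 * and need the explicit flush.
 */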

/* DMA coherency helpers: operate on the kernel linear mapping */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

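/*
 * Driver-side view of the three primitives above (sketch, mirroring the
 * usual DMA API semantics): wback before the device reads a buffer
 * (DMA_TO_DEVICE), inv before the CPU reads data the device wrote
 * (DMA_FROM_DEVICE), wback_inv when both apply (DMA_BIDIRECTIONAL).
 */
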
/*
 * Make the I and D caches consistent when modifying kernel code
 * (loadable modules, kprobes, kgdb...).
 * Called e.g. on insmod with a kernel virtual address for the module's
 * CODE. ARC cache maintenance ops need a phy addr, so vmalloc addrs must
 * be translated first.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/*
	 * Shortcut for bigger flush ranges: here we don't care whether this
	 * was a kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: kernel phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg, despite being a paddr, will be used to index
		 * the icache. This is OK since no alternate virtual mappings
		 * exist, given the callers for this case: kprobe/kgdb in
		 * built-in kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: kernel vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC cache maintenance ops only take phy addrs, hence the
	 *     vmalloc_to_pfn() translation per page
	 * (2) despite @tot_sz being < PAGE_SIZE (bigger ranges were handled
	 *     above), the range can still straddle two virtual pages, hence
	 *     the loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
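
/*
 * Worked example of the loop above (hypothetical addresses, 8K pages): a
 * 0x20 byte range at vmalloc addr 0x7000_1ff0 straddles two pages, so two
 * iterations run, each with its own vmalloc_to_pfn() lookup since the two
 * virtual pages may map discontiguous physical pages:
 *
 *	iter 1: off = 0x1ff0, sz = min(0x20, 0x2000 - 0x1ff0) = 0x10
 *	iter 2: off = 0,      sz = remaining 0x10
 */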

/*
 * General purpose helper to make the I and D cache lines of a region
 * consistent.
 * @paddr is the phy addr of the region
 * @vaddr is typically a user vaddr (breakpoint) or kernel vaddr (vmalloc);
 *    when called for built-in kernel code (kprobe), @vaddr is actually the
 *    paddr, which is fine since built-in kernel code is not aliased.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile-time eliminate alignment checks in the flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out a kernel or userspace mapping of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	___flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* flush the stale U-mapping alias, and the K-mapping, of the page */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the SRC page was already mapped in userspace AND its U-mapping
	 * is not congruent with the K-mapping, sync the former to the
	 * physical page so that the K-mapping used by copy_page() below
	 * sees the right data.
	 *
	 * Note that while @u_vaddr refers to the DST page's userspace vaddr,
	 * it is equally valid for the SRC page as well.
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark the DST page K-mapping as dirty, for later finalization by
	 * update_mmu_cache(). The finalization could have been done here
	 * (both vaddr and paddr are available), but update_mmu_cache()
	 * already handles this for other non-copied user pages (e.g. read
	 * faults which wire in the pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If the SRC was already usermapped and non-congruent with the
	 * kernel mapping, sync the kernel mapping back to the physical page
	 * and mark it clean; otherwise note that the K-mapping is dirty.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* for now, flush everything rather than just the requested range */
	flush_cache_all();
	return 0;
}
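
/*
 * Userspace view of the syscall above (sketch, for illustration only; the
 * exact syscall number macro is arch specific): a JIT that has just
 * written instructions to a buffer would invoke this before executing
 * them, e.g.
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 *
 * The current implementation ignores @start/@sz/@flags and flushes
 * everything.
 */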