1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64#include <linux/module.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/cache.h>
68#include <linux/mmu_context.h>
69#include <linux/syscalls.h>
70#include <linux/uaccess.h>
71#include <linux/pagemap.h>
72#include <asm/cacheflush.h>
73#include <asm/cachectl.h>
74#include <asm/setup.h>
75
/*
 * Render I$/D$ geometry into a caller-supplied buffer (for boot log /
 * /proc/cpuinfo style printing). Returns @buf for direct use in printk.
 *
 * NOTE(review): @cpu_id is ignored — info printed is always for the CPU
 * this runs on (smp_processor_id()); confirm callers expect that.
 */
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

/*
 * One line per cache: "N/A" if h/w reports no cache (ver == 0), else the
 * geometry plus a marker when the kernel build has the cache disabled.
 */
#define PR_CACHE(p, enb, str) \
{ \
	if (!(p)->ver) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
	else \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
			enb ? "" : "DISABLED (kernel-build)"); \
}

	PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
97
98
99
100
101
102
/*
 * Probe the I$ and D$ Build Configuration Registers and record the
 * geometry (ver/assoc/line_len/sz) in this CPU's cpuinfo_arc700[] slot.
 */
void __cpuinit read_decode_cache_bcr(void)
{
	struct bcr_cache ibcr, dbcr;
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	/* Only this config encoding (2-way) is recognized for the I$ */
	if (ibcr.config == 0x3)
		p_ic->assoc = 2;
	p_ic->line_len = 8 << ibcr.line_len;	/* line size = 8B << n */
	p_ic->sz = 0x200 << ibcr.sz;		/* total size = 512B << n */
	p_ic->ver = ibcr.ver;			/* ver == 0 => no I$ present */

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	/* Only this config encoding (4-way) is recognized for the D$ */
	if (dbcr.config == 0x2)
		p_dc->assoc = 4;
	p_dc->line_len = 16 << dbcr.line_len;	/* D$ line size = 16B << n */
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;			/* ver == 0 => no D$ present */
}
127
128
129
130
131
132
133
134
135void __cpuinit arc_cache_init(void)
136{
137 unsigned int temp;
138 unsigned int cpu = smp_processor_id();
139 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
140 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
141 int way_pg_ratio = way_pg_ratio;
142 int dcache_does_alias;
143 char str[256];
144
145 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
146
147 if (!ic->ver)
148 goto chk_dc;
149
150#ifdef CONFIG_ARC_HAS_ICACHE
151
152 if ((ic->assoc != ARC_ICACHE_WAYS) ||
153 (ic->line_len != ARC_ICACHE_LINE_LEN)) {
154 panic("Cache H/W doesn't match kernel Config");
155 }
156#if (CONFIG_ARC_MMU_VER > 2)
157 if (ic->ver != 3) {
158 if (running_on_hw)
159 panic("Cache ver doesn't match MMU ver\n");
160
161
162 pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
163
164 }
165#endif
166#endif
167
168
169 temp = read_aux_reg(ARC_REG_IC_CTRL);
170
171#ifdef CONFIG_ARC_HAS_ICACHE
172 temp &= ~IC_CTRL_CACHE_DISABLE;
173#else
174 temp |= IC_CTRL_CACHE_DISABLE;
175#endif
176
177 write_aux_reg(ARC_REG_IC_CTRL, temp);
178
179chk_dc:
180 if (!dc->ver)
181 return;
182
183#ifdef CONFIG_ARC_HAS_DCACHE
184 if ((dc->assoc != ARC_DCACHE_WAYS) ||
185 (dc->line_len != ARC_DCACHE_LINE_LEN)) {
186 panic("Cache H/W doesn't match kernel Config");
187 }
188
189 dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
190
191
192 if (dcache_does_alias && !cache_is_vipt_aliasing())
193 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
194 else if (!dcache_does_alias && cache_is_vipt_aliasing())
195 panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
196#endif
197
198
199
200
201
202 temp = read_aux_reg(ARC_REG_DC_CTRL);
203 temp &= ~DC_CTRL_INV_MODE_FLUSH;
204
205#ifdef CONFIG_ARC_HAS_DCACHE
206
207 write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
208#else
209
210 write_aux_reg(ARC_REG_DC_FLSH, 0x1);
211
212 write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
213#endif
214
215 return;
216}
217
/* Cacheline op selector: FLUSH_N_INV is literally (OP_INV | OP_FLUSH) */
#define OP_INV 0x1
#define OP_FLUSH 0x2
#define OP_FLUSH_N_INV 0x3
221
222#ifdef CONFIG_ARC_HAS_DCACHE
223
224
225
226
227
228static inline void wait_for_flush(void)
229{
230 while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
231 ;
232}
233
234
235
236
237
238
239
/*
 * Operate on the ENTIRE D-cache in one h/w command.
 * @cacheop: OP_INV / OP_FLUSH / OP_FLUSH_N_INV
 *
 * IRQs are held off so the DC_CTRL mode change can't be interleaved with
 * other cache ops.
 */
static inline void __dc_entire_op(const int cacheop)
{
	/* "tmp = tmp" self-init just silences maybe-uninitialized; tmp is
	 * written before every read (both on the FLUSH_N_INV path) */
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Flush-n-inv is implemented as the INV command with the
		 * "flush before invalidate" mode bit set; save old mode so
		 * it can be restored on the way out.
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* inv and flush-n-inv share the INV cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);	/* single write kicks the whole-$ op */

	if (cacheop & OP_FLUSH)	/* any op that writes back must wait */
		wait_for_flush();

	/* restore the default (non-flushing) invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
273
274
275
276
277
278
279
/*
 * Issue a per-line D-cache op across [paddr, paddr + sz).
 * @vaddr supplies the index ("color") bits for the VIPT cache;
 * @aux_reg selects the op (e.g. line-invalidate vs line-flush register).
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/*
	 * A full-page op with compile-time-constant PAGE_SIZE arrives
	 * page-aligned already, so the line-alignment fixup is skipped;
	 * any other range is widened/aligned down to line boundaries.
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;  /* cover the start offset */
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMU v1/v2: fold the vaddr color bits into the paddr op word */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * MMU v3: tag (paddr) and index (vaddr) are programmed via
		 * two separate aux registers for each line.
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* MMU v1/v2: one write carries paddr (+ color bits above) */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
320
321
/* Kernel-address variant: vaddr == paddr in the identity-mapped region */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
323
324
325
326
/*
 * D-cache op over an arbitrary range, line by line, with IRQs off.
 * Same DC_CTRL "flush before invalidate" mode dance as __dc_entire_op().
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	/* self-init silences maybe-uninitialized; written before any read */
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * D$ exposes only FLUSH and INV commands; flush-n-inv is
		 * INV run in "flush before invalidate" mode, so flip that
		 * mode bit (and remember the old DC_CTRL for restore).
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* inv and flush-n-inv share the INV line reg */
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(paddr, vaddr, sz, aux);

	if (cacheop & OP_FLUSH)	/* any op that writes back must wait */
		wait_for_flush();

	/* restore the default (non-flushing) invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
362
363#else
364
/* D$ not configured in this build: all D-cache ops compile away */
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)
368
369#endif
370
371
372#ifdef CONFIG_ARC_HAS_ICACHE
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/*
 * Invalidate I-cache lines covering [paddr, paddr + sz).
 * @vaddr again provides the VIPT index/color bits; IRQs are held off so
 * the per-line PTAG + IVIL register-pair programming is not torn.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Full-page invalidate with constant PAGE_SIZE arrives aligned;
	 * anything else is widened/aligned down to line boundaries first.
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMU v1/v2: fold the vaddr color bits into the paddr op word */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMU v3: tag and index go to separate aux registers */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* MMU v1/v2: one write carries paddr (+ color bits above) */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
461
462#else
463
/* I$ not configured in this build: invalidate is a no-op */
#define __ic_line_inv_vaddr(pstart, vstart, sz)
465
466#endif
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
/*
 * Called when the kernel has (potentially) dirtied a page-cache page that
 * user space may also map.
 *
 * Non-aliasing VIPT: nothing can be incoherent by color, so just mark the
 * page (PG_arch_1 is used in this file as a "kernel mapping dirty" flag —
 * see copy_user_highpage()) and let coherence be handled lazily.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		set_bit(PG_arch_1, &page->flags);
		return;
	}

	/* anon pages (no mapping) are not handled here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * File page not yet mapped into any user address space: defer —
	 * only note that the kernel mapping may be dirty.
	 */
	if (!mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel wrote a page that user space currently maps */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		/* flush only if K and U mappings land on different colors */
		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
516
517
/* DMA bidirectional: write back dirty lines, then drop them */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);
523
/* DMA from device: discard stale cached copies of the range */
void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);
529
/* DMA to device: write back dirty lines, keep them cached */
void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
535
536
537
538
539
540
541
542
543void flush_icache_range(unsigned long kstart, unsigned long kend)
544{
545 unsigned int tot_sz, off, sz;
546 unsigned long phy, pfn;
547
548
549
550
551 if (kstart < TASK_SIZE) {
552 BUG_ON("Flush icache range for user virtual addr space");
553 return;
554 }
555
556
557
558
559 tot_sz = kend - kstart;
560 if (tot_sz > PAGE_SIZE) {
561 flush_cache_all();
562 return;
563 }
564
565
566 if (likely(kstart > PAGE_OFFSET)) {
567
568
569
570
571
572
573 __sync_icache_dcache(kstart, kstart, kend - kstart);
574 return;
575 }
576
577
578
579
580
581
582
583
584
585
586 while (tot_sz > 0) {
587 off = kstart % PAGE_SIZE;
588 pfn = vmalloc_to_pfn((void *)kstart);
589 phy = (pfn << PAGE_SHIFT) + off;
590 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
591 __sync_icache_dcache(phy, kstart, sz);
592 kstart += sz;
593 tot_sz -= sz;
594 }
595}
596
597
598
599
600
601
602
603
604
605
606
/*
 * Make I$ coherent with D$ for one (paddr, vaddr, len) span:
 * invalidate the I$ lines and flush-n-invalidate the D$ lines.
 * IRQs are disabled across both so the pair executes atomically.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	local_irq_restore(flags);
}
616
617
/* Invalidate the I-cache lines covering a single page */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
622
623
624
625
626
/*
 * Write back + invalidate one page of D-cache.
 * vaddr is masked to the page base — only its color bits are relevant.
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
631
/* Invalidate the entire I-cache with one h/w command */
void flush_icache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	write_aux_reg(ARC_REG_IC_IVIC, 1);

	/* read serializes: the aux read can't complete until the
	 * whole-cache invalidate has finished */
	read_aux_reg(ARC_REG_IC_CTRL);
	local_irq_restore(flags);
}
644
/* Nuke everything: invalidate I$, write back + invalidate entire D$ */
noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	flush_icache_all();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}
657
658#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
659
/* Aliasing VIPT: no per-mm precision attempted — flush everything */
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}
664
/*
 * Flush both caches for one user page (given its pfn and user vaddr).
 * NOTE(review): paddr is "unsigned int" — fine while physical addresses
 * fit in 32 bits; would truncate otherwise. Confirm against platform.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
}
672
/* Aliasing VIPT: range flushes degrade to a full cache flush */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}
678
/*
 * Anonymous page about to be touched via its kernel mapping: flush the
 * user-color lines, then the kernel-color lines, so neither alias holds
 * stale data.
 */
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* user mapping color first, then the kernel mapping color */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));

}
687
688#endif
689
/*
 * Copy a user page (COW etc.) while keeping an aliasing VIPT D-cache
 * coherent across the user-color and kernel-color mappings.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If user space has the source mapped at a different color than
	 * the kernel mapping, the U-color lines may hold data invisible
	 * to copy_page() below — flush them out first.
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Destination was written through its kernel mapping; mark it
	 * dirty-in-dcache (PG_arch_1) so the flush relative to the future
	 * user mapping happens lazily (see flush_dcache_page()).
	 */
	set_bit(PG_arch_1, &to->flags);

	/*
	 * Source: if the U-color was flushed above, also flush the K-color
	 * lines the copy just pulled in, leaving both colors clean;
	 * otherwise just note the kernel mapping as (possibly) dirty.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
	} else {
		set_bit(PG_arch_1, &from->flags);
	}
}
732
/*
 * Zero a user page through its kernel mapping; mark it dirty-in-dcache
 * (PG_arch_1) so coherence with the user mapping is handled lazily.
 */
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	set_bit(PG_arch_1, &page->flags);
}
738
739
740
741
742
743
/*
 * cacheflush(2): user-space request to sync caches (e.g. after JIT
 * code generation). The (start, sz, flags) arguments are currently
 * ignored — the entire cache hierarchy is flushed instead.
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* coarse but correct: full flush regardless of requested range */
	flush_cache_all();
	return 0;
}
750