1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/cpu.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/smp.h>
23#include <linux/spinlock.h>
24#include <linux/log2.h>
25#include <linux/io.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28
29#include <asm/cacheflush.h>
30#include <asm/cp15.h>
31#include <asm/cputype.h>
32#include <asm/hardware/cache-l2x0.h>
33#include "cache-tauros3.h"
34#include "cache-aurora-l2.h"
35
/*
 * Per-variant initialisation descriptor.  One static instance exists for
 * each supported controller type; __l2c_init() kmemdup()s the selected one
 * into l2x0_data so the callbacks remain reachable after init.
 */
struct l2c_init_data {
	const char *type;	/* human-readable controller name for logging */
	unsigned way_size_0;	/* way size when the AUX way-size field is 0;
				 * scaled by 2^way_size_bits in __l2c_init() */
	unsigned num_lock;	/* number of lockdown D/I register pairs */
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;	/* ops installed into outer_cache */
};
48
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;			/* virtual base of the controller */
static const struct l2c_init_data *l2x0_data;	/* kmemdup'd init descriptor */
static DEFINE_RAW_SPINLOCK(l2x0_lock);		/* serialises background ops */
static u32 l2x0_way_mask;	/* bitmask of all ways present */
static u32 l2x0_size;		/* total cache size in bytes */
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;	/* may be moved to
					 * L2X0_DUMMY_REG by erratum 753970 */

struct l2x0_regs l2x0_saved_regs;	/* register values for resume/configure */
59
60
61
62
63static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
64{
65
66 while (readl_relaxed(reg) & mask)
67 cpu_relax();
68}
69
70
71
72
73
/*
 * Write a secure-only controller register.  When the platform provides a
 * secure write hook (outer_cache.write_sec), route the write through it;
 * otherwise write the register directly.  The write is skipped entirely
 * when the register already holds @val, avoiding needless secure calls.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}
83
84
85
86
87
88
/*
 * Set the debug control register, which is a secure-only register on
 * some platforms, hence the l2c_write_sec() path.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}
93
/*
 * Start a whole-cache way operation on @reg by writing the mask of all
 * ways, then wait for the hardware to clear those bits (completion).
 */
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
99
100static inline void l2c_unlock(void __iomem *base, unsigned num)
101{
102 unsigned i;
103
104 for (i = 0; i < num; i++) {
105 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
106 i * L2X0_LOCKDOWN_STRIDE);
107 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
108 i * L2X0_LOCKDOWN_STRIDE);
109 }
110}
111
/*
 * Default configure callback: restore only the auxiliary control
 * register from the saved register set.
 */
static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
116
117
118
119
120
/*
 * Enable the L2 cache controller.  Must only be called while the cache
 * is disabled: configure it (via the platform hook when one exists),
 * clear the lockdown registers, invalidate the entire cache with local
 * IRQs off, then set the enable bit through a secure write.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	/* Platform-specific configure hook takes precedence. */
	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	/* Invalidate all ways and drain via the sync register, atomically
	 * w.r.t. local interrupts. */
	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
140
/*
 * Disable the cache controller: flush everything out first, clear the
 * enable bit, and issue a barrier so the disable takes effect before
 * any subsequent memory access.
 */
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}
149
150static void l2c_save(void __iomem *base)
151{
152 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
153}
154
/*
 * Resume callback: re-enable the controller only if firmware or a
 * suspend cycle left it disabled; if it is already enabled there is
 * nothing to do (and re-running the enable sequence would be unsafe).
 */
static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if it is already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);
}
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * L2C-210 style cache sync: a single write to the sync register drains
 * the buffers.  No completion polling is needed on this controller
 * (operations are atomic, unlike the background ops on L2C-220).
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}
182
183static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
184 unsigned long end)
185{
186 while (start < end) {
187 writel_relaxed(start, reg);
188 start += CACHE_LINE_SIZE;
189 }
190}
191
/*
 * Invalidate a physical address range.  Partial lines at either end are
 * cleaned+invalidated instead of just invalidated, so that unrelated
 * data sharing the cache line is not discarded.
 */
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		/* Partial head line: clean+invalidate to preserve neighbours */
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		/* Partial tail line: likewise clean+invalidate */
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
210
/*
 * Clean a physical address range.  Aligning @start down is safe since
 * cleaning extra bytes of a shared line never loses data.
 */
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
219
/*
 * Clean+invalidate a physical address range.  Alignment of @start down
 * is safe because clean+invalidate preserves data sharing the line.
 */
static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
228
/*
 * Clean+invalidate every way.  Callers must have interrupts disabled
 * (enforced by the BUG_ON) since the way operation must not be
 * interleaved with other cache maintenance on this CPU.
 */
static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}
238
/* outer_cache.sync hook: drain the controller's buffers. */
static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
243
/* Init descriptor for the non-DT L2C-210 path (see l2x0_init()). */
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
262
263
264
265
266
267
268
269
270
271
272
/*
 * L2C-220 cache sync: unlike the L2C-210, the sync here is a background
 * operation, so poll bit 0 until the hardware reports completion.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
278
/*
 * Perform a whole-cache way operation followed by a sync, serialised
 * against other L2C-220 background operations by l2x0_lock.
 */
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
288
/*
 * Issue the background line operation at @reg over [start, end), waiting
 * for the previous operation to finish before each write.  The caller
 * holds l2x0_lock; every 4K block the lock is dropped and re-taken to
 * bound IRQ latency, which is why the (possibly updated) @flags value
 * is returned and must be used by the caller for the final unlock.
 */
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			/* Wait for the previous background op to complete. */
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* Let pending interrupts in between 4K blocks. */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
311
/*
 * Invalidate a physical address range on the L2C-220.  Partial lines at
 * either end are clean+invalidated so neighbouring data on the same
 * line is not lost.  All operations run under l2x0_lock because they
 * are background operations on this controller.
 */
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			/* Partial head line: clean+invalidate instead. */
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			/* Partial tail line: wait for the head op, then
			 * clean+invalidate the tail line too. */
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
338
339static void l2c220_clean_range(unsigned long start, unsigned long end)
340{
341 void __iomem *base = l2x0_base;
342 unsigned long flags;
343
344 start &= ~(CACHE_LINE_SIZE - 1);
345 if ((end - start) >= l2x0_size) {
346 l2c220_op_way(base, L2X0_CLEAN_WAY);
347 return;
348 }
349
350 raw_spin_lock_irqsave(&l2x0_lock, flags);
351 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
352 start, end, flags);
353 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
354 __l2c220_cache_sync(base);
355 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
356}
357
/*
 * Clean+invalidate a physical address range on the L2C-220, falling
 * back to a way-based operation when the range covers the whole cache.
 */
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		/* Range spans the whole cache: way op is cheaper. */
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
376
/* outer_cache.flush_all hook: clean+invalidate every way. */
static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
381
/* outer_cache.sync hook, serialised against other background ops. */
static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
390
/*
 * L2C-220 enable: force the non-secure lockdown access bit into the
 * auxiliary control value so the non-secure world can later clear the
 * lockdown registers, then run the generic enable sequence.
 */
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/* Allow NS access to the lockdown registers (see l2c220_unlock). */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}
402
/*
 * Clear the lockdown registers, but only when the hardware grants
 * non-secure access to them; otherwise the writes would be ignored
 * or fault.
 */
static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
408
409static const struct l2c_init_data l2c220_data = {
410 .type = "L2C-220",
411 .way_size_0 = SZ_8K,
412 .num_lock = 1,
413 .enable = l2c220_enable,
414 .save = l2c_save,
415 .configure = l2c_configure,
416 .unlock = l2c220_unlock,
417 .outer_cache = {
418 .inv_range = l2c220_inv_range,
419 .clean_range = l2c220_clean_range,
420 .flush_range = l2c220_flush_range,
421 .flush_all = l2c220_flush_all,
422 .disable = l2c_disable,
423 .sync = l2c220_sync,
424 .resume = l2c_resume,
425 },
426};
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
/*
 * Invalidate range, working around PL310 erratum 588369: on affected
 * revisions the partial-line clean+invalidate must be issued as a
 * separate clean then invalidate with the debug register set, all
 * under the lock so the debug-mode window is not interleaved.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369: clean then invalidate under debug mode. */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
503
/*
 * Flush (clean+invalidate) range with the erratum 588369 workaround:
 * issue explicit clean then invalidate per line inside a debug-mode
 * window, in 4K blocks with the lock dropped between blocks to bound
 * IRQ latency.
 */
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			/* Allow interrupts between 4K blocks. */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}
530
/*
 * Whole-cache flush with the erratum 727915 workaround: the way-based
 * clean+invalidate must run inside a debug-mode window, held under the
 * lock together with the final sync.
 */
static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
543
/*
 * Save the L2C-310 specific registers (latency, address filter, and on
 * sufficiently new RTL revisions the prefetch and power control
 * registers) in addition to the common aux control register.
 */
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}
572
/*
 * Restore the L2C-310 specific registers saved by l2c310_save(), in
 * addition to the common aux control restore.  Revision-gated registers
 * are only written on RTL revisions that actually have them.
 */
static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
				 L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}
599
/*
 * CPU hotplug notifier: mirror the "full line of zeros" (and related
 * ACTLR bits 1-3) setting into each CPU's auxiliary control register as
 * CPUs come online, and clear it as they go down.
 */
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}
	return NOTIFY_OK;
}
612
/*
 * L2C-310 enable: resolve the Cortex-A9-only feature bits (early BRESP,
 * full line of zeros) against the CPU we are running on and the RTL
 * revision, program defaults for the power control register on r3p0+,
 * force NS lockdown access, run the common enable sequence, then report
 * the resulting prefetch/power configuration.
 */
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	/* Early BRESP exists from r2p0 and only makes sense on Cortex-A9. */
	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		/* Cross-check the CPU's ACTLR FLZ bit against the L2C. */
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
					   L310_STNDBY_MODE_EN;

	/* Allow NS lockdown access so l2c310_unlock() can clear them. */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Read back the value the hardware actually accepted. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		/* Keep per-CPU ACTLR bits in sync as CPUs come and go. */
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}
691
692static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
693 struct outer_cache_fns *fns)
694{
695 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
696 const char *errata[8];
697 unsigned n = 0;
698
699 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
700 revision < L310_CACHE_ID_RTL_R2P0 &&
701
702 fns->inv_range == l2c210_inv_range) {
703 fns->inv_range = l2c310_inv_range_erratum;
704 fns->flush_range = l2c310_flush_range_erratum;
705 errata[n++] = "588369";
706 }
707
708 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
709 revision >= L310_CACHE_ID_RTL_R2P0 &&
710 revision < L310_CACHE_ID_RTL_R3P1) {
711 fns->flush_all = l2c310_flush_all_erratum;
712 errata[n++] = "727915";
713 }
714
715 if (revision >= L310_CACHE_ID_RTL_R3P0 &&
716 revision < L310_CACHE_ID_RTL_R3P2) {
717 u32 val = l2x0_saved_regs.prefetch_ctrl;
718
719 if (val & (BIT(30) | BIT(23))) {
720 val &= ~(BIT(30) | BIT(23));
721 l2x0_saved_regs.prefetch_ctrl = val;
722 errata[n++] = "752271";
723 }
724 }
725
726 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
727 revision == L310_CACHE_ID_RTL_R3P0) {
728 sync_reg_offset = L2X0_DUMMY_REG;
729 errata[n++] = "753970";
730 }
731
732 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
733 errata[n++] = "769419";
734
735 if (n) {
736 unsigned i;
737
738 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
739 for (i = 0; i < n; i++)
740 pr_cont(" %s", errata[i]);
741 pr_cont(" enabled\n");
742 }
743}
744
/*
 * L2C-310 disable: if full line of zeros was enabled, the CPU-side
 * ACTLR bits must be cleared before the L2 is disabled, otherwise the
 * CPU would keep issuing FLZ writes to a disabled cache.
 */
static void l2c310_disable(void)
{
	/* Clear the per-CPU FLZ/ACTLR bits before taking the L2 down. */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}
756
/*
 * L2C-310 resume: after the common re-enable, re-assert the per-CPU
 * ACTLR bits if full line of zeros was in use before suspend.
 */
static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros on this CPU if it was in use. */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
765
/*
 * Clear the lockdown registers only when non-secure access to them is
 * granted by the aux control register.
 */
static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}
771
/* Init descriptor for the non-DT L2C-310 path (see l2x0_init()). */
static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
791
/*
 * Common initialisation: merge the platform/DT aux value into the
 * hardware aux control register, work out the geometry (ways, size)
 * from the part-specific aux layout, apply erratum fixups, enable the
 * controller if it is not already enabled, and install the outer-cache
 * operations.  Returns 0 on success or -ENOMEM.
 */
static int __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Keep a copy of the init data: the callbacks (resume, unlock...)
	 * need it after the __initconst originals have been discarded.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * aux_mask selects hardware bits to keep, aux_val bits to set;
	 * overlapping bits indicate a broken platform configuration.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
		        old_aux, aux);

	/* Determine the number of ways from the part-specific layout. */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size of a way when the aux way-size field is
	 * zero; each increment of the field doubles the way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Only enable if the controller is currently disabled: firmware
	 * may have enabled it already with its own settings.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/*
	 * Save the current state for resume, now that the controller is
	 * up with its final configuration.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	return 0;
}
899
/*
 * Non-DT entry point: identify the controller from its CACHE_ID
 * register, pick the matching init descriptor (unknown parts are
 * treated as L2C-210), save its state and run the common init.
 */
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
930
931#ifdef CONFIG_OF
/* Aurora: force write-through behaviour for clean/flush range ops. */
static int l2_wt_override;

/*
 * Part number from the DT compatible string, used when the hardware
 * CACHE_ID register cannot be trusted.
 */
static u32 cache_id_part_number_from_dt;
937
938
939
940
941
942
943
944
945
946
947
948static int __init l2x0_cache_size_of_parse(const struct device_node *np,
949 u32 *aux_val, u32 *aux_mask,
950 u32 *associativity,
951 u32 max_way_size)
952{
953 u32 mask = 0, val = 0;
954 u32 cache_size = 0, sets = 0;
955 u32 way_size_bits = 1;
956 u32 way_size = 0;
957 u32 block_size = 0;
958 u32 line_size = 0;
959
960 of_property_read_u32(np, "cache-size", &cache_size);
961 of_property_read_u32(np, "cache-sets", &sets);
962 of_property_read_u32(np, "cache-block-size", &block_size);
963 of_property_read_u32(np, "cache-line-size", &line_size);
964
965 if (!cache_size || !sets)
966 return -ENODEV;
967
968
969 if (!line_size) {
970 if (block_size) {
971
972 line_size = block_size;
973 } else {
974
975 pr_warn("L2C OF: no cache block/line size given: "
976 "falling back to default size %d bytes\n",
977 CACHE_LINE_SIZE);
978 line_size = CACHE_LINE_SIZE;
979 }
980 }
981
982 if (line_size != CACHE_LINE_SIZE)
983 pr_warn("L2C OF: DT supplied line size %d bytes does "
984 "not match hardware line size of %d bytes\n",
985 line_size,
986 CACHE_LINE_SIZE);
987
988
989
990
991
992
993
994
995
996 way_size = sets * line_size;
997 *associativity = cache_size / way_size;
998
999 if (way_size > max_way_size) {
1000 pr_err("L2C OF: set size %dKB is too large\n", way_size);
1001 return -EINVAL;
1002 }
1003
1004 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
1005 cache_size, cache_size >> 10);
1006 pr_info("L2C OF: override line size: %d bytes\n", line_size);
1007 pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
1008 way_size, way_size >> 10);
1009 pr_info("L2C OF: override associativity: %d\n", *associativity);
1010
1011
1012
1013
1014
1015 way_size_bits = ilog2(way_size >> 10) - 3;
1016 if (way_size_bits < 1 || way_size_bits > 6) {
1017 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
1018 way_size);
1019 return -EINVAL;
1020 }
1021
1022 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
1023 val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
1024
1025 *aux_val &= ~mask;
1026 *aux_val |= val;
1027 *aux_mask &= ~mask;
1028
1029 return 0;
1030}
1031
/*
 * Parse the L2C-210/220 DT properties: tag/data/dirty latencies and the
 * cache geometry (via l2x0_cache_size_of_parse), folding them into the
 * aux value/mask pair.  Associativity above 8 is rejected as these
 * controllers only support up to 8 ways.
 */
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
1079
/* Init descriptor for the DT L2C-210 binding. */
static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
1099
/* Init descriptor for the DT L2C-220 binding. */
static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
1119
/*
 * Parse the L2C-310 DT properties into l2x0_saved_regs (latencies,
 * address filter, prefetch control) and the aux value/mask pair
 * (associativity, way size).  Prefetch-related properties distinguish
 * "absent" (-EINVAL, silently ignored) from "present but valueless"
 * (reported as an error).
 */
static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		/* filter[0] is the base, filter[1] the length; both are
		 * rounded to 1MB granules by the hardware layout. */
		l2x0_saved_regs.filter_end =
					ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
					| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		/* L2C-310 encodes associativity as a single 8/16 bit. */
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		/* Note: the DT property is "wrap enabled", the register
		 * bit disables wrap, hence the inverted test. */
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-data", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-data property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-instr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;
}
1246
/* Init descriptor for the DT L2C-310 binding. */
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
/*
 * Init descriptor for an L2C-310 integrated in a coherent interconnect:
 * identical to of_l2c310_data except that no .sync callback is
 * installed — the interconnect makes explicit outer sync unnecessary.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};
1296
1297
1298
1299
1300
1301
1302static unsigned long aurora_range_end(unsigned long start, unsigned long end)
1303{
1304
1305
1306
1307
1308
1309 if (end > start + MAX_RANGE_SIZE)
1310 end = start + MAX_RANGE_SIZE;
1311
1312
1313
1314
1315 if (end > PAGE_ALIGN(start+1))
1316 end = PAGE_ALIGN(start+1);
1317
1318 return end;
1319}
1320
/*
 * aurora_pa_range - perform one cache maintenance operation on a
 * physical address range via the Aurora range registers.
 * @start:  physical start address (inclusive)
 * @end:    physical end address (exclusive)
 * @offset: register offset selecting the operation
 *          (AURORA_INVAL_RANGE_REG / AURORA_CLEAN_RANGE_REG /
 *           AURORA_FLUSH_RANGE_REG)
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;

	/*
	 * Align start down and end up to cache line boundaries so the
	 * whole touched range is covered.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Process the range in aurora_range_end()-sized chunks.  The
	 * lock is held only across the base/end register pair, which
	 * must be written atomically with respect to other users;
	 * writing the operation register kicks off the maintenance, and
	 * the AURORA_SYNC_REG write afterwards drains it.
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}
/* Invalidate (discard, no writeback) the given physical range. */
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}
1353
1354static void aurora_clean_range(unsigned long start, unsigned long end)
1355{
1356
1357
1358
1359
1360 if (!l2_wt_override)
1361 aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
1362}
1363
1364static void aurora_flush_range(unsigned long start, unsigned long end)
1365{
1366 if (l2_wt_override)
1367 aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1368 else
1369 aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
1370}
1371
/* Clean+invalidate every way of the cache, then drain with a sync. */
static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean & invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	writel_relaxed(0, base + AURORA_SYNC_REG);
}
1384
/* Drain outstanding L2 operations (Aurora uses its own sync register). */
static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
1389
/*
 * Disable the L2.  All ways are cleaned+invalidated and drained before
 * the controller is turned off, so no dirty data is lost; the whole
 * sequence runs under the lock so no new operations can interleave.
 */
static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
1402
/* Save CTRL and AUX_CTRL into l2x0_saved_regs for restore on resume. */
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
1408
1409
1410
1411
1412
/*
 * For Aurora cache in no-outer mode, enable the controller via the CP15
 * coprocessor register (c15, c2, 0) by setting the "force write-allocate"
 * / broadcast bit (AURORA_CTRL_FW) before the generic enable.
 * NOTE(review): the exact semantics of this CP15 register are
 * Marvell-specific -- confirm against the Aurora documentation.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
	unsigned num_lock)
{
	u32 u;

	/* Read-modify-write the CP15 control register to set AURORA_CTRL_FW. */
	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, num_lock);
}
1426
/*
 * Aurora has its own sync register instead of L2X0_CACHE_SYNC; point
 * the generic sync path at it.
 */
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}
1432
/*
 * Parse Aurora-specific DT properties and fold the resulting aux
 * control settings into the caller's value/mask pair.
 */
static void __init aurora_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	/* Optional override of the hardware cache ID (used by l2x0_of_init). */
	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and record the write policy. */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	/* Clear the masked bits and apply our values. */
	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
1454
/*
 * Init data for "marvell,aurora-outer-cache": Aurora operating as a
 * true outer cache, with the full set of range operations.
 */
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};
1475
/*
 * Init data for "marvell,aurora-system-cache": Aurora as a system
 * cache, enabled via CP15 broadcast (aurora_enable_no_outer).  No
 * outer-cache maintenance operations are registered -- only resume.
 */
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/*
 * Broadcom address aliasing: depending on which window a physical
 * address falls in, a different offset must be added before handing it
 * to the L2 (see bcm_l2_phys_addr()).  Addresses in
 * [BCM_SYS_EMI_START_ADDR, BCM_VC_EMI_SEC3_START_ADDR) are SYS EMI;
 * everything else is treated as VC EMI.
 */
#define BCM_SYS_EMI_START_ADDR 0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL

#define BCM_SYS_EMI_OFFSET 0x40000000UL
#define BCM_VC_EMI_OFFSET 0x80000000UL
1526
1527static inline int bcm_addr_is_sys_emi(unsigned long addr)
1528{
1529 return (addr >= BCM_SYS_EMI_START_ADDR) &&
1530 (addr < BCM_VC_EMI_SEC3_START_ADDR);
1531}
1532
1533static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1534{
1535 if (bcm_addr_is_sys_emi(addr))
1536 return addr + BCM_SYS_EMI_OFFSET;
1537 else
1538 return addr + BCM_VC_EMI_OFFSET;
1539}
1540
1541static void bcm_inv_range(unsigned long start, unsigned long end)
1542{
1543 unsigned long new_start, new_end;
1544
1545 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1546
1547 if (unlikely(end <= start))
1548 return;
1549
1550 new_start = bcm_l2_phys_addr(start);
1551 new_end = bcm_l2_phys_addr(end);
1552
1553
1554 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1555 l2c210_inv_range(new_start, new_end);
1556 return;
1557 }
1558
1559
1560
1561
1562 l2c210_inv_range(new_start,
1563 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1564 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1565 new_end);
1566}
1567
1568static void bcm_clean_range(unsigned long start, unsigned long end)
1569{
1570 unsigned long new_start, new_end;
1571
1572 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1573
1574 if (unlikely(end <= start))
1575 return;
1576
1577 new_start = bcm_l2_phys_addr(start);
1578 new_end = bcm_l2_phys_addr(end);
1579
1580
1581 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1582 l2c210_clean_range(new_start, new_end);
1583 return;
1584 }
1585
1586
1587
1588
1589 l2c210_clean_range(new_start,
1590 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1591 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1592 new_end);
1593}
1594
1595static void bcm_flush_range(unsigned long start, unsigned long end)
1596{
1597 unsigned long new_start, new_end;
1598
1599 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1600
1601 if (unlikely(end <= start))
1602 return;
1603
1604 if ((end - start) >= l2x0_size) {
1605 outer_cache.flush_all();
1606 return;
1607 }
1608
1609 new_start = bcm_l2_phys_addr(start);
1610 new_end = bcm_l2_phys_addr(end);
1611
1612
1613 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1614 l2c210_flush_range(new_start, new_end);
1615 return;
1616 }
1617
1618
1619
1620
1621 l2c210_flush_range(new_start,
1622 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1623 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1624 new_end);
1625}
1626
1627
/*
 * Broadcom L2C-310 variant: uses the bcm_* range wrappers (address
 * aliasing) on top of the standard l2c310 management hooks.
 */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
1647
/*
 * Save the common registers (via l2c_save) plus the Tauros3-specific
 * AUX2 and prefetch control registers for restore on resume.
 */
static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}
1657
/* Restore the registers captured by tauros3_save(). */
static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}
1666
/*
 * Marvell Tauros3: generic enable/unlock, its own save/configure for
 * the extra registers; no outer-cache operations except resume.
 */
static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};
1680
/* Map each DT compatible string to its l2c_init_data. */
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated binding, kept for backwards compatibility */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
1694
/*
 * l2x0_of_init - probe and initialise an L2 cache controller from DT.
 * @aux_val:  platform-provided bits to set in the aux control register
 * @aux_mask: platform-provided mask of aux control bits to preserve
 *
 * Finds a matching node, maps its registers, selects the init data
 * (possibly the I/O-coherent L2C-310 variant), validates the DT
 * properties and hands off to __l2c_init().  Returns 0 on success or a
 * negative errno.
 */
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* An I/O coherent pl310 uses the variant without outer sync. */
	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	/* Warn when the platform's aux values would change the hardware. */
	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
		        old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	/* DT may override the hardware cache ID (set in aurora_of_parse). */
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id);
}
1756#endif
1757