1
2
3
4
5
6#include <command.h>
7#include <config.h>
8#include <dm.h>
9#include <hang.h>
10#include <i2c.h>
11#include <ram.h>
12#include <time.h>
13#include <asm/global_data.h>
14
15#include <asm/sections.h>
16#include <linux/io.h>
17
18#include <mach/octeon_ddr.h>
19
20#define CONFIG_REF_HERTZ 50000000
21
22DECLARE_GLOBAL_DATA_PTR;
23
24
25static s64 _sign(s64 v)
26{
27 return (v < 0);
28}
29
30#ifndef DDR_NO_DEBUG
31char *lookup_env(struct ddr_priv *priv, const char *format, ...)
32{
33 char *s;
34 unsigned long value;
35 va_list args;
36 char buffer[64];
37
38 va_start(args, format);
39 vsnprintf(buffer, sizeof(buffer), format, args);
40 va_end(args);
41
42 s = ddr_getenv_debug(priv, buffer);
43 if (s) {
44 value = simple_strtoul(s, NULL, 0);
45 printf("Parameter found in environment %s=\"%s\" 0x%lx (%ld)\n",
46 buffer, s, value, value);
47 }
48
49 return s;
50}
51
52char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
53{
54 char *s;
55 u64 value;
56 va_list args;
57 char buffer[64];
58
59 va_start(args, format);
60 vsnprintf(buffer, sizeof(buffer), format, args);
61 va_end(args);
62
63 s = ddr_getenv_debug(priv, buffer);
64 if (s) {
65 value = simple_strtoull(s, NULL, 0);
66 printf("Parameter found in environment. %s = 0x%016llx\n",
67 buffer, value);
68 }
69
70 return s;
71}
72#else
/*
 * DDR_NO_DEBUG build: environment-based parameter overrides are
 * compiled out, so the lookup always reports "not found".
 */
char *lookup_env(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}
77
/* DDR_NO_DEBUG stub: see lookup_env() above — always "not found" */
char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
{
	return NULL;
}
82#endif
83
84
85#define CVMX_L2C_TADS ((OCTEON_IS_MODEL(OCTEON_CN68XX) || \
86 OCTEON_IS_MODEL(OCTEON_CN73XX) || \
87 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 4 : \
88 (OCTEON_IS_MODEL(OCTEON_CN78XX)) ? 8 : 1)
89
90
91#define CVMX_L2C_IOBS ((OCTEON_IS_MODEL(OCTEON_CN68XX) || \
92 OCTEON_IS_MODEL(OCTEON_CN78XX) || \
93 OCTEON_IS_MODEL(OCTEON_CN73XX) || \
94 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 2 : 1)
95
96#define CVMX_L2C_MAX_MEMSZ_ALLOWED (OCTEON_IS_OCTEON2() ? \
97 (32 * CVMX_L2C_TADS) : \
98 (OCTEON_IS_MODEL(OCTEON_CN70XX) ? \
99 512 : (OCTEON_IS_OCTEON3() ? 1024 : 0)))
100
101
102
103
104
105
106
107
108
/*
 * Program L2C_BIG_CTL[maxdram] for the amount of DRAM present so the
 * L2C can detect accesses past the end of memory ("big mode").
 *
 * @priv:     driver private data (register access handle)
 * @mem_size: memory size; compared against CVMX_L2C_MAX_MEMSZ_ALLOWED *
 *            1024, so apparently in Mbytes — TODO confirm unit
 * @mode:     written to the big-mode "disable" field; non-zero disables
 *            the big-mode check
 *
 * Only applies to Octeon 2/3, excluding CN63XX pass 1.
 */
static void cvmx_l2c_set_big_size(struct ddr_priv *priv, u64 mem_size, int mode)
{
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
	    !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
		union cvmx_l2c_big_ctl big_ctl;
		int bits = 0, zero_bits = 0;
		u64 mem;

		/* Clamp to the per-model maximum */
		if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024ull)) {
			printf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
			       mem_size,
			       (u64)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
			mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
		}

		/* Count total and zero bits to verify a power of two */
		mem = mem_size;
		while (mem) {
			if ((mem & 1) == 0)
				zero_bits++;
			bits++;
			mem >>= 1;
		}

		/*
		 * maxdram encodes size as a power of two above 2^9, so
		 * exactly one bit may be set and the size must be >= 512.
		 */
		if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
			printf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n",
			       mem_size);
			return;
		}

		/*
		 * NOTE(review): CN78xx pass 1.x forces big-mode off —
		 * presumably an erratum workaround; confirm against HRM.
		 */
		if (mode == 0 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			mode = 1;

		big_ctl.u64 = 0;
		big_ctl.s.maxdram = bits - 9;
		big_ctl.cn61xx.disable = mode;
		l2c_wr(priv, CVMX_L2C_BIG_CTL_REL, big_ctl.u64);
	}
}
151
152static u32 octeon3_refclock(u32 alt_refclk, u32 ddr_hertz,
153 struct dimm_config *dimm_config)
154{
155 u32 ddr_ref_hertz = CONFIG_REF_HERTZ;
156 int ddr_type;
157 int spd_dimm_type;
158
159 debug("%s(%u, %u, %p)\n", __func__, alt_refclk, ddr_hertz, dimm_config);
160
161
162
163
164
165
166
167
168
169
170
171 if (alt_refclk) {
172
173
174
175
176 ddr_ref_hertz = alt_refclk * 1000000;
177 printf("%s: DRAM init: %d MHz refclk is REQUESTED ALWAYS\n",
178 __func__, alt_refclk);
179 } else if (ddr_hertz > 1000000000) {
180 ddr_type = get_ddr_type(dimm_config, 0);
181 spd_dimm_type = get_dimm_module_type(dimm_config, 0, ddr_type);
182
183 debug("ddr type: 0x%x, dimm type: 0x%x\n", ddr_type,
184 spd_dimm_type);
185
186 if (ddr_type == DDR4_DRAM &&
187 (spd_dimm_type == 1 || spd_dimm_type == 5 ||
188 spd_dimm_type == 8)) {
189
190 ddr_ref_hertz = 100000000;
191 puts("DRAM init: 100 MHz refclk is REQUIRED\n");
192 }
193 }
194
195 debug("%s: speed: %u\n", __func__, ddr_ref_hertz);
196 return ddr_ref_hertz;
197}
198
199int encode_row_lsb_ddr3(int row_lsb)
200{
201 int row_lsb_start = 14;
202
203
204
205
206
207
208
209
210
211
212
213 if (octeon_is_cpuid(OCTEON_CN6XXX) ||
214 octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
215 row_lsb_start = 14;
216 else
217 printf("ERROR: Unsupported Octeon model: 0x%x\n",
218 read_c0_prid());
219
220 return row_lsb - row_lsb_start;
221}
222
223int encode_pbank_lsb_ddr3(int pbank_lsb)
224{
225
226
227
228
229
230
231
232
233
234
235
236 int pbank_lsb_start = 0;
237
238 if (octeon_is_cpuid(OCTEON_CN6XXX) ||
239 octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
240 pbank_lsb_start = 28;
241 else
242 printf("ERROR: Unsupported Octeon model: 0x%x\n",
243 read_c0_prid());
244
245 return pbank_lsb - pbank_lsb_start;
246}
247
/* Record whether interface @if_num's DDR clock has been initialized */
static void set_ddr_clock_initialized(struct ddr_priv *priv, int if_num,
				      bool inited_flag)
{
	priv->ddr_clock_initialized[if_num] = inited_flag;
}
253
/* Return non-zero when interface @if_num's DDR clock is initialized */
static int ddr_clock_initialized(struct ddr_priv *priv, int if_num)
{
	return priv->ddr_clock_initialized[if_num];
}
258
/* Latch that DRAM contents must be preserved through this (re)init */
static void set_ddr_memory_preserved(struct ddr_priv *priv)
{
	priv->ddr_memory_preserved = true;
}
263
/* True when DRAM contents are being preserved (soft/warm reset path) */
bool ddr_memory_preserved(struct ddr_priv *priv)
{
	return priv->ddr_memory_preserved;
}
268
/*
 * Bring one LMC interface's DLL out of reset (CN78xx-style DRESET
 * sequence): DLL bringup -> quad-DLL enable -> bringup off -> DRESET
 * deassert.  Each write is followed by a read-back to flush/post the
 * write, and by a settle delay.  The ordering and delays are part of
 * the hardware bring-up recipe — do not reorder.
 */
static void cn78xx_lmc_dreset_init(struct ddr_priv *priv, int if_num)
{
	union cvmx_lmcx_dll_ctl2 dll_ctl2;

	/* Step 1: assert DLL_BRINGUP */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dll_bringup = 1;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/* Read-back to ensure the write has posted */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/* Let the DLL settle */
	udelay(1);

	/* Step 2: enable the quad DLL */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.quad_dll_ena = 1;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/* Flush the write */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/* Longer settle time after enabling the quad DLL */
	udelay(10);

	/* Step 3: de-assert DLL_BRINGUP */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dll_bringup = 0;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);

	/* Flush the write */
	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));

	/* Step 4: finally release DRESET for this interface */
	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
	dll_ctl2.cn78xx.dreset = 0;
	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
}
357
/*
 * Initialize the DDR clock (PLL, dividers, DLLs) for one LMC interface.
 *
 * @priv:          driver private data
 * @ddr_conf:      board DDR configuration (DIMM + custom LMC settings)
 * @cpu_hertz:     core clock (unused on the O3 path here)
 * @ddr_hertz:     requested DDR data rate in Hz
 * @ddr_ref_hertz: DDR PLL reference clock in Hz
 * @if_num:        LMC interface being initialized
 * @if_mask:       bitmask of all enabled LMC interfaces
 *
 * The heavy lifting (PLL programming) is performed once, from
 * interface 0; later calls for other interfaces skip to not_if0.
 * Returns 0 on success, -1 when no legal PLL setting exists.
 */
int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
			 u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
			 int if_num, u32 if_mask)
{
	char *s;

	if (ddr_clock_initialized(priv, if_num))
		return 0;

	if (!ddr_clock_initialized(priv, 0)) {
		union cvmx_lmcx_reset_ctl reset_ctl;
		int i;

		/*
		 * Detect a memory-preserving reset: DDR3PSV set means the
		 * previous boot asked for DRAM contents to survive.  Note
		 * it, then clear the flags so they are re-armed cleanly.
		 */
		for (i = 3; i >= 0; --i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			if (reset_ctl.s.ddr3psv == 1) {
				debug("LMC%d Preserving memory\n", i);
				set_ddr_memory_preserved(priv);

				/* Clear the one-shot preserve flags */
				reset_ctl.s.ddr3pwarm = 0;
				reset_ctl.s.ddr3psoft = 0;
				reset_ctl.s.ddr3psv = 0;
				lmc_wr(priv, CVMX_LMCX_RESET_CTL(i),
				       reset_ctl.u64);
			}
		}
	}

	/*
	 * CN78xx / CN73xx / CNF75xx share a common DDR PLL bring-up
	 * sequence.  The steps, read-backs and delays below follow the
	 * hardware bring-up recipe; the order is significant.
	 */
	if (octeon_is_cpuid(OCTEON_CN78XX) || octeon_is_cpuid(OCTEON_CN73XX) ||
	    octeon_is_cpuid(OCTEON_CNF75XX)) {
		union cvmx_lmcx_dll_ctl2 dll_ctl2;
		union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
		union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
		struct dimm_config *dimm_config_table =
			ddr_conf->dimm_config_table;
		int en_idx, save_en_idx, best_en_idx = 0;
		u64 clkf, clkr, max_clkf = 127;
		u64 best_clkf = 0, best_clkr = 0;
		u64 best_pll_MHz = 0;
		u64 pll_MHz;
		u64 min_pll_MHz = 800;
		u64 max_pll_MHz = 5000;
		u64 error;
		u64 best_error;
		u64 best_calculated_ddr_hertz = 0;
		u64 calculated_ddr_hertz = 0;
		u64 orig_ddr_hertz = ddr_hertz;
		const int _en[] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
		int override_pll_settings;
		int new_bwadj;
		int ddr_type;
		int i;

		/* SPD key byte 0x0C identifies DDR4, anything else DDR3 */
		ddr_type = (read_spd(&dimm_config_table[0], 0,
				     DDR4_SPD_KEY_BYTE_DEVICE_TYPE) ==
			    0x0C) ? DDR4_DRAM : DDR3_DRAM;

		/* PLL setup happens once, driven from interface 0 */
		if (if_num != 0)
			goto not_if0;

		/*
		 * Put every enabled interface's DLL into a known reset
		 * state with the interface disabled.
		 */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));

			dll_ctl2.cn78xx.byp_setting = 0;
			dll_ctl2.cn78xx.byp_sel = 0;
			dll_ctl2.cn78xx.quad_dll_ena = 0;
			dll_ctl2.cn78xx.dreset = 1;
			dll_ctl2.cn78xx.dll_bringup = 0;
			dll_ctl2.cn78xx.intf_en = 0;

			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
		}

		/*
		 * Hold the DDR PLL in reset, divider in reset, PHY
		 * clocks not OK, while it is (re)programmed.
		 */
		ddr_pll_ctl.u64 = lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));

		ddr_pll_ctl.cn78xx.reset_n = 0;
		ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
		ddr_pll_ctl.cn78xx.phy_dcok = 0;

		/*
		 * dclk_invert default depends on the chip: set on
		 * CN73xx pass 1.3 and CNF75xx, clear elsewhere.
		 */
		ddr_pll_ctl.cn78xx.dclk_invert =
		    !!(octeon_is_cpuid(OCTEON_CN73XX_PASS1_3) ||
		       octeon_is_cpuid(OCTEON_CNF75XX));

		/* Allow an environment override (not on CN73xx) */
		if (!(octeon_is_cpuid(OCTEON_CN73XX))) {
			s = lookup_env(priv, "ddr0_set_dclk_invert");
			if (s) {
				ddr_pll_ctl.cn78xx.dclk_invert =
				    !!simple_strtoul(s, NULL, 0);
				debug("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
				      ddr_pll_ctl.cn78xx.dclk_invert);
			}
		}

		lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u64);
		debug("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL",
		      ddr_pll_ctl.u64);

		/* Mirror the PLL setup onto LMC1 when it is enabled */
		if (if_mask & 0x2) {
			/* CNF75xx shares LMC0's setting unchanged */
			if (!octeon_is_cpuid(OCTEON_CNF75XX)) {
				int override = 0;

				int lmc0_dclk_invert =
				    ddr_pll_ctl.cn78xx.dclk_invert;

				/*
				 * For DDR4 (except CN73xx pass 1.3) LMC1
				 * uses the opposite polarity of LMC0.
				 */
				int lmc1_dclk_invert;

				lmc1_dclk_invert =
				    ((ddr_type == DDR4_DRAM) &&
				     !octeon_is_cpuid(OCTEON_CN73XX_PASS1_3))
				    ? lmc0_dclk_invert ^ 1 :
				    lmc0_dclk_invert;

				/* Environment override for LMC1 */
				s = lookup_env(priv, "ddr1_set_dclk_invert");
				if (s) {
					lmc1_dclk_invert =
					    !!simple_strtoul(s, NULL, 0);
					override = 1;
				}
				debug("LMC1: %s DDR_PLL_CTL[dclk_invert] to %d (LMC0 %d)\n",
				      (override) ? "override" :
				      "default", lmc1_dclk_invert,
				      lmc0_dclk_invert);

				ddr_pll_ctl.cn78xx.dclk_invert =
				    lmc1_dclk_invert;
			}

			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u64);
			debug("%-45s : 0x%016llx\n",
			      "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u64);
		}

		/*
		 * Assert DDR_RESET_L to the DRAM parts on all enabled
		 * interfaces — skipped entirely when memory contents are
		 * being preserved across this init.
		 */
		for (i = 0; (!ddr_memory_preserved(priv)) && i < 4; ++i) {
			union cvmx_lmcx_reset_ctl reset_ctl;

			if ((if_mask & (1 << i)) == 0)
				continue;

			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
			reset_ctl.cn78xx.ddr3rst = 0;
			debug("LMC%d Asserting DDR_RESET_L\n", i);
			lmc_wr(priv, CVMX_LMCX_RESET_CTL(i), reset_ctl.u64);
			lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
		}

		/*
		 * Exhaustive search for CLKR / CLKF / post-divider (_en)
		 * settings that hit the requested DDR rate with the PLL
		 * VCO inside [min_pll_MHz, max_pll_MHz].  best_error ==
		 * ddr_hertz is the "nothing found yet" sentinel; if no
		 * setting is found the target is stepped down 1 MHz at a
		 * time (up to 10 MHz below the original request).
		 */
		error = ddr_hertz;
		best_error = ddr_hertz;

		debug("DDR Reference Hertz = %d\n", ddr_ref_hertz);

		while (best_error == ddr_hertz) {
			for (clkr = 0; clkr < 4; ++clkr) {
				for (en_idx =
				     sizeof(_en) / sizeof(int) -
				     1; en_idx >= 0; --en_idx) {
					save_en_idx = en_idx;
					clkf =
					    ((ddr_hertz) *
					     (clkr + 1) * (_en[save_en_idx]));
					clkf = divide_nint(clkf, ddr_ref_hertz)
					    - 1;
					pll_MHz =
					    ddr_ref_hertz *
					    (clkf + 1) / (clkr + 1) / 1000000;
					calculated_ddr_hertz =
					    ddr_ref_hertz *
					    (clkf +
					     1) / ((clkr +
						    1) * (_en[save_en_idx]));
					error =
					    ddr_hertz - calculated_ddr_hertz;

					/* VCO out of range: reject */
					if (pll_MHz < min_pll_MHz ||
					    pll_MHz > max_pll_MHz)
						continue;
					if (clkf > max_clkf) {
						/* CLKF field overflow */
						continue;
					}
					if (abs(error) > abs(best_error))
						continue;

					debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
					      clkr, save_en_idx,
					      _en[save_en_idx], clkf, pll_MHz,
					      calculated_ddr_hertz, error);

					/* Prefer faster VCO on ties */
					if (abs(error) < abs(best_error) ||
					    pll_MHz > best_pll_MHz) {
						best_pll_MHz = pll_MHz;
						best_calculated_ddr_hertz =
						    calculated_ddr_hertz;
						best_error = error;
						best_clkr = clkr;
						best_clkf = clkf;
						best_en_idx = save_en_idx;
					}
				}
			}

			override_pll_settings = 0;

			/* Environment overrides for the PLL settings */
			s = lookup_env(priv, "ddr_pll_clkr");
			if (s) {
				best_clkr = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_clkf");
			if (s) {
				best_clkf = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			s = lookup_env(priv, "ddr_pll_en_idx");
			if (s) {
				best_en_idx = simple_strtoul(s, NULL, 0);
				override_pll_settings = 1;
			}

			/* Recompute derived values after an override */
			if (override_pll_settings) {
				best_pll_MHz =
				    ddr_ref_hertz * (best_clkf +
						     1) /
				    (best_clkr + 1) / 1000000;
				best_calculated_ddr_hertz =
				    ddr_ref_hertz * (best_clkf +
						     1) /
				    ((best_clkr + 1) * (_en[best_en_idx]));
				best_error =
				    ddr_hertz - best_calculated_ddr_hertz;
			}

			debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
			      best_clkr, best_en_idx, _en[best_en_idx],
			      best_clkf, best_pll_MHz,
			      best_calculated_ddr_hertz, best_error);

			/*
			 * Nothing legal found: lower the target rate by
			 * 1 MHz and retry, up to 10 MHz below the
			 * original request.
			 */
			if (best_error == ddr_hertz) {
				if (ddr_hertz < orig_ddr_hertz - 10000000)
					break;
				ddr_hertz -= 1000000;
				best_error = ddr_hertz;
			}
		}

		if (best_error == ddr_hertz) {
			printf("ERROR: Can not compute a legal DDR clock speed configuration.\n");
			return -1;
		}

		/* PLL loop bandwidth adjustment, derived from CLKF */
		new_bwadj = (best_clkf + 1) / 10;
		debug("bwadj: %2d\n", new_bwadj);

		s = lookup_env(priv, "ddr_pll_bwadj");
		if (s) {
			/*
			 * NOTE(review): plain strtoul here while every
			 * other override uses simple_strtoul — verify
			 * intentional.
			 */
			new_bwadj = strtoul(s, NULL, 0);
			debug("bwadj: %2d\n", new_bwadj);
		}

		/* Program the chosen settings (PLL still in reset) */
		for (i = 0; i < 2; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			ddr_pll_ctl.cn78xx.ddr_ps_en = best_en_idx;
			ddr_pll_ctl.cn78xx.clkf = best_clkf;
			ddr_pll_ctl.cn78xx.clkr = best_clkr;
			ddr_pll_ctl.cn78xx.reset_n = 0;
			ddr_pll_ctl.cn78xx.bwadj = new_bwadj;

			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
			debug("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
			      i, ddr_pll_ctl.u64);

			/* CNF75xx: a single PLL serves both LMCs */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		/* Release the PLL from reset */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/* Read-back to flush previous writes */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Settle before releasing reset */
			udelay(3);

			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.reset_n = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Wait for PLL lock */
			udelay(25);

			/* CNF75xx: single shared PLL */
			if (octeon_is_cpuid(OCTEON_CNF75XX))
				break;
		}

		/*
		 * With the PLL running, configure the dividers and the
		 * drive/termination compensation per interface.
		 */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			/* Hold the DDR divider in reset while changing
			 * the mode bit.
			 */
			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/* Select DDR4 vs DDR3 clocking mode */
			ddr_pll_ctl.u64 =
			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr4_mode =
			    (ddr_type == DDR4_DRAM) ? 1 : 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Brief settle */
			udelay(1);

			/*
			 * Program COMP_CTL2 drive strengths; board
			 * values of 0 fall back to the default of 4.
			 */
			union cvmx_lmcx_comp_ctl2 comp_ctl2;
			const struct ddr3_custom_config *custom_lmc_config =
			    &ddr_conf->custom_lmc_config;

			comp_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_COMP_CTL2(i));

			comp_ctl2.cn78xx.dqx_ctl =
			    (custom_lmc_config->dqx_ctl ==
			     0) ? 4 : custom_lmc_config->dqx_ctl;

			comp_ctl2.cn78xx.ck_ctl =
			    (custom_lmc_config->ck_ctl ==
			     0) ? 4 : custom_lmc_config->ck_ctl;

			comp_ctl2.cn78xx.cmd_ctl =
			    (custom_lmc_config->cmd_ctl ==
			     0) ? 4 : custom_lmc_config->cmd_ctl;

			comp_ctl2.cn78xx.rodt_ctl = 0x4;

			/* tune offsets are sign/magnitude encoded */
			comp_ctl2.cn70xx.ptune_offset =
			    (abs(custom_lmc_config->ptune_offset) & 0x7)
			    | (_sign(custom_lmc_config->ptune_offset) << 3);
			comp_ctl2.cn70xx.ntune_offset =
			    (abs(custom_lmc_config->ntune_offset) & 0x7)
			    | (_sign(custom_lmc_config->ntune_offset) << 3);

			/* Environment overrides for each field */
			s = lookup_env(priv, "ddr_clk_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ck_ctl");
			if (s) {
				comp_ctl2.cn78xx.ck_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_cmd_ctl");
			if (s) {
				comp_ctl2.cn78xx.cmd_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_dqx_ctl");
			if (s) {
				comp_ctl2.cn78xx.dqx_ctl =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ptune_offset");
			if (s) {
				comp_ctl2.cn78xx.ptune_offset =
				    simple_strtoul(s, NULL, 0);
			}

			s = lookup_env(priv, "ddr_ntune_offset");
			if (s) {
				comp_ctl2.cn78xx.ntune_offset =
				    simple_strtoul(s, NULL, 0);
			}

			lmc_wr(priv, CVMX_LMCX_COMP_CTL2(i), comp_ctl2.u64);

			/* Read-back to flush */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Brief settle */
			udelay(1);

			/* Release the DDR divider from reset */
			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.ddr_div_reset = 0;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Brief settle */
			udelay(1);
		}

		/*
		 * Enable the upper interfaces; LMC0 (and LMC1 on 4-LMC
		 * parts) are enabled implicitly.  CN73xx/CNF75xx start
		 * at interface 1, others at 2.
		 */
		for (i = (octeon_is_cpuid(OCTEON_CN73XX) ||
			  octeon_is_cpuid(OCTEON_CNF75XX)) ? 1 : 2;
		     i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
			dll_ctl2.cn78xx.intf_en = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
			lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
		}

		/* Signal that the PHY clocks are good (phy_dcok) */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;

			ddr_pll_ctl.u64 = lmc_rd(priv,
						 CVMX_LMCX_DDR_PLL_CTL(i));
			ddr_pll_ctl.cn78xx.phy_dcok = 1;
			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));

			/* Allow the PHY to stabilize */
			udelay(20);
		}

		/*
		 * DRESET release ordering differs per chip and
		 * configuration; the dclk90 forwarding interfaces must
		 * come up in the prescribed sequence.
		 */
		if (octeon_is_cpuid(OCTEON_CN73XX)) {
			/* CN73xx: bring up LMC0 first */
			cn78xx_lmc_dreset_init(priv, 0);
		} else if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			if (if_mask == 0x3) {
				/* CNF75xx dual-LMC: LMC1 first */
				cn78xx_lmc_dreset_init(priv, 1);
			}
		} else {
			/* CN78xx dual-LMC: LMC0 first */
			if (if_mask == 0x3)
				cn78xx_lmc_dreset_init(priv, 0);

			/* CN78xx quad-LMC: LMC2/LMC3 first */
			if (if_mask == 0xf) {
				cn78xx_lmc_dreset_init(priv, 2);
				cn78xx_lmc_dreset_init(priv, 3);
			}
		}

		/*
		 * Set up dclk90 forwarding for the dual-LMC case.  The
		 * forwarding source interface differs on CNF75xx.
		 */
		if (if_mask == 0x3) {
			int temp_lmc_if_num = octeon_is_cpuid(OCTEON_CNF75XX) ?
			    1 : 0;

			/* Disable recalibration, select the fwd byte */
			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;

			if (octeon_is_cpuid(OCTEON_CNF75XX))
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			else
				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 1;

			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));

			/* Then enable clock forwarding */
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			lmc_wr(priv,
			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
			       ddr_dll_ctl3.u64);

			/* Flush the write */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
		}

		/* Quad-LMC dclk90 forwarding: LMC2 feeds LMC3 path */
		if (if_mask == 0xf) {
			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			ddr_dll_ctl3.u64 = 0;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 2;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/* Flush the writes */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));

			/* Enable forwarding on LMC2 ... */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(2));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);

			/* ... and on LMC3 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);

			/* Flush the writes */
			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
		}

		if (octeon_is_cpuid(OCTEON_CNF75XX)) {
			/* CNF75xx: LMC0 is brought up after forwarding
			 * is configured.
			 */
			cn78xx_lmc_dreset_init(priv, 0);
		}

		/* Dual-LMC: bring up the remaining interface */
		if (if_mask == 0x3) {
			if (octeon_is_cpuid(OCTEON_CNF75XX)) {
				/*
				 * NOTE(review): this re-runs dreset init
				 * on LMC0 right after the call above —
				 * looks intentional (repeat sequence),
				 * but confirm against the HRM.
				 */
				cn78xx_lmc_dreset_init(priv, 0);
			} else {
				cn78xx_lmc_dreset_init(priv, 1);
			}
		}

		/* Quad-LMC: bring up LMC0/LMC1, re-enable recal */
		if (if_mask == 0xf) {
			cn78xx_lmc_dreset_init(priv, 0);
			cn78xx_lmc_dreset_init(priv, 1);

			/* Re-enable dclk90 recalibration on LMC0/1 */
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(0));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u64);
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(1));
			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(1), ddr_dll_ctl3.u64);
		}

		/* Enable fine-tune mode on every enabled interface */
		for (i = 0; i < 4; ++i) {
			if ((if_mask & (1 << i)) == 0)
				continue;
			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(i));
			ddr_dll_ctl3.cn78xx.fine_tune_mode = 1;
			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(i), ddr_dll_ctl3.u64);
		}

		/*
		 * Configure PHY low-voltage mode.  On newer silicon it
		 * is forced off; on CN78xx pass 2.x it alternates per
		 * interface (even interfaces on, odd off).
		 */
		if (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X) ||
		    octeon_is_cpuid(OCTEON_CN73XX) ||
		    octeon_is_cpuid(OCTEON_CNF75XX)) {
			union cvmx_lmcx_phy_ctl lmc_phy_ctl;
			int i;

			for (i = 0; i < 4; ++i) {
				if ((if_mask & (1 << i)) == 0)
					continue;

				lmc_phy_ctl.u64 =
				    lmc_rd(priv, CVMX_LMCX_PHY_CTL(i));

				if (octeon_is_cpuid(OCTEON_CNF75XX) ||
				    octeon_is_cpuid(OCTEON_CN73XX_PASS1_3)) {
					/* lv_mode always off here */
					lmc_phy_ctl.s.lv_mode = 0;
				} else {
					/* Alternate per interface */
					lmc_phy_ctl.s.lv_mode = (~i) & 1;
				}

				debug("LMC%d: PHY_CTL : 0x%016llx\n",
				      i, lmc_phy_ctl.u64);
				lmc_wr(priv, CVMX_LMCX_PHY_CTL(i),
				       lmc_phy_ctl.u64);
			}
		}
	}

not_if0:

	/*
	 * Legacy (pre-CN6xxx) parts kick off the LMC init state
	 * machine via MEM_CFG0[init_start] instead.
	 */
	if ((!octeon_is_cpuid(OCTEON_CN6XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CNF7XXX)) &&
	    (!octeon_is_cpuid(OCTEON_CN7XXX))) {
		union cvmx_lmcx_mem_cfg0 mem_cfg0;

		mem_cfg0.u64 = 0;
		mem_cfg0.s.init_start = 1;
		lmc_wr(priv, CVMX_LMCX_MEM_CFG0(if_num), mem_cfg0.u64);
		lmc_rd(priv, CVMX_LMCX_MEM_CFG0(if_num));
	}

	set_ddr_clock_initialized(priv, if_num, 1);

	return 0;
}
1453
1454static void octeon_ipd_delay_cycles(u64 cycles)
1455{
1456 u64 start = csr_rd(CVMX_IPD_CLK_COUNT);
1457
1458 while (start + cycles > csr_rd(CVMX_IPD_CLK_COUNT))
1459 ;
1460}
1461
1462static void octeon_ipd_delay_cycles_o3(u64 cycles)
1463{
1464 u64 start = csr_rd(CVMX_FPA_CLK_COUNT);
1465
1466 while (start + cycles > csr_rd(CVMX_FPA_CLK_COUNT))
1467 ;
1468}
1469
/*
 * Measure the actual DDR clock rate of one interface by counting DDR
 * clocks against a known reference (IPD/FPA clock) over a fixed
 * sampling window.  Optionally performs clock initialization first.
 * Returns the measured rate in Hz, or 0 if initialization failed.
 * Resets the board if the measured rate is implausibly low.
 */
static u32 measure_octeon_ddr_clock(struct ddr_priv *priv,
				    struct ddr_conf *ddr_conf, u32 cpu_hertz,
				    u32 ddr_hertz, u32 ddr_ref_hertz,
				    int if_num, u32 if_mask)
{
	u64 core_clocks;
	u64 ddr_clocks;
	u64 calc_ddr_hertz;

	if (ddr_conf) {
		if (initialize_ddr_clock(priv, ddr_conf, cpu_hertz,
					 ddr_hertz, ddr_ref_hertz, if_num,
					 if_mask) != 0)
			return 0;
	}

	/* Octeon 2 and CN70xx: reference is the IPD clock counter */
	if (OCTEON_IS_OCTEON2() || octeon_is_cpuid(OCTEON_CN70XX)) {
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));

		/* Sample both counters over a fixed delay window */
		octeon_ipd_delay_cycles(100000000);
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
		ddr_clocks =
		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
		/* Octeon 3: reference is the FPA clock counter */
		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT);
		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));

		octeon_ipd_delay_cycles_o3(100000000);
		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT) - core_clocks;
		ddr_clocks =
		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
	} else {
		/* Legacy parts: 32-bit DCLK counter, scaled by CPU clk */
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);

		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num));

		octeon_ipd_delay_cycles(100000000);
		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
		ddr_clocks =
		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num)) - ddr_clocks;
		calc_ddr_hertz = ddr_clocks * cpu_hertz / core_clocks;
	}

	debug("core clocks: %llu, ddr clocks: %llu, calc rate: %llu\n",
	      core_clocks, ddr_clocks, calc_ddr_hertz);
	debug("LMC%d: Measured DDR clock: %lld, cpu clock: %u, ddr clocks: %llu\n",
	      if_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);

	/* A rate under 10 kHz means the clock is misconfigured */
	if (calc_ddr_hertz < 10000) {
		/*
		 * NOTE(review): 8000000 * 100 us is a very long stall
		 * before the reset message — presumably to let any
		 * console output drain; confirm intent.
		 */
		udelay(8000000 * 100);
		printf("DDR clock misconfigured on interface %d. Resetting...\n",
		       if_num);
		do_reset(NULL, 0, 0, NULL);
	}

	return calc_ddr_hertz;
}
1535
1536u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
1537{
1538 union cvmx_lmcx_rlevel_dbg rlevel_dbg;
1539 union cvmx_lmcx_rlevel_ctl rlevel_ctl;
1540
1541 rlevel_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
1542 rlevel_ctl.s.byte = idx;
1543
1544 lmc_wr(priv, CVMX_LMCX_RLEVEL_CTL(if_num), rlevel_ctl.u64);
1545 lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
1546
1547 rlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_DBG(if_num));
1548 return rlevel_dbg.s.bitmask;
1549}
1550
1551u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
1552{
1553 union cvmx_lmcx_wlevel_dbg wlevel_dbg;
1554
1555 wlevel_dbg.u64 = 0;
1556 wlevel_dbg.s.byte = idx;
1557
1558 lmc_wr(priv, CVMX_LMCX_WLEVEL_DBG(if_num), wlevel_dbg.u64);
1559 lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
1560
1561 wlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
1562 return wlevel_dbg.s.bitmask;
1563}
1564
/*
 * Score a read-leveling response bitmask for validity.
 *
 * The bitmask records which delay settings produced correct reads; a
 * good result is one solid run of ones.  This routine locates the
 * widest contiguous run and accumulates penalty points for defects:
 * empty mask, too-narrow run, zero "bubbles" inside the span, stray
 * trailing bits, and (DDR4 only) an over-long run.
 *
 * The detected window start (mstart) and width are stored back into
 * @rlevel_bitmask_p.  Returns the total error score; 0 means valid.
 */
int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
				 int ddr_type)
{
	int i;
	int errors = 0;
	u64 mask = 0;		/* filled in when a run is found */
	u8 mstart = 0;		/* start bit of the widest solid run */
	u8 width = 0;		/* width of that run */
	u8 firstbit = 0;	/* lowest set bit */
	u8 lastbit = 0;		/* highest set bit */
	u8 bubble = 0;		/* penalty: zeros below the run */
	u8 tbubble = 0;		/* penalty: zeros above the run */
	u8 blank = 0;		/* penalty: empty bitmask */
	u8 narrow = 0;		/* penalty: run narrower than 4 bits */
	u8 trailing = 0;	/* penalty: stray ones above the run */
	u64 bitmask = rlevel_bitmask_p->bm;
	u8 extras = 0;		/* extra bits extending below mstart */
	u8 toolong = 0;		/* penalty: over-long run (DDR4) */
	u64 temp;

	if (bitmask == 0) {
		blank += RLEVEL_BITMASK_BLANK_ERROR;
	} else {
		/* Locate the lowest set bit */
		temp = bitmask;
		while (!(temp & 1)) {
			firstbit++;
			temp >>= 1;
		}

		/* Locate the highest set bit */
		lastbit = firstbit;
		while ((temp >>= 1))
			lastbit++;

		/*
		 * Find the widest solid run of ones by sliding
		 * successively narrower all-ones masks across the
		 * [firstbit, lastbit] span; first fit wins.
		 */
		width = MASKRANGE_BITS;
		for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
			for (mstart = lastbit - width + 1; mstart >= firstbit;
			     --mstart) {
				temp = mask << mstart;
				if ((bitmask & temp) == temp)
					goto done_now;
			}
		}
done_now:
		/*
		 * A full-width run may extend below mstart; walk it
		 * down.  DDR4 counts those extra bits as "too long".
		 */
		if (width == MASKRANGE_BITS) {
			while ((bitmask >> (mstart - 1)) & 1) {
				--mstart;

				if (ddr_type == DDR4_DRAM)
					extras++;
			}
		}

		/* Penalty grows exponentially with the excess length */
		if (extras > 0)
			toolong =
			    RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);

		/* Runs narrower than 4 bits are penalized per bit */
		if (width < 4)
			narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;

		/*
		 * Count zero "bubbles" between the first set bit and
		 * the start of the chosen run.
		 */
		temp = bitmask >> (firstbit + 1);
		i = mstart - firstbit - 1;
		while (--i >= 0) {
			if ((temp & 1) == 0)
				bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
			temp >>= 1;
		}

		/*
		 * Above the run: stray set bits are "trailing", clear
		 * bits are trailing bubbles — both penalized.
		 */
		temp = bitmask >> (mstart + width + extras);
		i = lastbit - (mstart + width + extras - 1);
		while (--i >= 0) {
			if (temp & 1) {
				/* stray one-bit above the run */
				trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
			} else {
				/* gap above the run */
				tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
			}
			temp >>= 1;
		}
	}

	errors = bubble + tbubble + blank + narrow + trailing + toolong;

	/* Return the window geometry to the caller */
	rlevel_bitmask_p->mstart = mstart;
	rlevel_bitmask_p->width = width;

	debug_bitmask_print("bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
			    (unsigned long)bitmask, mask, width, mstart,
			    firstbit, lastbit, bubble, tbubble, blank,
			    narrow, trailing, toolong, errors,
			    (errors) ? "=> invalid" : "");

	return errors;
}
1680
1681int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
1682 union cvmx_lmcx_rlevel_ctl rlevel_ctl)
1683{
1684 int delay;
1685
1686 debug_bitmask_print(" offset_en:%d", rlevel_ctl.s.offset_en);
1687
1688 if (rlevel_ctl.s.offset_en) {
1689 delay = max((int)mstart,
1690 (int)(mstart + width - 1 - rlevel_ctl.s.offset));
1691 } else {
1692
1693 if (0) {
1694 delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
1695
1696
1697
1698
1699 delay = min(delay, mstart + width - 1);
1700 } else {
1701
1702 delay = (width - 1) / 2 + mstart;
1703 }
1704 }
1705
1706 return delay;
1707}
1708
1709
1710
/*
 * All-zero ODT configuration table (one entry per rank count, 1-4):
 * disables on-die termination entirely.
 */
const struct dimm_odt_config disable_odt_config[] = {
	{ 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	{ 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	{ 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
	{ 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
};
1717
1718
/*
 * Initialize one DRAM interface and return its size in Mbytes.
 *
 * On Octeon 3 a failed LMC init is retried up to
 * "ddr_restart_retries_limit" times (default 3, overridable via the
 * debug environment); when retries are exhausted the node is reset,
 * or -1 is returned if the retry limit was set to 0.
 */
static int init_octeon_dram_interface(struct ddr_priv *priv,
				      struct ddr_conf *ddr_conf,
				      u32 ddr_hertz, u32 cpu_hertz,
				      u32 ddr_ref_hertz, int if_num,
				      u32 if_mask)
{
	u32 mem_size_mbytes = 0;
	char *s;

	/* Optional environment override of the timing frequency */
	s = lookup_env(priv, "ddr_timing_hertz");
	if (s)
		ddr_hertz = simple_strtoul(s, NULL, 0);

	if (OCTEON_IS_OCTEON3()) {
		int lmc_restart_retries = 0;
#define DEFAULT_RESTART_RETRIES 3
		int lmc_restart_retries_limit = DEFAULT_RESTART_RETRIES;

		s = lookup_env(priv, "ddr_restart_retries_limit");
		if (s)
			lmc_restart_retries_limit = simple_strtoul(s, NULL, 0);

restart_lmc_init:
		mem_size_mbytes = init_octeon3_ddr3_interface(priv, ddr_conf,
							      ddr_hertz,
							      cpu_hertz,
							      ddr_ref_hertz,
							      if_num, if_mask);
		/* 0 Mbytes means the interface failed to configure */
		if (mem_size_mbytes == 0) {
			if (lmc_restart_retries < lmc_restart_retries_limit) {
				lmc_restart_retries++;
				printf("N0.LMC%d Configuration problem: attempting LMC reset and init restart %d\n",
				       if_num, lmc_restart_retries);
				goto restart_lmc_init;
			} else {
				if (lmc_restart_retries_limit > 0) {
					printf("INFO: N0.LMC%d Configuration: fatal problem remains after %d LMC init retries - Resetting node...\n",
					       if_num, lmc_restart_retries);
					mdelay(500);
					do_reset(NULL, 0, 0, NULL);
				} else {
					/* Retries disabled: report -1 */
					mem_size_mbytes = -1;
				}
			}
		}
	}

	debug("N0.LMC%d Configuration Completed: %d MB\n",
	      if_num, mem_size_mbytes);

	return mem_size_mbytes;
}
1772
1773#define WLEVEL_BYTE_BITS 5
1774#define WLEVEL_BYTE_MSK ((1ULL << 5) - 1)
1775
1776void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank,
1777 int byte, int delay)
1778{
1779 union cvmx_lmcx_wlevel_rankx temp_wlevel_rank;
1780
1781 if (byte >= 0 && byte <= 8) {
1782 temp_wlevel_rank.u64 = lmc_wlevel_rank->u64;
1783 temp_wlevel_rank.u64 &=
1784 ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
1785 temp_wlevel_rank.u64 |=
1786 ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
1787 lmc_wlevel_rank->u64 = temp_wlevel_rank.u64;
1788 }
1789}
1790
1791int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
1792{
1793 int delay = 0;
1794
1795 if (byte >= 0 && byte <= 8)
1796 delay =
1797 ((lmc_wlevel_rank->u64) >> (WLEVEL_BYTE_BITS *
1798 byte)) & WLEVEL_BYTE_MSK;
1799
1800 return delay;
1801}
1802
1803void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
1804 int byte, int delay)
1805{
1806 union cvmx_lmcx_rlevel_rankx temp_rlevel_rank;
1807
1808 if (byte >= 0 && byte <= 8) {
1809 temp_rlevel_rank.u64 =
1810 lmc_rlevel_rank->u64 & ~(RLEVEL_BYTE_MSK <<
1811 (RLEVEL_BYTE_BITS * byte));
1812 temp_rlevel_rank.u64 |=
1813 ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
1814 lmc_rlevel_rank->u64 = temp_rlevel_rank.u64;
1815 }
1816}
1817
1818int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte)
1819{
1820 int delay = 0;
1821
1822 if (byte >= 0 && byte <= 8)
1823 delay =
1824 ((lmc_rlevel_rank->u64) >> (RLEVEL_BYTE_BITS *
1825 byte)) & RLEVEL_BYTE_MSK;
1826
1827 return delay;
1828}
1829
/*
 * Derive an initial write-leveling delay for one byte lane from its
 * read-leveling delay: half the rlevel value, rounded up, forced even.
 */
void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
		      union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
{
	int delay = get_rl_rank(lmc_rlevel_rank, byte);

	debug("Estimating Wlevel delay byte %d: ", byte);
	debug("Rlevel=%d => ", delay);
	/* Halve (round up) and mask to the even settings 0x00..0x1e */
	delay = divide_roundup(delay, 2) & 0x1e;
	debug("Wlevel=%d\n", delay);
	upd_wl_rank(lmc_wlevel_rank, byte, delay);
}
1841
1842
1843static s64 calc_delay_trend(s64 v)
1844{
1845 if (v == 0)
1846 return 0;
1847 if (v < 0)
1848 return -1;
1849
1850 return 1;
1851}
1852
1853
1854
1855
1856
1857
1858
1859
1860
/*
 * Score adjacent byte-lane read-leveling delays for two kinds of
 * violations: trend reversals (delays should change monotonically across
 * lanes) and overly large adjacent-lane differences.
 *
 * @param rlevel_byte       per-lane delay data; sqerrs of lane i+1 is
 *                          updated with that pair's penalty
 * @param start             first lane index of the range to check
 * @param end               one past the last pair (pairs are
 *                          (start,start+1) .. (end-1,end))
 * @param max_adj_delay_inc largest allowed delay step between adjacent
 *                          lanes; 0 disables the adjacency check
 *
 * @return accumulated penalty (0 means no violations)
 */
int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
	       int max_adj_delay_inc)
{
	s64 error = 0;
	s64 delay_trend, prev_trend = 0;
	int byte_idx;
	s64 seq_err;
	s64 adj_err;
	s64 delay_inc;
	s64 delay_diff;

	for (byte_idx = start; byte_idx < end; ++byte_idx) {
		delay_diff = rlevel_byte[byte_idx + 1].delay -
			rlevel_byte[byte_idx].delay;
		delay_trend = calc_delay_trend(delay_diff);

		/*
		 * Penalize a trend reversal: the direction of change
		 * flipped relative to the last non-flat step. Flat steps
		 * (trend 0) neither penalize nor reset the trend.
		 */
		if (prev_trend != 0 && delay_trend != 0 &&
		    prev_trend != delay_trend) {
			seq_err = RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
		} else {
			seq_err = 0;
		}

		/* Magnitude of this adjacent-lane delay step */
		delay_inc = abs(delay_diff);

		/*
		 * Penalize steps larger than max_adj_delay_inc, in
		 * proportion to the excess.
		 */
		if (max_adj_delay_inc != 0 && delay_inc > max_adj_delay_inc) {
			adj_err = (delay_inc - max_adj_delay_inc) *
				RLEVEL_ADJACENT_DELAY_ERROR;
		} else {
			adj_err = 0;
		}

		/* Record the pair's penalty on the higher-numbered lane */
		rlevel_byte[byte_idx + 1].sqerrs = seq_err + adj_err;
		error += seq_err + adj_err;

		debug_bitmask_print("Byte %d: %d, Byte %d: %d, delay_trend: %ld, prev_trend: %ld, [%ld/%ld]%s%s\n",
				    byte_idx + 0,
				    rlevel_byte[byte_idx + 0].delay,
				    byte_idx + 1,
				    rlevel_byte[byte_idx + 1].delay,
				    delay_trend,
				    prev_trend, seq_err, adj_err,
				    (seq_err) ?
				    " => Nonsequential byte delay" : "",
				    (adj_err) ?
				    " => Adjacent delay error" : "");

		/* Only non-flat steps update the remembered trend */
		if (delay_trend != 0)
			prev_trend = delay_trend;
	}

	return (int)error;
}
1925
/*
 * Convert an 8-bit write-leveling response bitmask into an even delay
 * setting: skip the run of set bits starting at bit 0, find the next set
 * bit (wrapping once around the 8-bit mask), round odd positions up, and
 * reduce modulo 8.
 */
int roundup_ddr3_wlevel_bitmask(int bitmask)
{
	int pos = 0;
	int delay;

	/* Step past the leading run of 1 bits (pos stops at a 0 bit) */
	while (pos < 8 && ((bitmask >> pos) & 1))
		pos++;

	/* Find the next 1 bit, wrapping around the 8-bit mask once */
	while (pos < 16 && !((bitmask >> (pos % 8)) & 1))
		pos++;

	/* Round odd positions up to even, then wrap into 0..7 */
	delay = (pos & 1) ? pos + 1 : pos;

	return delay % 8;
}
1949
1950
/*
 * Run one LMC hardware initialization sequence on an Octeon 2 interface
 * for the given ranks, temporarily forcing 2T mode where required by
 * errata, then restore the original 2T setting.
 *
 * @param priv      driver private data
 * @param rank_mask bitmask of ranks to run the sequence on
 * @param if_num    LMC interface number
 * @param sequence  sequence selector written to LMC CONFIG (see
 *                  sequence_str[] for the meaning of each value)
 */
static void oct2_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
			  int sequence)
{
	char *s;

#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	static const char * const sequence_str[] = {
		"power-up/init",
		"read-leveling",
		"self-refresh entry",
		"self-refresh exit",
		"precharge power-down entry",
		"precharge power-down exit",
		"write-leveling",
		"illegal"
	};
#endif

	union cvmx_lmcx_control lmc_control;
	union cvmx_lmcx_config lmc_config;
	int save_ddr2t;

	lmc_control.u64 = lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
	save_ddr2t = lmc_control.s.ddr2t;

	if (save_ddr2t == 0 && octeon_is_cpuid(OCTEON_CN63XX_PASS1_X)) {
		/*
		 * CN63XX pass 1 erratum LMC-14548: the init sequence must
		 * run with 2T timing; force it on for the duration.
		 */
		debug("Forcing DDR 2T during init seq. Re: Pass 1 LMC-14548\n");
		lmc_control.s.ddr2t = 1;
	}

	/* Optional environment override of the 2T setting used for init */
	s = lookup_env(priv, "ddr_init_2t");
	if (s)
		lmc_control.s.ddr2t = simple_strtoul(s, NULL, 0);

	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);

	lmc_config.u64 = lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));

	/* Select the sequence and ranks, then kick off the sequence */
	lmc_config.s.init_start = 1;
	if (OCTEON_IS_OCTEON2())
		lmc_config.cn63xx.sequence = sequence;
	lmc_config.s.rankmask = rank_mask;

#ifdef DEBUG_PERFORM_DDR3_SEQUENCE
	debug("Performing LMC sequence: rank_mask=0x%02x, sequence=%d, %s\n",
	      rank_mask, sequence, sequence_str[sequence]);
#endif

	/* Write, read back to flush, then wait for the sequence to run */
	lmc_wr(priv, CVMX_LMCX_CONFIG(if_num), lmc_config.u64);
	lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
	udelay(600);

	/* Restore the caller's 2T setting */
	lmc_control.s.ddr2t = save_ddr2t;
	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
	lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
}
2014
2015
/*
 * Return 1 if the 9-entry DLL offset table exists and contains at least
 * one non-zero offset, 0 otherwise (including a NULL table).
 */
static int is_dll_offset_provided(const int8_t *dll_offset_table)
{
	int idx = 0;

	if (!dll_offset_table)
		return 0;

	/* Scan for the first non-zero entry */
	while (idx < 9 && !dll_offset_table[idx])
		idx++;

	return idx < 9;
}
2030
/*
 * Enable (change != 0) or disable (change == 0) the custom DLL offsets
 * on one LMC interface.
 *
 * NOTE(review): SET_DDR_DLL_CTL3() appears to operate on the local
 * "ddr_dll_ctl3" variable by name (it is invoked without passing it),
 * so the variable name must not be changed.
 */
void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change)
{
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;

	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(offset_ena, !!change);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	/* Read back to make sure the write has reached the CSR */
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
}
2040
/*
 * Load one signed DLL offset into the DLL_CTL3 CSR of an LMC interface
 * using the load_offset pulse protocol (clear load_offset, program
 * mode/offset/byte_sel, set load_offset).
 *
 * @param priv            driver private data
 * @param if_num          LMC interface number
 * @param dll_offset_mode mode_sel value (e.g. read vs write DLL)
 * @param byte_offset     signed offset; encoded as sign-magnitude with a
 *                        5-bit (Octeon 2) or 6-bit (Octeon 3) magnitude
 * @param byte            byte lane 0..8; 10 is passed through unchanged
 *                        (presumably a special "all bytes" selector —
 *                        TODO confirm against the HRM)
 *
 * @return the offset field as read back from DLL_CTL3
 *
 * NOTE(review): SET_DDR_DLL_CTL3()/GET_DDR_DLL_CTL3() appear to operate
 * on the local "ddr_dll_ctl3" variable by name; do not rename it.
 */
unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
			       int dll_offset_mode, int byte_offset, int byte)
{
	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
	int field_width = 6;

	/*
	 * The CSR's byte_sel field is one-based for lanes 0..8; the
	 * special value 10 is used as-is.
	 */
	int byte_sel = (byte == 10) ? byte : byte + 1;

	/* Octeon 2 parts have a narrower (5-bit) offset magnitude */
	if (octeon_is_cpuid(OCTEON_CN6XXX))
		field_width = 5;

	/* Step 1: clear load_offset */
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
	SET_DDR_DLL_CTL3(load_offset, 0);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	/* Step 2: program mode, sign-magnitude offset and byte select */
	SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
	SET_DDR_DLL_CTL3(offset,
			 (abs(byte_offset) & (~(-1 << field_width))) |
			 (_sign(byte_offset) << field_width));
	SET_DDR_DLL_CTL3(byte_sel, byte_sel);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	/* Step 3: pulse load_offset to latch the new value */
	SET_DDR_DLL_CTL3(load_offset, 1);
	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));

	return (unsigned short)GET_DDR_DLL_CTL3(offset);
}
2075
2076void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
2077 const char *enable_str,
2078 const int8_t *offsets, const char *byte_str,
2079 int mode)
2080{
2081 const char *s;
2082 int enabled;
2083 int provided;
2084 int byte_offset;
2085 unsigned short offset[9] = { 0 };
2086 int byte;
2087
2088 s = lookup_env(priv, enable_str);
2089 if (s)
2090 enabled = !!simple_strtol(s, NULL, 0);
2091 else
2092 enabled = -1;
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102 if (enabled == 0)
2103 return;
2104
2105 provided = is_dll_offset_provided(offsets);
2106
2107 if (enabled < 0 && !provided)
2108 return;
2109
2110 change_dll_offset_enable(priv, if_num, 0);
2111
2112 for (byte = 0; byte < 9; ++byte) {
2113
2114 byte_offset = (provided) ? offsets[byte] : 0;
2115
2116
2117 if (enabled > 0) {
2118 s = lookup_env(priv, byte_str, if_num, byte);
2119 if (s)
2120 byte_offset = simple_strtol(s, NULL, 0);
2121 }
2122
2123 offset[byte] =
2124 load_dll_offset(priv, if_num, mode, byte_offset, byte);
2125 }
2126
2127 change_dll_offset_enable(priv, if_num, 1);
2128
2129 debug("N0.LMC%d: DLL %s Offset 8:0 : 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
2130 if_num, (mode == 2) ? "Read " : "Write",
2131 offset[8], offset[7], offset[6], offset[5], offset[4],
2132 offset[3], offset[2], offset[1], offset[0]);
2133}
2134
2135void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num)
2136{
2137 char *s;
2138 int ddr_init_loops = 1;
2139 int rankx;
2140
2141 s = lookup_env(priv, "ddr%d_init_loops", if_num);
2142 if (s)
2143 ddr_init_loops = simple_strtoul(s, NULL, 0);
2144
2145 while (ddr_init_loops--) {
2146 for (rankx = 0; rankx < 8; rankx++) {
2147 if (!(rank_mask & (1 << rankx)))
2148 continue;
2149
2150 if (OCTEON_IS_OCTEON3()) {
2151
2152 oct3_ddr3_seq(priv, 1 << rankx, if_num, 0);
2153 } else {
2154
2155 oct2_ddr3_seq(priv, 1 << rankx, if_num, 0);
2156 }
2157
2158 udelay(1000);
2159
2160 s = lookup_env(priv, "ddr_sequence1");
2161 if (s) {
2162 int sequence1;
2163
2164 sequence1 = simple_strtoul(s, NULL, 0);
2165
2166 if (OCTEON_IS_OCTEON3()) {
2167 oct3_ddr3_seq(priv, 1 << rankx,
2168 if_num, sequence1);
2169 } else {
2170 oct2_ddr3_seq(priv, 1 << rankx,
2171 if_num, sequence1);
2172 }
2173 }
2174
2175 s = lookup_env(priv, "ddr_sequence2");
2176 if (s) {
2177 int sequence2;
2178
2179 sequence2 = simple_strtoul(s, NULL, 0);
2180
2181 if (OCTEON_IS_OCTEON3())
2182 oct3_ddr3_seq(priv, 1 << rankx,
2183 if_num, sequence2);
2184 else
2185 oct2_ddr3_seq(priv, 1 << rankx,
2186 if_num, sequence2);
2187 }
2188 }
2189 }
2190}
2191
/*
 * Top-level DRAM bring-up: configure debug flags, clamp the requested
 * DDR speed, set up L2 controls, validate DIMMs, measure the DDR clock
 * per interface, then initialize every valid interface and sum sizes.
 *
 * @param priv               driver private data
 * @param cpu_hertz          CPU core clock rate
 * @param ddr_hertz          requested DDR clock rate
 * @param ddr_ref_hertz      DDR reference clock rate
 * @param if_mask            bitmask of DDR interfaces to consider
 * @param ddr_conf           per-interface configuration array (4 entries)
 * @param measured_ddr_hertz output: measured DDR clock (may be NULL)
 *
 * @return total memory size in MiB, or -1 on failure
 */
static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
				 u32 ddr_hertz, u32 ddr_ref_hertz,
				 u32 if_mask,
				 struct ddr_conf *ddr_conf,
				 u32 *measured_ddr_hertz)
{
	u32 ddr_conf_valid_mask = 0;
	int memsize_mbytes = 0;
	char *eptr;
	int if_idx;
	u32 ddr_max_speed = 667000000;
	u32 calc_ddr_hertz = -1;	/* "not yet measured" sentinel */
	int val;
	int ret;

	/* Verbosity/debug/prompt flags from the environment */
	if (env_get("ddr_verbose") || env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_VERBOSE;

#ifdef DDR_VERBOSE
	priv->flags |= FLAG_DDR_VERBOSE;
#endif

	if (env_get("ddr_trace_init")) {
		printf("Parameter ddr_trace_init found in environment.\n");
		priv->flags |= FLAG_DDR_TRACE_INIT;
		priv->flags |= FLAG_DDR_VERBOSE;
	}

	/* Debug is on by default; "ddr_debug=0" turns it off */
	priv->flags |= FLAG_DDR_DEBUG;

	val = env_get_ulong("ddr_debug", 10, (u32)-1);
	switch (val) {
	case 0:
		priv->flags &= ~FLAG_DDR_DEBUG;
		printf("Parameter ddr_debug clear in environment\n");
		break;
	case (u32)-1:
		/* Variable not set: keep the default */
		break;
	default:
		printf("Parameter ddr_debug set in environment\n");
		priv->flags |= FLAG_DDR_DEBUG;
		priv->flags |= FLAG_DDR_VERBOSE;
		break;
	}
	if (env_get("ddr_prompt"))
		priv->flags |= FLAG_DDR_PROMPT;

	/* Failsafe mode implies verbose output */
	if (priv->flags & FLAG_FAILSAFE_MODE)
		priv->flags |= FLAG_DDR_VERBOSE;

#ifdef DDR_DEBUG
	priv->flags |= FLAG_DDR_DEBUG;

	priv->flags |= FLAG_DDR_VERBOSE;
#endif

	/* Clamp the requested speed to the per-family maximum */
	if ((octeon_is_cpuid(OCTEON_CN61XX) ||
	     octeon_is_cpuid(OCTEON_CNF71XX)) && ddr_max_speed > 533333333) {
		ddr_max_speed = 533333333;
	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
		/* Octeon 3 parts support higher DDR clock rates */
		ddr_max_speed = 1210000000;
	}

	if (ddr_hertz > ddr_max_speed) {
		printf("DDR clock speed %u exceeds maximum supported DDR speed, reducing to %uHz\n",
		       ddr_hertz, ddr_max_speed);
		ddr_hertz = ddr_max_speed;
	}

	/* On Octeon 3 the DDR clock must not exceed the core clock */
	if (OCTEON_IS_OCTEON3()) {
		if (ddr_hertz > cpu_hertz) {
			printf("\nFATAL ERROR: DDR speed %u exceeds CPU speed %u, exiting...\n\n",
			       ddr_hertz, cpu_hertz);
			return -1;
		}
	}

	/* L2 ECC: disabled only when requested via the environment */
	eptr = env_get("disable_l2_ecc");
	if (eptr) {
		printf("Disabling L2 ECC based on disable_l2_ecc environment variable\n");
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 1;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 0;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}

	/*
	 * L2 index aliasing: enabled by default, disabled only when
	 * requested via the environment.
	 */
	eptr = env_get("disable_l2_index_aliasing");
	if (eptr) {
		union cvmx_l2c_ctl l2c_val;

		puts("L2 index aliasing disabled.\n");

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 1;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 0;
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}

	if (OCTEON_IS_OCTEON3()) {
		/*
		 * Program the L2C early fill count from the CPU/DDR clock
		 * ratio, capped at the field maximum (255); allow an
		 * environment override.
		 */
		union cvmx_l2c_ctl l2c_ctl;
		u64 rdf_cnt;
		char *s;

		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);

		rdf_cnt = (((u64)10 * cpu_hertz) / ddr_hertz) - 1;
		rdf_cnt = rdf_cnt < 256 ? rdf_cnt : 255;
		l2c_ctl.cn78xx.rdf_cnt = rdf_cnt;

		s = lookup_env(priv, "early_fill_count");
		if (s)
			l2c_ctl.cn78xx.rdf_cnt = simple_strtoul(s, NULL, 0);

		debug("%-45s : %d, cpu_hertz:%d, ddr_hertz:%d\n",
		      "EARLY FILL COUNT  ", l2c_ctl.cn78xx.rdf_cnt, cpu_hertz,
		      ddr_hertz);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);
	}

	/* Keep only interfaces whose first DIMM validates */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if ((if_mask & (1 << if_idx)) &&
		    validate_dimm(priv,
				  &ddr_conf[(int)if_idx].dimm_config_table[0],
				  0))
			ddr_conf_valid_mask |= (1 << if_idx);
	}

	if (octeon_is_cpuid(OCTEON_CN68XX) || octeon_is_cpuid(OCTEON_CN78XX)) {
		int four_lmc_mode = 1;
		char *s;

		if (priv->flags & FLAG_FAILSAFE_MODE)
			four_lmc_mode = 0;

		/* CN68XX pass 1.0 cannot run in four-LMC mode */
		if (octeon_is_cpuid(OCTEON_CN68XX_PASS1_0))
			four_lmc_mode = 0;

		s = env_get("ddr_four_lmc");
		if (s) {
			four_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_four_lmc = %d\n",
			       four_lmc_mode);
		}

		if (!four_lmc_mode) {
			puts("Forcing two-LMC Mode.\n");
			/* Invalidate interfaces 2 and 3 */
			ddr_conf_valid_mask &= ~(3 << 2);
		}
	} else if (octeon_is_cpuid(OCTEON_CN73XX)) {
		int one_lmc_mode = 0;
		char *s;

		s = env_get("ddr_one_lmc");
		if (s) {
			one_lmc_mode = simple_strtoul(s, NULL, 0);
			printf("Parameter found in environment. ddr_one_lmc = %d\n",
			       one_lmc_mode);
		}

		if (one_lmc_mode) {
			puts("Forcing one-LMC Mode.\n");
			/* Invalidate interface 1 */
			ddr_conf_valid_mask &= ~(1 << 1);
		}
	}

	if (!ddr_conf_valid_mask) {
		printf
		    ("ERROR: No valid DIMMs detected on any DDR interface.\n");
		hang();
		return -1;	/* not reached after hang() */
	}

	/*
	 * Measure the DDR clock on each valid interface. On parts that
	 * support a 100 MHz alternate reference clock, select it first
	 * and fall back to the default reference if the measured result
	 * is not close to the requested speed.
	 */
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		u32 tmp_hertz;

		if (!(ddr_conf_valid_mask & (1 << if_idx)))
			continue;

try_again:
		/* Alternate refclk selection is only on these parts */
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			/* The refclk select lives in LMC0's PLL CTL CSR */
			if (if_idx == 0) {
				union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;

				ddr_pll_ctl.u64 =
				    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));

				if (ddr_ref_hertz == 100000000) {
					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
					    1;
					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
					       ddr_pll_ctl.u64);
					udelay(1000);
				} else {
					/* Make sure the alternate refclk
					 * select is off for the default
					 * reference clock.
					 */
					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
					    0;
					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
					       ddr_pll_ctl.u64);
					udelay(1000);
				}
			}
		} else {
			if (ddr_ref_hertz == 100000000) {
				debug("N0: DRAM init: requested 100 MHz refclk NOT SUPPORTED\n");
				ddr_ref_hertz = CONFIG_REF_HERTZ;
			}
		}

		tmp_hertz = measure_octeon_ddr_clock(priv, &ddr_conf[if_idx],
						     cpu_hertz, ddr_hertz,
						     ddr_ref_hertz, if_idx,
						     ddr_conf_valid_mask);

		/*
		 * Verify the 100 MHz refclk choice: if the measured clock
		 * is more than 5% off the requested speed, fall back to
		 * the default reference clock and re-measure.
		 */
		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
			if (if_idx == 0 && ddr_ref_hertz == 100000000) {
				int hertz_diff =
				    abs((int)tmp_hertz - (int)ddr_hertz);
				if (hertz_diff > ((int)ddr_hertz * 5 / 100)) {
					/* Too far off: no 100 MHz refclk */
					debug("N0: DRAM init: requested 100 MHz refclk NOT FOUND\n");
					ddr_ref_hertz = CONFIG_REF_HERTZ;

					set_ddr_clock_initialized(priv, 0, 0);
					goto try_again;
				} else {
					debug("N0: DRAM Init: requested 100 MHz refclk FOUND and SELECTED\n");
				}
			}
		}

		if (tmp_hertz > 0)
			calc_ddr_hertz = tmp_hertz;
		debug("LMC%d: measured speed: %u hz\n", if_idx, tmp_hertz);
	}

	if (measured_ddr_hertz)
		*measured_ddr_hertz = calc_ddr_hertz;

	/* Initialize each valid interface and accumulate the total size */
	memsize_mbytes = 0;
	for (if_idx = 0; if_idx < 4; ++if_idx) {
		if (!(ddr_conf_valid_mask & (1 << if_idx)))
			continue;

		ret = init_octeon_dram_interface(priv, &ddr_conf[if_idx],
						 calc_ddr_hertz,
						 cpu_hertz, ddr_ref_hertz,
						 if_idx, ddr_conf_valid_mask);
		if (ret > 0)
			memsize_mbytes += ret;
	}

	if (memsize_mbytes == 0)
		/* All interfaces failed to initialize */
		return -1;

	/*
	 * Optional DBI switchover, only on parts supporting it and only
	 * when requested via the environment.
	 */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
		eptr = env_get("ddr_dbi_switchover");
		if (eptr) {
			printf("DBI Switchover starting...\n");
			cvmx_dbi_switchover(priv);
			printf("DBI Switchover finished.\n");
		}
	}

	/* Optional post-init margining/tuning on Octeon 3 parts */
	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X)))
		cvmx_maybe_tune_node(priv, calc_ddr_hertz);

	/* Environment override to artificially limit the reported size */
	eptr = env_get("limit_dram_mbytes");
	if (eptr) {
		unsigned int mbytes = dectoul(eptr, NULL);

		if (mbytes > 0) {
			memsize_mbytes = mbytes;
			printf("Limiting DRAM size to %d MBytes based on limit_dram_mbytes env. variable\n",
			       mbytes);
		}
	}

	debug("LMC Initialization complete. Total DRAM %d MB\n",
	      memsize_mbytes);

	return memsize_mbytes;
}
2561
/*
 * Driver-model probe: map the LMC and L2C register regions, look up the
 * board DDR configuration, initialize DRAM, and record the resulting
 * RAM info.
 *
 * @param dev RAM udevice
 *
 * @return 0 on success, negative error code on failure
 */
static int octeon_ddr_probe(struct udevice *dev)
{
	struct ddr_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args l2c_node;
	struct ddr_conf *ddr_conf_ptr;
	u32 ddr_conf_valid_mask = 0;
	u32 measured_ddr_hertz = 0;
	int conf_table_count;
	int def_ddr_freq;
	u32 mem_mbytes = 0;
	u32 ddr_hertz;
	u32 ddr_ref_hertz;
	int alt_refclk;
	const char *eptr;
	fdt_addr_t addr;
	u64 *ptr;
	u64 val;
	int ret;
	int i;

	/* DRAM is only initialized pre-relocation; nothing to do after */
	if (gd->flags & GD_FLG_RELOC)
		return 0;

	/*
	 * Read 1 MiB starting at the end of the U-Boot image.
	 * NOTE(review): the result is discarded, so this looks like a
	 * deliberate cache/memory touch of the region beyond the image —
	 * original intent not visible here; confirm before changing.
	 */
	ptr = (u64 *)_end;
	for (i = 0; i < (0x100000 / sizeof(u64)); i++)
		val = readq(ptr++);

	/* Map the LMC register region from this device's reg property */
	priv->lmc_base = dev_remap_addr(dev);
	debug("%s: lmc_base=%p\n", __func__, priv->lmc_base);

	/* Locate and map the L2C register region via the l2c-handle */
	ret = dev_read_phandle_with_args(dev, "l2c-handle", NULL, 0, 0,
					 &l2c_node);
	if (ret) {
		printf("Can't access L2C node!\n");
		return -ENODEV;
	}

	addr = ofnode_get_addr(l2c_node.node);
	if (addr == FDT_ADDR_T_NONE) {
		printf("Can't access L2C node!\n");
		return -ENODEV;
	}

	priv->l2c_base = map_physmem(addr, 0, MAP_NOCACHE);
	debug("%s: l2c_base=%p\n", __func__, priv->l2c_base);

	/* Board-specific DDR configuration table and default frequency */
	ddr_conf_ptr = octeon_ddr_conf_table_get(&conf_table_count,
						 &def_ddr_freq);
	if (!ddr_conf_ptr) {
		printf("ERROR: unable to determine DDR configuration\n");
		return -ENODEV;
	}

	/* An interface is usable if it has SPD data (address or pointer) */
	for (i = 0; i < conf_table_count; i++) {
		if (ddr_conf_ptr[i].dimm_config_table[0].spd_addrs[0] ||
		    ddr_conf_ptr[i].dimm_config_table[0].spd_ptrs[0])
			ddr_conf_valid_mask |= 1 << i;
	}

	/* Requested DDR clock: table default, overridable via env */
	alt_refclk = 0;
	ddr_hertz = def_ddr_freq * 1000000;

	eptr = env_get("ddr_clock_hertz");
	if (eptr) {
		ddr_hertz = simple_strtoul(eptr, NULL, 0);
		gd->mem_clk = divide_nint(ddr_hertz, 1000000);
		printf("Parameter found in environment. ddr_clock_hertz = %d\n",
		       ddr_hertz);
	}

	ddr_ref_hertz = octeon3_refclock(alt_refclk,
					 ddr_hertz,
					 &ddr_conf_ptr[0].dimm_config_table[0]);

	debug("Initializing DDR, clock = %uhz, reference = %uhz\n",
	      ddr_hertz, ddr_ref_hertz);

	mem_mbytes = octeon_ddr_initialize(priv, gd->cpu_clk,
					   ddr_hertz, ddr_ref_hertz,
					   ddr_conf_valid_mask,
					   ddr_conf_ptr, &measured_ddr_hertz);
	debug("Mem size in MBYTES: %u\n", mem_mbytes);

	gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);

	debug("Measured DDR clock %d Hz\n", measured_ddr_hertz);

	if (measured_ddr_hertz != 0) {
		if (!gd->mem_clk) {
			/*
			 * mem_clk rounded to 0 MHz: record the measured
			 * value anyway.
			 */
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
		} else if ((measured_ddr_hertz > ddr_hertz + 3000000) ||
			   (measured_ddr_hertz < ddr_hertz - 3000000)) {
			/* Measured clock more than 3 MHz off the request */
			printf("\nWARNING:\n");
			printf("WARNING: Measured DDR clock mismatch!  expected: %lld MHz, measured: %lldMHz, cpu clock: %lu MHz\n",
			       divide_nint(ddr_hertz, 1000000),
			       divide_nint(measured_ddr_hertz, 1000000),
			       gd->cpu_clk);
			printf("WARNING:\n\n");
			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
		}
	}

	if (!mem_mbytes)
		return -ENODEV;

	/* Publish the RAM region for the uclass */
	priv->info.base = CONFIG_SYS_SDRAM_BASE;
	priv->info.size = MB(mem_mbytes);

	/* Size the L2C "big memory" region to match the DRAM size */
	cvmx_l2c_set_big_size(priv, mem_mbytes, 0);

	debug("Ram size %uMiB\n", mem_mbytes);

	return 0;
}
2703
2704static int octeon_get_info(struct udevice *dev, struct ram_info *info)
2705{
2706 struct ddr_priv *priv = dev_get_priv(dev);
2707
2708 *info = priv->info;
2709
2710 return 0;
2711}
2712
/* RAM uclass operations for this driver */
static struct ram_ops octeon_ops = {
	.get_info = octeon_get_info,
};

/* Device tree compatible strings matched by this driver */
static const struct udevice_id octeon_ids[] = {
	{.compatible = "cavium,octeon-7xxx-ddr4" },
	{ }
};

/* U-Boot driver-model registration for the Octeon DDR controller */
U_BOOT_DRIVER(octeon_ddr) = {
	.name = "octeon_ddr",
	.id = UCLASS_RAM,
	.of_match = octeon_ids,
	.ops = &octeon_ops,
	.probe = octeon_ddr_probe,
	.plat_auto = sizeof(struct ddr_priv),
};
2730