#include "ddr3_init.h"
#include "mv_ddr_training_db.h"
#include "mv_ddr_regs.h"
#include "mv_ddr_sys_env_lib.h"

#define DDR_INTERFACES_NUM		1
#define DDR_INTERFACE_OCTETS_NUM	5

#define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000
#define ADDRESS_FILTERING_END_REGISTER	0x8c04

#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING

/* Thermal sensor (TSEN) registers used to read the junction temperature */
#define TSEN_CONTROL_LSB_REG			0xE4070
#define TSEN_CONTROL_LSB_TC_TRIM_OFFSET		0
#define TSEN_CONTROL_LSB_TC_TRIM_MASK		(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
#define TSEN_CONTROL_MSB_REG			0xE4074
#define TSEN_CONTROL_MSB_RST_OFFSET		8
#define TSEN_CONTROL_MSB_RST_MASK		(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
#define TSEN_STATUS_REG				0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET	10
#define TSEN_STATUS_READOUT_VALID_MASK		(0x1 << TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET		0
#define TSEN_STATUS_TEMP_OUT_MASK		(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)

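/*
 * Static DLB register configuration table; the list is terminated by a
 * {0x0, 0x0} entry and applied by ddr3_new_tip_dlb_config().
 */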
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}
};

static struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
	return &ddr3_dlb_config_table[0];
}

/* bw_per_freq values, indexed by enum mv_ddr_freq */
static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,
	0x4,
	0x4,
	0x5,
	0x5,
	0x5,
	0x5,
	0x3,
	0x3,
	0x4,
	0x5,
	0x5,
	0x3,
	0x5,
	0x3,
	0x5
};

/* rate_per_freq values, indexed by enum mv_ddr_freq */
static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,
	0x2,
	0x2,
	0x2,
	0x2,
	0x3,
	0x3,
	0x1,
	0x1,
	0x2,
	0x2,
	0x2,
	0x1,
	0x2,
	0x1,
	0x2
};

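/*
 * VCO frequency (in MHz) per sample-at-reset clock-select value, one table
 * for a 25 MHz reference clock and one for 40 MHz. The tables are indexed
 * by the RST2_CPU_DDR_CLOCK_SELECT_IN field of the SAR register.
 */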
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};

static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,
	1332,
	800,
	800,
	1066,
	1066,
	1200,
	2400,
	1332,
	1332,
	1500,
	1600,
	1600,
	1600,
	1700,
	1560,
	1866,
	1866,
	1800,
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800
};

/* Set to 1 when the target frequency is not taken from sample-at-reset */
static u32 async_mode_at_tf;

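/* DQ bit to PHY pin mapping, registered with the training IP in mv_ddr_sw_db_init() */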
static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,
	8, 9, 1, 7, 2, 6, 3, 0,
	3, 9, 7, 8, 1, 0, 2, 6,
	1, 0, 6, 2, 8, 3, 7, 9,
	0, 1, 2, 9, 7, 8, 3, 6,
};

void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq freq);

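/*
 * Read the junction temperature (degrees C) from the on-chip thermal sensor.
 * Returns 0 if the sensor readout is not valid yet.
 */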
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiate the TSEN hardware reset once and program the TC trim value */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);

		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}

static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
					 struct hws_tip_freq_config_info
					 *freq_config_info)
{
	if (a38x_bw_per_freq[freq] == 0xff)
		return MV_NOT_SUPPORTED;

	if (freq_config_info == NULL)
		return MV_BAD_PARAM;

	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
	freq_config_info->is_supported = 1;

	return MV_OK;
}

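/* Masked read/write helpers for DDR unit (D-unit) registers */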
static void dunit_read(u32 addr, u32 mask, u32 *data)
{
	*data = reg_read(addr) & mask;
}

static void dunit_write(u32 addr, u32 mask, u32 data)
{
	u32 reg_val = data;

	if (mask != MASK_ALL_BITS) {
		dunit_read(addr, MASK_ALL_BITS, &reg_val);
		reg_val &= (~mask);
		reg_val |= (data & mask);
	}

	reg_write(addr, reg_val);
}

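/* ODPG enable/disable and completion-polling helpers */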
#define ODPG_ENABLE_REG		0x186d4
#define ODPG_EN_OFFS		0
#define ODPG_EN_MASK		0x1
#define ODPG_EN_ENA		1
#define ODPG_EN_DONE		0
#define ODPG_DIS_OFFS		8
#define ODPG_DIS_MASK		0x1
#define ODPG_DIS_DIS		1

void mv_ddr_odpg_enable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_EN_MASK << ODPG_EN_OFFS,
		    ODPG_EN_ENA << ODPG_EN_OFFS);
}

void mv_ddr_odpg_disable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
}

void mv_ddr_odpg_done_clr(void)
{
	return;
}

int mv_ddr_is_odpg_done(u32 count)
{
	u32 i, data;

	for (i = 0; i < count; i++) {
		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
		if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
		     ODPG_EN_DONE)
			break;
	}

	if (i >= count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	return MV_OK;
}

void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}

#define DRAM_INIT_CTRL_STATUS_REG	0x18488
#define TRAINING_TRIGGER_OFFS		0
#define TRAINING_TRIGGER_MASK		0x1
#define TRAINING_TRIGGER_ENA		1
#define TRAINING_DONE_OFFS		1
#define TRAINING_DONE_MASK		0x1
#define TRAINING_DONE_DONE		1
#define TRAINING_DONE_NOT_DONE		0
#define TRAINING_RESULT_OFFS		2
#define TRAINING_RESULT_MASK		0x1
#define TRAINING_RESULT_PASS		0
#define TRAINING_RESULT_FAIL		1
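/* Poll until the D-unit reports DRAM training done, then return the pass/fail result */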
int mv_ddr_is_training_done(u32 count, u32 *result)
{
	u32 i, data;

	if (result == NULL) {
		printf("%s: NULL result pointer found\n", __func__);
		return MV_FAIL;
	}

	for (i = 0; i < count; i++) {
		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
		if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
		     TRAINING_DONE_DONE)
			break;
	}

	if (i >= count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	*result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;

	return MV_OK;
}

#define DM_PAD	10
u32 mv_ddr_dm_pad_get(void)
{
	return DM_PAD;
}

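/* Enable or disable the DDR controller select bit (bit 6) in DUAL_DUNIT_CFG_REG */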
static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
	u32 reg;

	reg = reg_read(DUAL_DUNIT_CFG_REG);

	if (enable)
		reg |= (1 << 6);
	else
		reg &= ~(1 << 6);

	reg_write(DUAL_DUNIT_CFG_REG, reg);

	return MV_OK;
}

static u8 ddr3_tip_clock_mode(u32 frequency)
{
	if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
		return 1;

	return 2;
}

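/*
 * Decode the DDR frequency selected by the sample-at-reset (SAR) strap,
 * taking the reference clock (25 MHz or 40 MHz) into account.
 */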
static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read the sample-at-reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x0:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x2:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x4:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_600;
			break;
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x8:
			*freq = MV_DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0xc:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x10:
			*freq = MV_DDR_FREQ_933;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_900;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_933;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* 40 MHz reference clock */
		switch (reg) {
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}

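/*
 * Map the SAR clock-select value to the medium (intermediate) frequency
 * used by the training flow before switching to the target frequency.
 */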
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read the sample-at-reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_300;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_360;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* 40 MHz reference clock */
		switch (reg) {
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}

static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
#if defined(CONFIG_ARMADA_39X)
	info_ptr->device_id = 0x6900;
#else
	info_ptr->device_id = 0x6800;
#endif
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}

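/*
 * PHY register file access (PRFA) helpers: poll for request completion and
 * issue read/write accesses to the DDR PHY through PHY_REG_FILE_ACCESS_REG.
 */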
static int is_prfa_done(void)
{
	u32 reg_val;
	u32 iter = 0;

	do {
		if (iter++ > MAX_POLLING_ITERATIONS) {
			printf("error: %s: polling timeout\n", __func__);
			return MV_FAIL;
		}
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		reg_val >>= PRFA_REQ_OFFS;
		reg_val &= PRFA_REQ_MASK;
	} while (reg_val == PRFA_REQ_ENA);

	return MV_OK;
}

static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);

	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* Poll for PRFA request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}

static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		for (i = 0; i < max_phy; i++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}

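/* Register the platform callbacks, DQ mapping and device attributes with the training IP */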
static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
#ifdef CONFIG_ARMADA_39X
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 1);
#else
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
#endif

	ca_delay = 0;
	delay_enable = 1;
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}

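/* Select which training stages to run, based on the target frequency and ECC mode */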
static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0;
	}

	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}

void mv_ddr_set_calib_controller(void)
{
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}

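/*
 * Program the DDR clock divider (or the asynchronous PLL when async mode is
 * selected) for the requested frequency. Only interface 0 is supported.
 */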
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 freq = mv_ddr_freq_get(frequency);

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* Get the VCO frequency index from the sample-at-reset register */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq;
	else
		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq;

	if ((async_mode_at_tf == 1) && (freq > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set the async PLL value per target frequency */
		switch (frequency) {
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* Use the MV_DDR_FREQ_667 setting as the default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* Program the clock divider and reload it */
		dunit_write(0xe4264, 0xff, 0x1f);

		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		dunit_write(0xe4264, (1 << 8), (1 << 8));

		dunit_write(0xe4264, (1 << 8), 0x0);

		dunit_write(0xe4260, (0xff << 8), 0x0);

		dunit_write(0xe4260, (0xff << 24), 0x0);

		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* D-unit training clock and 1:1/2:1 mode selection */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}


/* External read from memory: copy num_of_bursts bursts (8 words each) into data */
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
		      u32 num_of_bursts, u32 *data)
{
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		data[burst_num] = readl(reg_addr + 4 * burst_num);

	return MV_OK;
}

/* External write to memory: copy num_of_bursts bursts (8 words each) from data */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
		       u32 num_of_bursts, u32 *data)
{
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		writel(data[burst_num], reg_addr + 4 * burst_num);

	return MV_OK;
}

int mv_ddr_early_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	mv_ddr_sw_db_init(0, 0);

	if (tm->interface_params[0].memory_freq != MV_DDR_FREQ_SAR)
		async_mode_at_tf = 1;

	return MV_OK;
}

int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}


int mv_ddr_pre_training_fixup(void)
{
	return 0;
}

int mv_ddr_post_training_fixup(void)
{
	return 0;
}

int ddr3_post_run_alg(void)
{
	return MV_OK;
}

int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width in the D-unit when a 16-bit DRAM mode is used */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}


u32 mv_ddr_init_freq_get(void)
{
	enum mv_ddr_freq freq;

	mv_ddr_sar_freq_get(0, &freq);

	return freq;
}

static u32 ddr3_get_bus_width(void)
{
	u32 bus_width;

	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
		BUS_IN_USE_OFFS;

	return (bus_width == 0) ? 16 : 32;
}

static u32 ddr3_get_device_width(u32 cs)
{
	u32 device_width;

	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
			CS_STRUCT_OFFS(cs);

	return (device_width == 0) ? 8 : 16;
}

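/* Return the DRAM device density (in Mbit) for the given chip select, as encoded in SDRAM_ADDR_CTRL_REG */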
static u32 ddr3_get_device_size(u32 cs)
{
	u32 device_size_low, device_size_high, device_size;
	u32 data, cs_low_offset, cs_high_offset;

	cs_low_offset = CS_SIZE_OFFS(cs);
	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);

	data = reg_read(SDRAM_ADDR_CTRL_REG);
	device_size_low = (data >> cs_low_offset) & 0x3;
	device_size_high = (data >> cs_high_offset) & 0x1;

	device_size = device_size_low | (device_size_high << 2);

	switch (device_size) {
	case 0:
		return 2048;
	case 2:
		return 512;
	case 3:
		return 1024;
	case 4:
		return 4096;
	case 5:
		return 8192;
	case 1:
	default:
		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
		/* Zero is returned as an error indication */
		return 0;
	}
}

int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
{
	u32 cs_mem_size;

	/* Calculate in MiB: device density [Mbit] * (bus width / device width) / 8 */
	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		       ddr3_get_device_size(cs)) / 8;

	/* Scale by the controller bus width multiplier */
	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	/* Convert MiB to bytes */
	*cs_size = cs_mem_size << 20;

	return MV_OK;
}

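/*
 * Configure the fast-path DRAM windows for each enabled chip select and
 * update the address filtering end register with the total DRAM size.
 */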
static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast-path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* Get the CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
				return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * If the device address range does not cover the
			 * memory size defined in the topology, limit it to
			 * DEVICE_MAX_DRAM_ADDRESS_SIZE.
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/* A 16-bit device can address twice as much */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated physical memory size from 0x%x to 0x%x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* Set the fast-path window control register for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set the fast-path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Accumulate the total size in 64 KiB units to avoid
			 * overflow; if the sum reaches 4 GiB, cap it at the
			 * L2 filtering limit.
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;

			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to the sum of all DRAM sizes */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}

static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Restore the saved XBAR window configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open the fast-path window for the first enabled CS only */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}

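/*
 * Save the current XBAR window configuration and set up temporary windows
 * (one per enabled chip select) for the training sequence.
 */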
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;
	num_of_win_regs = 16;

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/* Disable L2 filtering while the training windows are open */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR window 19 */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save the XBAR window configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open a training window per enabled CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);
			reg |= (SDRAM_CS_SIZE & 0xffff0000);

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}

/* Saved XBAR window configuration, restored after training */
static u32 win[16];

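/*
 * SoC-level setup run before the training sequence: CPU configuration,
 * suspend-wakeup handling, 2:1 ready phase adjustments, AXI control
 * setup and temporary XBAR training windows.
 */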
int mv_ddr_pre_training_soc_config(const char *ddr_type)
{
	u32 soc_num;
	u32 reg_val;

	/* Set the Marvell ID bit for every active CPU core */
	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
		SAR1_CPU_CORE_OFFSET;
	switch (soc_num) {
	case 0x3:
		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x1:
		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x0:
		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	default:
		break;
	}

	/* Mask the DRAM reset in a suspend-wakeup flow to preserve DRAM contents */
	if (mv_ddr_sys_env_suspend_wakeup_check() ==
	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
		reg_bit_set(SDRAM_INIT_CTRL_REG,
			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
	}

	/* Skip the sequence if the boot ROM already initialized the DRAM */
	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
		printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
		return MV_OK;
	}

	/* Set the ready increment phase values for 2:1 mode */
	reg_val = reg_read(TRAINING_DBG_3_REG);

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));

	reg_write(TRAINING_DBG_3_REG, reg_val);

	/* AXI control register setup */
	reg_write(AXI_CTRL_REG, 0);

	/* Save the current windows and open temporary training windows */
	ddr3_save_and_set_training_windows(win);

	return MV_OK;
}

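/* Apply the static DLB configuration table and enable the DLB features */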
static int ddr3_new_tip_dlb_config(void)
{
	u32 reg, i = 0;
	struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();

	/* Write the configuration table until the {0x0, 0x0} delimiter */
	while (config_table_ptr[i].reg_addr != 0) {
		reg_write(config_table_ptr[i].reg_addr,
			  config_table_ptr[i].reg_data);
		i++;
	}

	/* Enable DLB */
	reg = reg_read(DLB_CTRL_REG);
	reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
	       ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
	       ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
	       ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
	       ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
	       (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
	       (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
	       (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
	       (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg_write(DLB_CTRL_REG, reg);

	return MV_OK;
}

int mv_ddr_post_training_soc_config(const char *ddr_type)
{
	u32 reg_val;

	/* Restore and set the final windows */
	ddr3_restore_and_set_final_windows(win, ddr_type);

	/* Update the DRAM init indication in the boot ROM register */
	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
	reg_write(REG_BOOTROM_ROUTINE_ADDR,
		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));

	/* DLB configuration */
	ddr3_new_tip_dlb_config();

	return MV_OK;
}

void mv_ddr_mc_config(void)
{
	/* Memory controller initialization */
	struct init_cntr_param init_param;
	int status;

	init_param.do_mrs_phy = 1;
	init_param.is_ctrl64_bit = 0;
	init_param.init_phy = 1;
	init_param.msys_init = 1;
	status = hws_ddr3_tip_init_controller(0, &init_param);
	if (status != MV_OK)
		printf("DDR3 init controller - FAILED 0x%x\n", status);

	status = mv_ddr_mc_init();
	if (status != MV_OK)
		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
}


/* Run the memory controller initialization sequence through the training IP */
int mv_ddr_mc_init(void)
{
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}

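/*
 * Configure the DDR PHY pads: pad calibration (ZRI/ODT) values, CMOS
 * configuration and per-PHY pad control settings.
 */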
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < octets_per_if_num;
		     phy_id++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);

			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CFG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref_init_val),
				      ((0x7 << 4) | 0x7)));

			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
	    MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, 0x90, 0x6002));

	return MV_OK;
}

/* No manual calibration flow on this platform; nothing to do */
int mv_ddr_manual_cal_do(void)
{
	return 0;
}