1
2
3
4
5
6
7#include <common.h>
8#include <asm/io.h>
9#include <asm/arch/clock.h>
10#include <asm/arch/gp_padctrl.h>
11#include <asm/arch/pinmux.h>
12#include <asm/arch/tegra.h>
13#include <asm/arch-tegra/clk_rst.h>
14#include <asm/arch-tegra/pmc.h>
15#include <asm/arch-tegra/scu.h>
16#include "cpu.h"
17
18int get_num_cpus(void)
19{
20 struct apb_misc_gp_ctlr *gp;
21 uint rev;
22 debug("%s entry\n", __func__);
23
24 gp = (struct apb_misc_gp_ctlr *)NV_PA_APB_MISC_GP_BASE;
25 rev = (readl(&gp->hidrev) & HIDREV_CHIPID_MASK) >> HIDREV_CHIPID_SHIFT;
26
27 switch (rev) {
28 case CHIPID_TEGRA20:
29 return 2;
30 break;
31 case CHIPID_TEGRA30:
32 case CHIPID_TEGRA114:
33 case CHIPID_TEGRA124:
34 case CHIPID_TEGRA210:
35 default:
36 return 4;
37 break;
38 }
39}
40
41
42
43
/*
 * PLLX (CPU PLL) divider tables, indexed by [chip SKU][osc frequency].
 *
 * First index comes from tegra_get_chip_sku(); second index is an
 * enum clock_osc_freq value (presumably 13.0, 19.2, 12.0, 26.0, 38.4,
 * 48.0 MHz in that order -- TODO confirm against the clock header).
 * All-zero rows mark oscillator frequencies unsupported on that SoC.
 * Output rate is osc * n / (m * 2^p); cpcon is the charge-pump setup
 * used only on chips whose PLL has a CPCON field.
 */
struct clk_pll_table tegra_pll_x_table[TEGRA_SOC_CNT][CLOCK_OSC_FREQ_COUNT] = {
	/*
	 * First SKU entry -- targets ~1.0 GHz (e.g. 13 MHz * 1000 / 13);
	 * presumably Tegra20 -- verify against the SKU enum ordering.
	 */
	{
		{ .n = 1000, .m = 13, .p = 0, .cpcon = 12 },
		{ .n = 625, .m = 12, .p = 0, .cpcon = 8 },
		{ .n = 1000, .m = 12, .p = 0, .cpcon = 12 },
		{ .n = 1000, .m = 26, .p = 0, .cpcon = 12 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
	},
	/*
	 * Second SKU entry -- targets ~1.2 GHz (13 MHz * 923 / 10);
	 * presumably the faster T20 SKU (T25) -- TODO confirm.
	 */
	{
		{ .n = 923, .m = 10, .p = 0, .cpcon = 12 },
		{ .n = 750, .m = 12, .p = 0, .cpcon = 8 },
		{ .n = 600, .m = 6, .p = 0, .cpcon = 12 },
		{ .n = 600, .m = 13, .p = 0, .cpcon = 12 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
	},
	/*
	 * Third SKU entry -- targets 600 MHz (e.g. 13 MHz * 600 / 13);
	 * presumably Tegra30 -- TODO confirm.
	 */
	{
		{ .n = 600, .m = 13, .p = 0, .cpcon = 8 },
		{ .n = 500, .m = 16, .p = 0, .cpcon = 8 },
		{ .n = 600, .m = 12, .p = 0, .cpcon = 8 },
		{ .n = 600, .m = 26, .p = 0, .cpcon = 8 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
		{ .n = 0, .m = 0, .p = 0, .cpcon = 0 },
	},
	/*
	 * Fourth SKU entry -- ~700 MHz with p = 1 (13 MHz * 108 / 2);
	 * no cpcon fields set (PLL without CPCON). Presumably T114.
	 */
	{
		{ .n = 108, .m = 1, .p = 1 },
		{ .n = 73, .m = 1, .p = 1 },
		{ .n = 116, .m = 1, .p = 1 },
		{ .n = 108, .m = 2, .p = 1 },
		{ .n = 0, .m = 0, .p = 0 },
		{ .n = 0, .m = 0, .p = 0 },
	},
	/* Fifth SKU entry -- same dividers as the fourth; presumably T124. */
	{
		{ .n = 108, .m = 1, .p = 1 },
		{ .n = 73, .m = 1, .p = 1 },
		{ .n = 116, .m = 1, .p = 1 },
		{ .n = 108, .m = 2, .p = 1 },
		{ .n = 0, .m = 0, .p = 0 },
		{ .n = 0, .m = 0, .p = 0 },
	},
	/*
	 * Sixth SKU entry -- also provides rows for the two extra osc
	 * frequencies (38.4/48 MHz); presumably T210 -- TODO confirm.
	 */
	{
		{ .n = 108, .m = 1, .p = 1 },
		{ .n = 73, .m = 1, .p = 1 },
		{ .n = 116, .m = 1, .p = 1 },
		{ .n = 108, .m = 2, .p = 1 },
		{ .n = 36, .m = 1, .p = 1 },
		{ .n = 58, .m = 2, .p = 1 },
	},
};
153
/*
 * Bring PLLX out of IDDQ (analog power-down) before it is programmed.
 *
 * Only Tegra124/Tegra210 have the IDDQ control in PLLX_MISC3; on all
 * other SoCs this compiles to an empty function.
 */
static inline void pllx_set_iddq(void)
{
#if defined(CONFIG_TEGRA124) || defined(CONFIG_TEGRA210)
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;

	debug("%s entry\n", __func__);

	/* Clear the IDDQ bit and give the PLL a moment to wake up */
	writel(readl(&clkrst->crc_pllx_misc3) & ~PLLX_IDDQ_MASK,
	       &clkrst->crc_pllx_misc3);
	udelay(2);
	debug("%s: IDDQ: PLLX IDDQ = 0x%08X\n", __func__,
	      readl(&clkrst->crc_pllx_misc3));
#endif
}
170
/*
 * Program PLLX (the CPU PLL) with the given divider settings.
 *
 * @pll:   PLLX register block (base/misc registers)
 * @divn:  feedback divider (N)
 * @divm:  input divider (M)
 * @divp:  post divider (P)
 * @cpcon: charge-pump setup value; written to MISC only on T20/T30
 *
 * The sequence is order-sensitive: program dividers with BYPASS set,
 * write MISC, drop BYPASS, set lock-enable, then finally enable the
 * PLL. If PLLX is already running, it is left untouched.
 *
 * Return: 0 (always)
 */
int pllx_set_rate(struct clk_pll_simple *pll , u32 divn, u32 divm,
		u32 divp, u32 cpcon)
{
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	int chip = tegra_get_chip();
	u32 reg;
	debug("%s entry\n", __func__);

	/* If PLLX is already enabled, do nothing */
	if (readl(&pll->pll_base) & PLL_ENABLE_MASK) {
		debug("%s: PLLX already enabled, returning\n", __func__);
		return 0;
	}

	/* Power up the PLL first on chips that gate it (T124/T210) */
	pllx_set_iddq();

	/* Set BYPASS together with the m/n/p dividers in PLLX_BASE */
	reg = PLL_BYPASS_MASK | (divm << pllinfo->m_shift);
	reg |= (divn << pllinfo->n_shift) | (divp << pllinfo->p_shift);
	writel(reg, &pll->pll_base);

	/* Only T20/T30 PLLs take a CPCON value; later chips write 0 */
	if (chip == CHIPID_TEGRA20 || chip == CHIPID_TEGRA30)
		reg = (cpcon << pllinfo->kcp_shift);
	else
		reg = 0;

	/*
	 * Set the DCCON bit for high feedback dividers (n > 600);
	 * presumably only meaningful on the older chips -- TODO confirm
	 * against the TRM for each SoC.
	 */
	if (divn > 600)
		reg |= (1 << PLL_DCCON_SHIFT);
	writel(reg, &pll->pll_misc);

	/* Take the PLL out of bypass so the dividers take effect */
	reg = readl(&pll->pll_base);
	reg &= ~PLL_BYPASS_MASK;
	writel(reg, &pll->pll_base);
	debug("%s: base = 0x%08X\n", __func__, reg);

	/* Enable lock detection when the PLL has a lock-enable bit */
	reg = readl(&pll->pll_misc);
	if (pllinfo->lock_ena < 32)
		reg |= (1 << pllinfo->lock_ena);
	writel(reg, &pll->pll_misc);
	debug("%s: misc = 0x%08X\n", __func__, reg);

	/* Finally, enable the PLL */
	reg = readl(&pll->pll_base);
	reg |= PLL_ENABLE_MASK;
	writel(reg, &pll->pll_base);
	debug("%s: base final = 0x%08X\n", __func__, reg);

	return 0;
}
228
229void init_pllx(void)
230{
231 struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
232 struct clk_pll_simple *pll = &clkrst->crc_pll_simple[SIMPLE_PLLX];
233 int soc_type, sku_info, chip_sku;
234 enum clock_osc_freq osc;
235 struct clk_pll_table *sel;
236 debug("%s entry\n", __func__);
237
238
239 soc_type = tegra_get_chip();
240 debug("%s: SoC = 0x%02X\n", __func__, soc_type);
241
242
243 sku_info = tegra_get_sku_info();
244 debug("%s: SKU info byte = 0x%02X\n", __func__, sku_info);
245
246
247 chip_sku = tegra_get_chip_sku();
248 debug("%s: Chip SKU = %d\n", __func__, chip_sku);
249
250
251 osc = clock_get_osc_freq();
252 debug("%s: osc = %d\n", __func__, osc);
253
254
255 sel = &tegra_pll_x_table[chip_sku][osc];
256 pllx_set_rate(pll, sel->n, sel->m, sel->p, sel->cpcon);
257}
258
259void enable_cpu_clock(int enable)
260{
261 struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
262 u32 clk;
263 debug("%s entry\n", __func__);
264
265
266
267
268
269
270
271
272
273 if (enable) {
274
275 init_pllx();
276
277
278 udelay(PLL_STABILIZATION_DELAY);
279
280 writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
281 writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);
282 }
283
284
285
286
287
288 clk = readl(&clkrst->crc_clk_cpu_cmplx);
289 clk |= 1 << CPU1_CLK_STP_SHIFT;
290 if (get_num_cpus() == 4)
291 clk |= (1 << CPU2_CLK_STP_SHIFT) + (1 << CPU3_CLK_STP_SHIFT);
292
293
294 clk &= ~CPU0_CLK_STP_MASK;
295 clk |= !enable << CPU0_CLK_STP_SHIFT;
296 writel(clk, &clkrst->crc_clk_cpu_cmplx);
297
298 clock_enable(PERIPH_ID_CPU);
299}
300
301static int is_cpu_powered(void)
302{
303 struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
304
305 return (readl(&pmc->pmc_pwrgate_status) & CPU_PWRED) ? 1 : 0;
306}
307
308static void remove_cpu_io_clamps(void)
309{
310 struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
311 u32 reg;
312 debug("%s entry\n", __func__);
313
314
315 reg = readl(&pmc->pmc_remove_clamping);
316 reg |= CPU_CLMP;
317 writel(reg, &pmc->pmc_remove_clamping);
318
319
320 udelay(IO_STABILIZATION_DELAY);
321}
322
323void powerup_cpu(void)
324{
325 struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
326 u32 reg;
327 int timeout = IO_STABILIZATION_DELAY;
328 debug("%s entry\n", __func__);
329
330 if (!is_cpu_powered()) {
331
332 reg = readl(&pmc->pmc_pwrgate_toggle);
333 reg &= PARTID_CP;
334 reg |= START_CP;
335 writel(reg, &pmc->pmc_pwrgate_toggle);
336
337
338 while (!is_cpu_powered()) {
339 if (timeout-- == 0)
340 printf("CPU failed to power up!\n");
341 else
342 udelay(10);
343 }
344
345
346
347
348
349
350
351 remove_cpu_io_clamps();
352 }
353}
354
355void reset_A9_cpu(int reset)
356{
357
358
359
360
361
362
363
364 int mask = crc_rst_cpu | crc_rst_de | crc_rst_debug;
365 int num_cpus = get_num_cpus();
366 int cpu;
367
368 debug("%s entry\n", __func__);
369
370 for (cpu = 1; cpu < num_cpus; cpu++)
371 reset_cmplx_set_enable(cpu, mask, 1);
372 reset_cmplx_set_enable(0, mask, reset);
373
374
375 reset_set_enable(PERIPH_ID_CPU, reset);
376}
377
378void clock_enable_coresight(int enable)
379{
380 u32 rst, src = 2;
381
382 debug("%s entry\n", __func__);
383 clock_set_enable(PERIPH_ID_CORESIGHT, enable);
384 reset_set_enable(PERIPH_ID_CORESIGHT, !enable);
385
386 if (enable) {
387
388
389
390
391
392
393 src = CLK_DIVIDER(NVBL_PLLP_KHZ, CSITE_KHZ);
394 clock_ll_set_source_divisor(PERIPH_ID_CSI, 0, src);
395
396
397 rst = CORESIGHT_UNLOCK;
398 writel(rst, CSITE_CPU_DBG0_LAR);
399 writel(rst, CSITE_CPU_DBG1_LAR);
400 if (get_num_cpus() == 4) {
401 writel(rst, CSITE_CPU_DBG2_LAR);
402 writel(rst, CSITE_CPU_DBG3_LAR);
403 }
404 }
405}
406
407void halt_avp(void)
408{
409 debug("%s entry\n", __func__);
410
411 for (;;) {
412 writel(HALT_COP_EVENT_JTAG | (FLOW_MODE_STOP << 29),
413 FLOW_CTLR_HALT_COP_EVENTS);
414 }
415}
416