// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4/OMAP5 SMP support: platform-specific functions needed by the
 * Linux SMP boot code (CPU enumeration, secondary CPU release and
 * bring-up on OMAP4/5 and DRA7 SoCs).
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>

#include "omap-secure.h"
#include "omap-wakeupgen.h"
#include <asm/cputype.h>

#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
#include "pm.h"
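/*
 * MIDR-based CPU identification: mask out the variant and revision fields
 * and compare against the Cortex-A9 (OMAP4) and Cortex-A15 (OMAP5/DRA7)
 * part numbers. Used in omap4_smp_init_cpus() before SoC detection works.
 */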
#define CPU_MASK		0xff0ffff0
#define CPU_CORTEX_A9		0x410FC090
#define CPU_CORTEX_A15		0x410FC0F0

#define OMAP5_CORE_COUNT	0x2
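/*
 * Release values written to the WakeupGen AUX_CORE_BOOT_0 register to let
 * the secondary CPU out of its boot-ROM wait loop: GP devices write the
 * register directly, HS devices go through the secure API.
 */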
#define AUX_CORE_BOOT0_GP_RELEASE	0x020
#define AUX_CORE_BOOT0_HS_RELEASE	0x200

struct omap_smp_config {
	unsigned long cpu1_rstctrl_pa;
	void __iomem *cpu1_rstctrl_va;
	void __iomem *scu_base;
	void __iomem *wakeupgen_base;
	void *startup_addr;
};

static struct omap_smp_config cfg;
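/*
 * Per-SoC boot parameters: physical address of the CPU1 reset control
 * register and the secondary CPU startup entry point.
 */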
static const struct omap_smp_config omap443x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4_secondary_startup,
};

static const struct omap_smp_config omap446x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4460_secondary_startup,
};

static const struct omap_smp_config omap5_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x48243810,
	.startup_addr = omap5_secondary_startup,
};
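/* Serializes the boot CPU against the secondary CPU during bring-up */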
static DEFINE_SPINLOCK(boot_lock);

void __iomem *omap4_get_scu_base(void)
{
	return cfg.scu_base;
}

#ifdef CONFIG_OMAP5_ERRATA_801819
void omap5_erratum_workaround_801819(void)
{
	u32 acr, revidr;
	u32 acr_mask;

	/* REVIDR[3] indicates the erratum fix is already present in silicon */
	asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
	if (revidr & (0x1 << 3))
		return;

	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
	/*
	 * BIT(27) - Disables streaming. All write-allocate lines allocate in
	 * the L1 or L2 cache.
	 * BIT(25) - Disables streaming. All write-allocate lines allocate in
	 * the L1 cache.
	 */
	acr_mask = (0x3 << 25) | (0x3 << 27);
	/* Skip the expensive SMC call if the bits are already set */
	if ((acr & acr_mask) == acr_mask)
		return;

	acr |= acr_mask;
	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

	pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
		 __func__, smp_processor_id());
}
#else
static inline void omap5_erratum_workaround_801819(void) { }
#endif
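/*
 * smp_secondary_init hook: runs on the freshly started secondary CPU
 * before it is marked online.
 */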
static void omap4_secondary_init(unsigned int cpu)
{
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1 a secure PPA API is provided.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	if (soc_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
				       4, 0, 0, 0, 0, 0);

	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		/*
		 * Configure the CNTFRQ register for the secondary CPU,
		 * which indicates the frequency of the CPU local timers.
		 */
		set_cntfreq();
		/* Configure ACR to disable streaming WA for 801819 */
		omap5_erratum_workaround_801819();
	}

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
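/*
 * smp_boot_secondary hook: runs on the boot CPU to release and wake the
 * requested secondary CPU.
 */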
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	/*
	 * Update AuxCoreBoot0 with the boot state for the secondary core.
	 * The secondary startup routine holds the secondary core until this
	 * register releases it. On HS devices the update goes through the
	 * secure API; on GP devices the register is written directly.
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
					 0xfffffdff);
	else
		writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * SGIs (Software Generated Interrupts) are not wakeup capable from
	 * low power states. This is a known limitation on OMAP4 and needs
	 * to be worked around by a software forced clockdomain wake-up:
	 * to wake up CPU1, CPU0 forces the CPU1 clockdomain to software
	 * forced wakeup and then puts it back into hardware supervised
	 * mode. See the OMAP4430 TRM section on the power states of CPU0
	 * and CPU1 for details.
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * The GIC distributor control register changed between
		 * Cortex-A9 r1pX and r2pX: the secure banked version is now
		 * composed of two bits (bit 0 == Secure Enable, bit 1 ==
		 * Non-Secure Enable) while the Non-Secure banked register
		 * did not change. Because the ROM code is based on the r1pX
		 * GIC, its CPU1 GIC restoration breaks CPU0 Non-Secure
		 * software. The workaround is:
		 * 1) Before the CPU1 wakeup, CPU0 must disable the GIC
		 *    distributor.
		 * 2) CPU1 must re-enable the GIC distributor on its wakeup
		 *    path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that the CPU power state is set to ON to avoid a
		 * CPU powerdomain transition on wfi.
		 */
		clkdm_deny_idle_nolock(cpu1_clkdm);
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle_nolock(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);

	return 0;
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use the ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid_id() & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		cfg.scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
		BUG_ON(!cfg.scu_base);
		ncores = scu_get_core_count(cfg.scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
/*
 * Ensure the CPU1 startup address is sane: if it points into the running
 * kernel image, CPU1 was most likely parked there by an older bootloader
 * or a previous kernel, and releasing it as-is would be unsafe.
 */
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
	if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
		return false;

	return true;
}
/*
 * We may need to reset CPU1 before configuring it, otherwise kexec boot can
 * end up trying to use an old kernel startup address, or suspend-resume will
 * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
 * idle states.
 */
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
	unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
	bool needs_reset = false;
	u32 released;

	if (omap_secure_apis_support())
		released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
	else
		released = readl_relaxed(cfg.wakeupgen_base +
					 OMAP_AUX_CORE_BOOT_0) &
					 AUX_CORE_BOOT0_GP_RELEASE;
	if (released) {
		pr_warn("smp: CPU1 not parked?\n");

		return;
	}

	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
					OMAP_AUX_CORE_BOOT_1);

	/* Did the configured secondary_startup() get overwritten? */
	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
		needs_reset = true;

	/*
	 * If OMAP4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
	 * deeper idle state in WFI and would wake up to an invalid address.
	 */
	if ((soc_is_omap44xx() || soc_is_omap54xx())) {
		cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
		if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
			needs_reset = true;
	} else {
		cpu1_ns_pa_addr = 0;
	}

	if (!needs_reset || !c->cpu1_rstctrl_va)
		return;

	pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
		cpu1_startup_pa, cpu1_ns_pa_addr);

	writel_relaxed(1, c->cpu1_rstctrl_va);
	readl_relaxed(c->cpu1_rstctrl_va);
	writel_relaxed(0, c->cpu1_rstctrl_va);
}
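/*
 * smp_prepare_cpus hook: selects the per-SoC config, enables the SCU where
 * present, and programs the secondary startup address into AuxCoreBoot1.
 */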
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
	const struct omap_smp_config *c = NULL;

	if (soc_is_omap443x())
		c = &omap443x_cfg;
	else if (soc_is_omap446x())
		c = &omap446x_cfg;
	else if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x())
		c = &omap5_cfg;

	if (!c) {
		pr_err("%s Unknown SMP SoC?\n", __func__);
		return;
	}

	/* Must preserve cfg.scu_base set earlier */
	cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
	cfg.startup_addr = c->startup_addr;
	cfg.wakeupgen_base = omap_get_wakeupgen_base();

	if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x()) {
		if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
			cfg.startup_addr = omap5_secondary_hyp_startup;
		omap5_erratum_workaround_801819();
	}

	cfg.cpu1_rstctrl_va = ioremap(cfg.cpu1_rstctrl_pa, 4);
	if (!cfg.cpu1_rstctrl_va)
		return;

	/* The SCU is only present on the Cortex-A9 based (OMAP4) SoCs */
	if (cfg.scu_base)
		scu_enable(cfg.scu_base);

	omap4_smp_maybe_reset_cpu1(&cfg);

	/*
	 * Write the address of the secondary startup routine into
	 * AuxCoreBoot1, where the ROM code will jump and start executing
	 * on the secondary core once it comes out of WFE.
	 */
	if (omap_secure_apis_support())
		omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
	else
		writel_relaxed(__pa_symbol(cfg.startup_addr),
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
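/* CPU bring-up hooks used by the ARM SMP core on OMAP4/5 and DRA7 */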
const struct smp_operations omap4_smp_ops __initconst = {
	.smp_init_cpus		= omap4_smp_init_cpus,
	.smp_prepare_cpus	= omap4_smp_prepare_cpus,
	.smp_secondary_init	= omap4_secondary_init,
	.smp_boot_secondary	= omap4_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= omap4_cpu_die,
	.cpu_kill		= omap4_cpu_kill,
#endif
};