/*
 * Architecture-specific setup for IA-64: process creation and teardown,
 * register/stack dumping, idle and CPU-offline handling, core-dump
 * register collection, and machine halt/restart/power-off.
 */
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>
#include <linux/rcupdate.h>

#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>

#include "entry.h"

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#include "sigframe.h"
void (*ia64_mark_idle)(int);

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

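/*
 * Unwinder callback used by show_stack(): walk the given frame info and
 * print a call trace (ip, sp, and RSE bsp for every frame).
 */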
void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
	unsigned long ip, sp, bsp;
	char buf[128];		/* don't make it so big that it overflows the stack! */

	printk("\nCall Trace:\n");
	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;

		unw_get_sp(info, &sp);
		unw_get_bsp(info, &bsp);
		snprintf(buf, sizeof(buf),
			 " [<%016lx>] %%s\n"
			 "                sp=%016lx bsp=%016lx\n",
			 ip, sp, bsp);
		print_symbol(buf, ip);
	} while (unw_unwind(info) >= 0);
}

void
show_stack (struct task_struct *task, unsigned long *sp)
{
	if (!task)
		unw_init_running(ia64_do_show_stack, NULL);
	else {
		struct unw_frame_info info;

		unw_init_from_blocked_task(&info, task);
		ia64_do_show_stack(&info, NULL);
	}
}

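/*
 * Dump the register state in regs.  For a user-mode trap this also reads
 * the current stacked registers back out of the user register backing
 * store; for a kernel-mode trap it prints a kernel stack trace instead.
 */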
void
show_regs (struct pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	print_modules();
	printk("\n");
	show_regs_print_info(KERN_DEFAULT);
	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s (%s)\n",
	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
	       init_utsname()->release);
	print_symbol("ip is at %s\n", ip);
	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bsps: %016lx pr  : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0, regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1, regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);

	if (user_mode(regs)) {
		/* print the stacked registers */
		unsigned long val, *bsp, ndirty;
		int i, sof, is_nat = 0;

		sof = regs->cr_ifs & 0x7f;	/* size of frame */
		ndirty = (regs->loadrs >> 19);
		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
		for (i = 0; i < sof; ++i) {
			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
		}
	} else
		show_stack(NULL, NULL);
}

void
console_print(const char *s)
{
	printk(KERN_EMERG "%s", s);
}

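/*
 * Called on the way back to user level when TIF work is pending: deliver
 * signals, run tracehook notifications, and sync the user RBS into the
 * kernel RBS.  For fsys-mode (light-weight syscall) entries the work is
 * only flagged via psr.lp and deferred.
 */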
void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
	if (fsys_mode(current, &scr->pt)) {
		/*
		 * defer signal-handling etc. until we return to
		 * privilege-level 0.
		 */
		if (!ia64_psr(&scr->pt)->lp)
			ia64_psr(&scr->pt)->lp = 1;
		return;
	}

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_needs_checking)
		/*
		 * Note: pfm_handle_work() allows us to call it with interrupts
		 * disabled, and may enable interrupts within the function.
		 */
		pfm_handle_work();
#endif

	/* deal with pending signal delivery */
	if (test_thread_flag(TIF_SIGPENDING)) {
		local_irq_enable();	/* force interrupt enable */
		ia64_do_signal(scr, in_syscall);
	}

	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
		local_irq_enable();	/* force interrupt enable */
		tracehook_notify_resume(&scr->pt);
	}

	/* copy user rbs to kernel rbs */
	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
		local_irq_enable();	/* force interrupt enable */
		ia64_sync_krbs();
	}

	local_irq_disable();	/* force interrupt disable */
}

/* "nohalt" boot option: force a polling idle loop instead of halting the CPU. */
static int __init nohalt_setup(char * str)
{
	cpu_idle_poll_ctrl(true);
	return 1;
}
__setup("nohalt", nohalt_setup);

#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take the CPU down, we just park it in the SAL rendezvous loop. */
static inline void play_dead(void)
{
	unsigned int this_cpu = smp_processor_id();

	/* Ack it */
	__this_cpu_write(cpu_state, CPU_DEAD);

	max_xtp();
	local_irq_disable();
	idle_task_exit();
	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
	/*
	 * The above is a point of no-return, the processor is
	 * expected to be in the SAL loop now.
	 */
	BUG();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

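/*
 * Architecture hooks for the generic idle loop: arch_cpu_idle_dead() parks
 * an offlined CPU, arch_cpu_idle() halts until the next interrupt.
 */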
void arch_cpu_idle_dead(void)
{
	play_dead();
}

void arch_cpu_idle(void)
{
	void (*mark_idle)(int) = ia64_mark_idle;

#ifdef CONFIG_SMP
	min_xtp();
#endif
	rmb();
	if (mark_idle)
		(*mark_idle)(1);

	safe_halt();

	if (mark_idle)
		(*mark_idle)(0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif
}

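/*
 * Save extra per-task state (debug registers and, with CONFIG_PERFMON,
 * PMU state) when switching away from a task that uses it.
 */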
void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

	info = __this_cpu_read(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 0);
#endif
}

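/*
 * Counterpart of ia64_save_extra(): restore debug registers and perfmon
 * state when switching to a task that uses them.
 */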
void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

	info = __this_cpu_read(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 1);
#endif
}

/*
 * Copy the state of an ia-64 thread.
 *
 * The child's kernel stack has a struct pt_regs at the top, a struct
 * switch_stack immediately below it, and the memory stack growing down
 * from there; the register backing store lives at IA64_RBS_OFFSET.
 *
 * Copying the ar.unat values held in pt_regs and switch_stack preserves
 * the NaT bits: spilling a register to address X sets bit (X & 0x1ff)/8
 * of ar.unat, and since the kernel stacks are page aligned (page size is
 * at least 4KB), the parent's and child's pt_regs are congruent modulo 512.
 */
int
copy_thread(unsigned long clone_flags,
	    unsigned long user_stack_base, unsigned long user_stack_size,
	    struct task_struct *p)
{
	extern char ia64_ret_from_clone;
	struct switch_stack *child_stack, *stack;
	unsigned long rbs, child_rbs, rbs_size;
	struct pt_regs *child_ptregs;
	struct pt_regs *regs = current_pt_regs();
	int retval = 0;

	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);

	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */

	if (unlikely(p->flags & PF_KTHREAD)) {
		if (unlikely(!user_stack_base)) {
			/* fork_idle() called us */
			return 0;
		}
		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
		child_stack->r4 = user_stack_base;	/* payload */
		child_stack->r5 = user_stack_size;	/* argument */
		/*
		 * Preserve PSR bits, except for bits 32-34 and 37-45,
		 * which we can't read.
		 */
		child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
		/* mark as valid, empty frame */
		child_ptregs->cr_ifs = 1UL << 63;
		child_stack->ar_fpsr = child_ptregs->ar_fpsr
			= ia64_getreg(_IA64_REG_AR_FPSR);
		child_stack->pr = (1 << PRED_KERNEL_STACK);
		child_stack->ar_bspstore = child_rbs;
		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

		/* stop some PSR bits from being inherited.
		 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
		 * therefore we must specify them explicitly here and not include them in
		 * IA64_PSR_BITS_TO_CLEAR.
		 */
		child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
					 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

		return 0;
	}
	stack = ((struct switch_stack *) regs) - 1;
	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	/* copy the parent's register backing store to the child: */
	rbs_size = stack->ar_bspstore - rbs;
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
	if (clone_flags & CLONE_SETTLS)
		child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
	if (user_stack_base) {
		child_ptregs->r12 = user_stack_base + user_stack_size - 16;
		child_ptregs->ar_bspstore = user_stack_base;
		child_ptregs->ar_rnat = 0;
		child_ptregs->loadrs = 0;
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* stop some PSR bits from being inherited.
	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
	 * therefore we must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}

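/*
 * Unwind to the user-level frame of the given task and fill in an
 * elf_gregset_t in the coredump layout described below.
 */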
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
	unsigned long uninitialized_var(ip);	/* set by unw_get_rp() below */
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* user-level bsp */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}

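/*
 * Collect the task's floating-point registers for a core dump: f2-f31 from
 * the unwind info, f32-f127 from the saved fph partition (if valid).
 */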
void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */

	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);
}

void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}

void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}

void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
}

/*
 * Clean up state associated with a thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
	/* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
}

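/*
 * get_wchan(): return the address a blocked task is sleeping at, i.e. the
 * first instruction pointer outside the scheduler functions, or 0 if the
 * task is running or cannot be unwound.
 */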
unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * Note: p may not actually be blocked (it could be current or
	 * running on another CPU), so rather than trusting p->state we
	 * simply try to unwind it, re-checking the state as we go and
	 * giving up (returning 0) if the unwind fails.
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (p->state == TASK_RUNNING)
			return 0;
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);
	return 0;
}

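/*
 * Halt the CPU in the lowest-power implemented halt state reported by
 * PAL_HALT_INFO, looping forever.
 */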
void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		if (power_info[i].pal_power_mgmt_info_s.im
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

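/*
 * Take all other CPUs offline (when CPU hotplug is available) and, with
 * CONFIG_KEXEC, quiesce the IOSAPIC in preparation for reboot or kexec.
 */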
void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			cpu_down(cpu);
	}
#endif
#ifdef CONFIG_KEXEC
	kexec_disable_iosapic();
#endif
}

void
machine_restart (char *restart_cmd)
{
	(void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
	efi_reboot(REBOOT_WARM, NULL);
}

void
machine_halt (void)
{
	(void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
	cpu_halt();
}

void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}