1
2
3#include <linux/context_tracking.h>
4#include <linux/err.h>
5#include <linux/compat.h>
6#include <linux/sched/debug.h>
7
8#include <asm/kup.h>
9#include <asm/cputime.h>
10#include <asm/hw_irq.h>
11#include <asm/interrupt.h>
12#include <asm/kprobes.h>
13#include <asm/paca.h>
14#include <asm/ptrace.h>
15#include <asm/reg.h>
16#include <asm/signal.h>
17#include <asm/switch_to.h>
18#include <asm/syscall.h>
19#include <asm/time.h>
20#include <asm/tm.h>
21#include <asm/unistd.h>
22
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
/* Per-CPU save slot for the kernel's DBCR0 value (saved in booke_load_dbcr0()) */
unsigned long global_dbcr0[NR_CPUS];
#endif

/* Signature of the entries in sys_call_table / compat_sys_call_table */
typedef long (*syscall_fn)(long, long, long, long, long, long);
28
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

/*
 * True when the interrupt exit path must hard-disable MSR[EE]/RI before
 * returning because the exit code is not reentrant (selected at runtime
 * via the interrupt_exit_not_reentrant static key on Book3S-64).
 */
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
/* Non-Book3S-64 exit paths are never restartable: always hard-disable. */
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * Prepare irq state for return from an interrupt/syscall that will enable
 * interrupts on exit. Called with local irqs (soft-)disabled.
 *
 * @restartable: true if the caller's exit sequence can be restarted, in
 *               which case EE/RI may be left enabled (unless
 *               exit_must_hard_disable()).
 *
 * Returns false if a soft-masked interrupt became pending, in which case
 * the caller must handle/replay it and retry the exit sequence; hard irq
 * and trace state are rolled back before returning false.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	/* irqs are about to be truly enabled from the tracer's viewpoint */
	trace_hardirqs_on();

	if (exit_must_hard_disable() || !restartable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* An interrupt may have come in while soft-masked; check lazily */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (exit_must_hard_disable() || !restartable) {
			/* took an interrupt; must keep track of hard-disable */
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}
75
76
77notrace long system_call_exception(long r3, long r4, long r5,
78 long r6, long r7, long r8,
79 unsigned long r0, struct pt_regs *regs)
80{
81 syscall_fn f;
82
83 kuap_lock();
84
85 regs->orig_gpr3 = r3;
86
87 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
88 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
89
90 trace_hardirqs_off();
91
92 CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
93 user_exit_irqoff();
94
95 BUG_ON(regs_is_unrecoverable(regs));
96 BUG_ON(!(regs->msr & MSR_PR));
97 BUG_ON(arch_irq_disabled_regs(regs));
98
99#ifdef CONFIG_PPC_PKEY
100 if (mmu_has_feature(MMU_FTR_PKEY)) {
101 unsigned long amr, iamr;
102 bool flush_needed = false;
103
104
105
106
107 amr = mfspr(SPRN_AMR);
108 iamr = mfspr(SPRN_IAMR);
109 regs->amr = amr;
110 regs->iamr = iamr;
111 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
112 mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
113 flush_needed = true;
114 }
115 if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
116 mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
117 flush_needed = true;
118 }
119 if (flush_needed)
120 isync();
121 } else
122#endif
123 kuap_assert_locked();
124
125 booke_restore_dbcr0();
126
127 account_cpu_user_entry();
128
129 account_stolen_time();
130
131
132
133
134
135
136
137 irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
138
139
140
141
142
143
144
145
146
147
148 if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
149 unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
150 set_bits(_TIF_RESTOREALL, ¤t_thread_info()->flags);
151
152
153
154
155
156
157
158#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
159 if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
160 !trap_is_unsupported_scv(regs)) {
161
162 hard_irq_disable();
163 mtmsr(mfmsr() | MSR_TM);
164
165
166 asm volatile(".long 0x7c00071d | ((%0) << 16)"
167 :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
168
169
170
171
172
173
174
175
176
177 return -ENOSYS;
178 }
179#endif
180
181 local_irq_enable();
182
183 if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
184 if (unlikely(trap_is_unsupported_scv(regs))) {
185
186 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
187 return regs->gpr[3];
188 }
189
190
191
192
193
194
195
196 r0 = do_syscall_trace_enter(regs);
197 if (unlikely(r0 >= NR_syscalls))
198 return regs->gpr[3];
199 r3 = regs->gpr[3];
200 r4 = regs->gpr[4];
201 r5 = regs->gpr[5];
202 r6 = regs->gpr[6];
203 r7 = regs->gpr[7];
204 r8 = regs->gpr[8];
205
206 } else if (unlikely(r0 >= NR_syscalls)) {
207 if (unlikely(trap_is_unsupported_scv(regs))) {
208
209 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
210 return regs->gpr[3];
211 }
212 return -ENOSYS;
213 }
214
215
216 barrier_nospec();
217
218 if (unlikely(is_compat_task())) {
219 f = (void *)compat_sys_call_table[r0];
220
221 r3 &= 0x00000000ffffffffULL;
222 r4 &= 0x00000000ffffffffULL;
223 r5 &= 0x00000000ffffffffULL;
224 r6 &= 0x00000000ffffffffULL;
225 r7 &= 0x00000000ffffffffULL;
226 r8 &= 0x00000000ffffffffULL;
227
228 } else {
229 f = (void *)sys_call_table[r0];
230 }
231
232 return f(r3, r4, r5, r6, r7, r8);
233}
234
/*
 * Install the thread's user DBCR0 (BookE-style debug control) on return to
 * user mode, when the thread has debug events enabled (DBCR0_IDM).
 */
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	/* Fast path: internal debug mode not requested, nothing to load */
	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	/*
	 * Disable MSR[DE] before touching the debug SPRs so no debug
	 * interrupt can be taken while they are being switched.
	 */
	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		/* Save the kernel DBCR0 for this CPU; restored on entry */
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	/* DBSR is write-one-to-clear: wipe any stale debug event status */
	mtspr(SPRN_DBSR, -1);
#endif
}
256
/*
 * Debug check (Book3S-64 only): when the paca srr_valid/hsrr_valid flag
 * claims the (H)SRR registers still hold the values from regs, verify that
 * SRR0/SRR1 (or HSRR0/HSRR1) actually match regs->nip/regs->msr. On a real
 * mismatch, warn once and clear the valid flag so the exit code reloads
 * the SRRs.
 */
static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	/* scv entry/exit does not go through the SRRs; nothing to check */
	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);

	/* In HV mode the external interrupt is delivered via the HSRRs */
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	/* Interrupt vectors that use HSRR0/HSRR1 */
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		/* All other vectors use SRR0/SRR1 */
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * NOTE(review): the re-check below presumably guards against an
	 * NMI-class interrupt clobbering the SRRs (and clearing the valid
	 * flag) between the reads above and here — only warn if the flag
	 * is still set after the compiler barrier. TODO confirm against
	 * upstream commentary.
	 */
	barrier();

	if (!*validp)
		return;

	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	/* Force the exit path to reload the (H)SRRs from regs */
	*validp = 0;
#endif
}
338
/*
 * Common return-to-user preparation: loop handling reschedule/signal work,
 * restore TM/FP/VEC state, validate return regs, and prepare irq state for
 * the exit. Called with local irqs (soft-)disabled; returns @ret possibly
 * with _TIF_RESTOREALL or'd in for the low-level exit code.
 */
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();	/* work handlers run with irqs on */
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * Signal delivery rewrites user registers (handler
			 * argument GPRs and some non-volatiles such as r1),
			 * so force a full register restore on exit.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR already has all the available
			 * math bits set, the state is live and nothing needs
			 * restoring; otherwise let restore_math() decide
			 * what to bring back.
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		/*
		 * An interrupt became pending: back out of user context,
		 * let it be handled by enabling/disabling irqs, and retry
		 * the whole exit sequence.
		 */
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}
411
412
413
414
415
416
417
418
419
420
421notrace unsigned long syscall_exit_prepare(unsigned long r3,
422 struct pt_regs *regs,
423 long scv)
424{
425 unsigned long ti_flags;
426 unsigned long ret = 0;
427 bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
428
429 CT_WARN_ON(ct_state() == CONTEXT_USER);
430
431 kuap_assert_locked();
432
433 regs->result = r3;
434
435
436 rseq_syscall(regs);
437
438 ti_flags = read_thread_flags();
439
440 if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
441 if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
442 r3 = -r3;
443 regs->ccr |= 0x10000000;
444 }
445 }
446
447 if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
448 if (ti_flags & _TIF_RESTOREALL)
449 ret = _TIF_RESTOREALL;
450 else
451 regs->gpr[3] = r3;
452 clear_bits(_TIF_PERSYSCALL_MASK, ¤t_thread_info()->flags);
453 } else {
454 regs->gpr[3] = r3;
455 }
456
457 if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
458 do_syscall_trace_leave(regs);
459 ret |= _TIF_RESTOREALL;
460 }
461
462 local_irq_disable();
463 ret = interrupt_exit_user_prepare_main(ret, regs);
464
465#ifdef CONFIG_PPC64
466 regs->exit_result = ret;
467#endif
468
469 return ret;
470}
471
472#ifdef CONFIG_PPC64
/*
 * Restart the syscall exit sequence after it was interrupted: re-establish
 * the hard-disabled, KUAP-blocked entry state, redo entry accounting, then
 * re-run the common user exit preparation with the previously computed
 * regs->exit_result.
 */
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * NOTE(review): RI may currently be disabled; __hard_irq_disable()
	 * re-establishes a known hard-disabled state, recorded in
	 * irq_happened so lazy irq bookkeeping stays consistent — confirm
	 * against the restart-sequence commentary upstream.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));	/* syscall exits only go to userspace */

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
499#endif
500
/*
 * Prepare to return from an interrupt taken in user mode. Returns the
 * thread-flag bits (e.g. _TIF_RESTOREALL) the low-level exit code must
 * honor.
 */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * KUAP should be locked here: we interrupted userspace, so the
	 * kernel could not have had user access unlocked.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}
525
526void preempt_schedule_irq(void);
527
528notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
529{
530 unsigned long flags;
531 unsigned long ret = 0;
532 unsigned long kuap;
533 bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
534
535 if (regs_is_unrecoverable(regs))
536 unrecoverable_exception(regs);
537
538
539
540
541 if (TRAP(regs) != INTERRUPT_PROGRAM)
542 CT_WARN_ON(ct_state() == CONTEXT_USER);
543
544 kuap = kuap_get_and_assert_locked();
545
546 local_irq_save(flags);
547
548 if (!arch_irq_disabled_regs(regs)) {
549
550 WARN_ON_ONCE(!(regs->msr & MSR_EE));
551again:
552 if (IS_ENABLED(CONFIG_PREEMPT)) {
553
554 if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
555 if (preempt_count() == 0)
556 preempt_schedule_irq();
557 }
558 }
559
560 check_return_regs_valid(regs);
561
562
563
564
565
566 if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
567
568
569
570
571
572
573
574
575 hard_irq_disable();
576 replay_soft_interrupts();
577
578 goto again;
579 }
580#ifdef CONFIG_PPC64
581
582
583
584
585
586
587
588 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
589
590 } else {
591 check_return_regs_valid(regs);
592
593 if (unlikely(stack_store))
594 __hard_EE_RI_disable();
595
596
597
598
599
600
601
602
603 if (regs->msr & MSR_EE)
604 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
605#endif
606 }
607
608 if (unlikely(stack_store)) {
609 clear_bits(_TIF_EMULATE_STACK_STORE, ¤t_thread_info()->flags);
610 ret = 1;
611 }
612
613#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
614 local_paca->tm_scratch = regs->msr;
615#endif
616
617
618
619
620
621
622 kuap_kernel_restore(regs, kuap);
623
624 return ret;
625}
626
627#ifdef CONFIG_PPC64
/*
 * Restart the return-to-user interrupt exit: re-establish the
 * hard-disabled, KUAP-blocked entry state, redo entry accounting, then
 * re-run the full user exit preparation, accumulating into
 * regs->exit_result.
 */
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));	/* this path only returns to userspace */

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}
647
648
649
650
651
/*
 * Restart the return-to-kernel interrupt exit: re-establish the
 * hard-disabled, KUAP-blocked state, then re-run the kernel exit
 * preparation.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	/* Only trace irqs-off if the interrupted context had irqs enabled */
	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));	/* this path only returns to the kernel */

	return interrupt_exit_kernel_prepare(regs);
}
668#endif
669