// SPDX-License-Identifier: GPL-2.0-only
2#define pr_fmt(fmt) "SMP alternatives: " fmt
3
4#include <linux/module.h>
5#include <linux/sched.h>
6#include <linux/perf_event.h>
7#include <linux/mutex.h>
8#include <linux/list.h>
9#include <linux/stringify.h>
10#include <linux/highmem.h>
11#include <linux/mm.h>
12#include <linux/vmalloc.h>
13#include <linux/memory.h>
14#include <linux/stop_machine.h>
15#include <linux/slab.h>
16#include <linux/kdebug.h>
17#include <linux/kprobes.h>
18#include <linux/mmu_context.h>
19#include <linux/bsearch.h>
20#include <linux/sync_core.h>
21#include <asm/text-patching.h>
22#include <asm/alternative.h>
23#include <asm/sections.h>
24#include <asm/mce.h>
25#include <asm/nmi.h>
26#include <asm/cacheflush.h>
27#include <asm/tlbflush.h>
28#include <asm/insn.h>
29#include <asm/io.h>
30#include <asm/fixmap.h>
31#include <asm/paravirt.h>
32
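/*
 * Non-zero once boot-time alternatives patching has completed
 * (set at the end of alternative_instructions()).
 */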
33int __read_mostly alternatives_patched;
34
35EXPORT_SYMBOL_GPL(alternatives_patched);
36
37#define MAX_PATCH_LEN (255-1)
38
39static int __initdata_or_module debug_alternative;
40
41static int __init debug_alt(char *str)
42{
43 debug_alternative = 1;
44 return 1;
45}
46__setup("debug-alternative", debug_alt);
47
48static int noreplace_smp;
49
50static int __init setup_noreplace_smp(char *str)
51{
52 noreplace_smp = 1;
53 return 1;
54}
55__setup("noreplace-smp", setup_noreplace_smp);
56
57#define DPRINTK(fmt, args...) \
58do { \
59 if (debug_alternative) \
60 printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args); \
61} while (0)
62
63#define DUMP_BYTES(buf, len, fmt, args...) \
64do { \
65 if (unlikely(debug_alternative)) { \
66 int j; \
67 \
68 if (!(len)) \
69 break; \
70 \
71 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
72 for (j = 0; j < (len) - 1; j++) \
73 printk(KERN_CONT "%02hhx ", buf[j]); \
74 printk(KERN_CONT "%02hhx\n", buf[j]); \
75 } \
76} while (0)
77
78static const unsigned char x86nops[] =
79{
80 BYTES_NOP1,
81 BYTES_NOP2,
82 BYTES_NOP3,
83 BYTES_NOP4,
84 BYTES_NOP5,
85 BYTES_NOP6,
86 BYTES_NOP7,
87 BYTES_NOP8,
88};
89
90const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
91{
92 NULL,
93 x86nops,
94 x86nops + 1,
95 x86nops + 1 + 2,
96 x86nops + 1 + 2 + 3,
97 x86nops + 1 + 2 + 3 + 4,
98 x86nops + 1 + 2 + 3 + 4 + 5,
99 x86nops + 1 + 2 + 3 + 4 + 5 + 6,
100 x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
101};

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
104static void __init_or_module add_nops(void *insns, unsigned int len)
105{
106 while (len > 0) {
107 unsigned int noplen = len;
108 if (noplen > ASM_NOP_MAX)
109 noplen = ASM_NOP_MAX;
110 memcpy(insns, x86_nops[noplen], noplen);
111 insns += noplen;
112 len -= noplen;
113 }
114}
115
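/* Section boundaries provided by the linker script. */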
116extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
117extern s32 __smp_locks[], __smp_locks_end[];
118void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
123static inline bool is_jmp(const u8 opcode)
124{
125 return opcode == 0xeb || opcode == 0xe9;
126}
127
128static void __init_or_module
129recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
130{
131 u8 *next_rip, *tgt_rip;
132 s32 n_dspl, o_dspl;
133 int repl_len;
134
135 if (a->replacementlen != 5)
136 return;
137
138 o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
141 next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
143 tgt_rip = next_rip + o_dspl;
144 n_dspl = tgt_rip - orig_insn;
145
146 DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
147
148 if (tgt_rip - orig_insn >= 0) {
149 if (n_dspl - 2 <= 127)
150 goto two_byte_jmp;
151 else
152 goto five_byte_jmp;

	/* negative displacement */
154 } else {
155 if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
156 goto two_byte_jmp;
157 else
158 goto five_byte_jmp;
159 }
160
161two_byte_jmp:
162 n_dspl -= 2;
163
164 insn_buff[0] = 0xeb;
165 insn_buff[1] = (s8)n_dspl;
166 add_nops(insn_buff + 2, 3);
167
168 repl_len = 2;
169 goto done;
170
171five_byte_jmp:
172 n_dspl -= 5;
173
174 insn_buff[0] = 0xe9;
175 *(s32 *)&insn_buff[1] = n_dspl;
176
177 repl_len = 5;
178
179done:
180
181 DPRINTK("final displ: 0x%08x, JMP 0x%lx",
182 n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
183}
184

/*
 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
 *
 * @instr: instruction byte stream
 * @instrlen: length of the above
 * @off: offset within @instr where the first NOP has been detected
 *
 * Return: number of NOPs found (and replaced).
 */
194static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
195{
196 unsigned long flags;
197 int i = off, nnops;
198
199 while (i < instrlen) {
200 if (instr[i] != 0x90)
201 break;
202
203 i++;
204 }
205
206 nnops = i - off;
207
208 if (nnops <= 1)
209 return nnops;
210
211 local_irq_save(flags);
212 add_nops(instr + off, nnops);
213 local_irq_restore(flags);
214
215 DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
216
217 return nnops;
218}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
224static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
225{
226 struct insn insn;
227 int i = 0;

	/*
	 * Walk the patched site instruction by instruction and compress any
	 * runs of single-byte NOPs into longer NOP encodings.
	 */
233 for (;;) {
234 if (insn_decode_kernel(&insn, &instr[i]))
235 return;

		/*
		 * See if this and any potentially following NOPs can be
		 * optimized.
		 */
241 if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
242 i += optimize_nops_range(instr, a->instrlen, i);
243 else
244 i += insn.length;
245
246 if (i >= a->instrlen)
247 return;
248 }
249}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
261void __init_or_module noinline apply_alternatives(struct alt_instr *start,
262 struct alt_instr *end)
263{
264 struct alt_instr *a;
265 u8 *instr, *replacement;
266 u8 insn_buff[MAX_PATCH_LEN];
267
268 DPRINTK("alt table %px, -> %px", start, end);

	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
278 for (a = start; a < end; a++) {
279 int insn_buff_sz = 0;
		/* Mask away the "NOT" flag bit for the feature to test: */
281 u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
282
283 instr = (u8 *)&a->instr_offset + a->instr_offset;
284 replacement = (u8 *)&a->repl_offset + a->repl_offset;
285 BUG_ON(a->instrlen > sizeof(insn_buff));
286 BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
		 *   patch if feature is *NOT* present.
		 */
294 if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
295 goto next;
296
297 DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
298 (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
299 feature >> 5,
300 feature & 0x1f,
301 instr, instr, a->instrlen,
302 replacement, a->replacementlen);
303
304 DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
305 DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
306
307 memcpy(insn_buff, replacement, a->replacementlen);
308 insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
316 if (a->replacementlen == 5 && *insn_buff == 0xe8) {
317 *(s32 *)(insn_buff + 1) += replacement - instr;
318 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
319 *(s32 *)(insn_buff + 1),
320 (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
321 }
322
323 if (a->replacementlen && is_jmp(replacement[0]))
324 recompute_jump(a, instr, replacement, insn_buff);
325
326 for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
327 insn_buff[insn_buff_sz] = 0x90;
328
329 DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
330
331 text_poke_early(instr, insn_buff, insn_buff_sz);
332
333next:
334 optimize_nops(a, instr);
335 }
336}
337
338#ifdef CONFIG_SMP
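
/*
 * Turn the DS segment-override bytes recorded in the smp_locks table back
 * into LOCK prefixes, making the affected instructions atomic again on SMP.
 */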
339static void alternatives_smp_lock(const s32 *start, const s32 *end,
340 u8 *text, u8 *text_end)
341{
342 const s32 *poff;
343
344 for (poff = start; poff < end; poff++) {
345 u8 *ptr = (u8 *)poff + *poff;
346
347 if (!*poff || ptr < text || ptr >= text_end)
348 continue;
		/* turn DS segment override prefix into lock prefix */
350 if (*ptr == 0x3e)
351 text_poke(ptr, ((unsigned char []){0xf0}), 1);
352 }
353}
354
355static void alternatives_smp_unlock(const s32 *start, const s32 *end,
356 u8 *text, u8 *text_end)
357{
358 const s32 *poff;
359
360 for (poff = start; poff < end; poff++) {
361 u8 *ptr = (u8 *)poff + *poff;
362
363 if (!*poff || ptr < text || ptr >= text_end)
364 continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3e}), 1);
368 }
369}
370
struct smp_alt_module {
	/* owning module (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
386static LIST_HEAD(smp_alt_modules);
387static bool uniproc_patched = false;
388
389void __init_or_module alternatives_smp_module_add(struct module *mod,
390 char *name,
391 void *locks, void *locks_end,
392 void *text, void *text_end)
393{
394 struct smp_alt_module *smp;
395
396 mutex_lock(&text_mutex);
397 if (!uniproc_patched)
398 goto unlock;
399
400 if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
402 goto smp_unlock;
403
404 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;
408
409 smp->mod = mod;
410 smp->name = name;
411 smp->locks = locks;
412 smp->locks_end = locks_end;
413 smp->text = text;
414 smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);
418
419 list_add_tail(&smp->next, &smp_alt_modules);
420smp_unlock:
421 alternatives_smp_unlock(locks, locks_end, text, text_end);
422unlock:
423 mutex_unlock(&text_mutex);
424}
425
426void __init_or_module alternatives_smp_module_del(struct module *mod)
427{
428 struct smp_alt_module *item;
429
430 mutex_lock(&text_mutex);
431 list_for_each_entry(item, &smp_alt_modules, next) {
432 if (mod != item->mod)
433 continue;
434 list_del(&item->next);
435 kfree(item);
436 break;
437 }
438 mutex_unlock(&text_mutex);
439}
440
441void alternatives_enable_smp(void)
442{
443 struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
446 BUG_ON(num_possible_cpus() == 1);
447
448 mutex_lock(&text_mutex);
449
450 if (uniproc_patched) {
451 pr_info("switching to SMP code\n");
452 BUG_ON(num_online_cpus() != 1);
453 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
454 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
455 list_for_each_entry(mod, &smp_alt_modules, next)
456 alternatives_smp_lock(mod->locks, mod->locks_end,
457 mod->text, mod->text_end);
458 uniproc_patched = false;
459 }
460 mutex_unlock(&text_mutex);
461}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
467int alternatives_text_reserved(void *start, void *end)
468{
469 struct smp_alt_module *mod;
470 const s32 *poff;
471 u8 *text_start = start;
472 u8 *text_end = end;
473
474 lockdep_assert_held(&text_mutex);
475
476 list_for_each_entry(mod, &smp_alt_modules, next) {
477 if (mod->text > text_end || mod->text_end < text_start)
478 continue;
479 for (poff = mod->locks; poff < mod->locks_end; poff++) {
480 const u8 *ptr = (const u8 *)poff + *poff;
481
482 if (text_start <= ptr && text_end > ptr)
483 return 1;
484 }
485 }
486
487 return 0;
488}
489#endif
490
491#ifdef CONFIG_PARAVIRT
492void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
493 struct paravirt_patch_site *end)
494{
495 struct paravirt_patch_site *p;
496 char insn_buff[MAX_PATCH_LEN];
497
498 for (p = start; p < end; p++) {
499 unsigned int used;
500
501 BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
503 memcpy(insn_buff, p->instr, p->len);
504 used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
505
506 BUG_ON(used > p->len);

		/* Pad the rest with nops */
509 add_nops(insn_buff + used, p->len - used);
510 text_poke_early(p->instr, insn_buff, p->len);
511 }
512}
513extern struct paravirt_patch_site __start_parainstructions[],
514 __stop_parainstructions[];
515#endif

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S
 */
533extern void int3_magic(unsigned int *ptr);
534
535asm (
536" .pushsection .init.text, \"ax\", @progbits\n"
537" .type int3_magic, @function\n"
538"int3_magic:\n"
539" movl $1, (%" _ASM_ARG1 ")\n"
540" ret\n"
541" .size int3_magic, .-int3_magic\n"
542" .popsection\n"
543);
544
extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
546
547static int __init
548int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
549{
550 struct die_args *args = data;
551 struct pt_regs *regs = args->regs;
552
553 if (!regs || user_mode(regs))
554 return NOTIFY_DONE;
555
556 if (val != DIE_INT3)
557 return NOTIFY_DONE;
558
559 if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
560 return NOTIFY_DONE;
561
562 int3_emulate_call(regs, (unsigned long)&int3_magic);
563 return NOTIFY_STOP;
564}
565
566static void __init int3_selftest(void)
567{
568 static __initdata struct notifier_block int3_exception_nb = {
569 .notifier_call = int3_exception_notify,
570 .priority = INT_MAX-1,
571 };
572 unsigned int val = 0;
573
574 BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
	 * notifier emulates CALL'ing int3_magic() from the interrupted
	 * context.
	 */
583 asm volatile ("1: int3; nop; nop; nop; nop\n\t"
584 ".pushsection .init.data,\"aw\"\n\t"
585 ".align " __ASM_SEL(4, 8) "\n\t"
586 ".type int3_selftest_ip, @object\n\t"
587 ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
588 "int3_selftest_ip:\n\t"
589 __ASM_SEL(.long, .quad) " 1b\n\t"
590 ".popsection\n\t"
591 : ASM_CALL_CONSTRAINT
592 : __ASM_SEL_RAW(a, D) (&val)
593 : "memory");
594
595 BUG_ON(val != 1);
596
597 unregister_die_notifier(&int3_exception_nb);
598}
599
600void __init alternative_instructions(void)
601{
602 int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
609 stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Paravirt patching and alternative patching can be combined to
	 * replace a function call with a short direct code sequence (e.g.
	 * by setting a constant return value instead of doing that in an
	 * external function).
	 * In order to make this work the following sequence is required:
	 * 1. set (artificial) features depending on used paravirt
	 *    functions which can later influence alternative patching
	 * 2. apply paravirt patching (generally replacing an indirect
	 *    function call with a direct one)
	 * 3. apply alternative patching (e.g. replacing a direct function
	 *    call with a custom code sequence)
	 * Doing paravirt patching after alternative patching would clobber
	 * the alternative replacements, so it must run first.
	 */
637 paravirt_set_cap();

	/*
	 * First patch paravirt functions, such that we overwrite the indirect
	 * call with the direct call.
	 */
643 apply_paravirt(__parainstructions, __parainstructions_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
	 * alternatives can be overwritten by their immediate fragments.
	 */
649 apply_alternatives(__alt_instructions, __alt_instructions_end);
650
651#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
653 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
654 uniproc_patched = true;
655 alternatives_smp_module_add(NULL, "core kernel",
656 __smp_locks, __smp_locks_end,
657 _text, _etext);
658 }
659
660 if (!uniproc_patched || num_possible_cpus() == 1) {
661 free_init_pages("SMP alternatives",
662 (unsigned long)__smp_locks,
663 (unsigned long)__smp_locks_end);
664 }
665#endif
666
667 restart_nmi();
668 alternatives_patched = 1;
669}

/*
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only usable before SMP is up, or on module text that is not yet
 * executable, i.e. when no other CPU can be running the code that is
 * being modified.
 */
683void __init_or_module text_poke_early(void *addr, const void *opcode,
684 size_t len)
685{
686 unsigned long flags;
687
688 if (boot_cpu_has(X86_FEATURE_NX) &&
689 is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
695 memcpy(addr, opcode, len);
696 } else {
697 local_irq_save(flags);
698 memcpy(addr, opcode, len);
699 local_irq_restore(flags);
700 sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
706 }
707}
708
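/* Previously loaded mm, saved across a temporary mm switch. */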
709typedef struct {
710 struct mm_struct *mm;
711} temp_mm_state_t;

/*
 * Using a temporary mm allows to set temporary mappings that are not accessible
 * by other CPUs. Such mappings are needed to perform sensitive memory writes
 * that override the kernel memory protections (e.g., W^X), without exposing the
 * temporary page-table mappings that are required for these write operations to
 * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
 * mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
726static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
727{
728 temp_mm_state_t temp_state;
729
730 lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
737 if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
738 leave_mm(smp_processor_id());
739
740 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
741 switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
754 if (hw_breakpoint_active())
755 hw_breakpoint_disable();
756
757 return temp_state;
758}
759
760static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
761{
762 lockdep_assert_irqs_disabled();
763 switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
769 if (hw_breakpoint_active())
770 hw_breakpoint_restore();
771}
772
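/*
 * The mm and fixed address that __text_poke() maps the target pages into;
 * both are set up early during boot (see poking_init()).
 */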
773__ro_after_init struct mm_struct *poking_mm;
774__ro_after_init unsigned long poking_addr;
775
776static void *__text_poke(void *addr, const void *opcode, size_t len)
777{
778 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
779 struct page *pages[2] = {NULL};
780 temp_mm_state_t prev;
781 unsigned long flags;
782 pte_t pte, *ptep;
783 spinlock_t *ptl;
784 pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
790 BUG_ON(!after_bootmem);
791
792 if (!core_kernel_text((unsigned long)addr)) {
793 pages[0] = vmalloc_to_page(addr);
794 if (cross_page_boundary)
795 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
796 } else {
797 pages[0] = virt_to_page(addr);
798 WARN_ON(!PageReserved(pages[0]));
799 if (cross_page_boundary)
800 pages[1] = virt_to_page(addr + PAGE_SIZE);
801 }

	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
806 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
812 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows to avoid open-coding.
	 */
817 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
822 VM_BUG_ON(!ptep);
823
824 local_irq_save(flags);
825
826 pte = mk_pte(pages[0], pgprot);
827 set_pte_at(poking_mm, poking_addr, ptep, pte);
828
829 if (cross_page_boundary) {
830 pte = mk_pte(pages[1], pgprot);
831 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
832 }

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
838 prev = use_temporary_mm(poking_mm);
839
840 kasan_disable_current();
841 memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
842 kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
848 barrier();
849
850 pte_clear(poking_mm, poking_addr, ptep);
851 if (cross_page_boundary)
852 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
859 unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as it is in this point.
	 */
865 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
866 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
867 PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
873 BUG_ON(memcmp(addr, opcode, len));
874
875 local_irq_restore(flags);
876 pte_unmap_unlock(ptep, ptl);
877 return addr;
878}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
896void *text_poke(void *addr, const void *opcode, size_t len)
897{
898 lockdep_assert_held(&text_mutex);
899
900 return __text_poke(addr, opcode, len);
901}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
917void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
918{
919 return __text_poke(addr, opcode, len);
920}
921
922static void do_sync_core(void *info)
923{
924 sync_core();
925}
926
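/* Force every online CPU to serialize, so all of them see the new text. */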
927void text_poke_sync(void)
928{
929 on_each_cpu(do_sync_core, NULL, 1);
930}
931
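/*
 * One queued patch site: location of the instruction (relative to _stext),
 * the bytes to write, and the opcode/displacement that poke_int3_handler()
 * uses to emulate the new instruction while the INT3 byte is installed.
 */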
932struct text_poke_loc {
933 s32 rel_addr;
934 s32 rel32;
935 u8 opcode;
936 const u8 text[POKE_MAX_OPCODE_SIZE];
937 u8 old;
938};
939
940struct bp_patching_desc {
941 struct text_poke_loc *vec;
942 int nr_entries;
943 atomic_t refs;
944};
945
946static struct bp_patching_desc *bp_desc;
947
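/*
 * Take a reference on the patching descriptor currently published in @descp;
 * returns NULL if no batch patching is in progress (refcount already zero).
 */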
948static __always_inline
949struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
950{
951 struct bp_patching_desc *desc = __READ_ONCE(*descp);
952
953 if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
954 return NULL;
955
956 return desc;
957}
958
959static __always_inline void put_desc(struct bp_patching_desc *desc)
960{
961 smp_mb__before_atomic();
962 arch_atomic_dec(&desc->refs);
963}
964
965static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
966{
967 return _stext + tp->rel_addr;
968}
969
970static __always_inline int patch_cmp(const void *key, const void *elt)
971{
972 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
973
974 if (key < text_poke_addr(tp))
975 return -1;
976 if (key > text_poke_addr(tp))
977 return 1;
978 return 0;
979}
980
981noinstr int poke_int3_handler(struct pt_regs *regs)
982{
983 struct bp_patching_desc *desc;
984 struct text_poke_loc *tp;
985 int len, ret = 0;
986 void *ip;
987
988 if (user_mode(regs))
989 return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
999 smp_rmb();
1000
1001 desc = try_get_desc(&bp_desc);
1002 if (!desc)
1003 return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
1008 ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
1013 if (unlikely(desc->nr_entries > 1)) {
1014 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1015 sizeof(struct text_poke_loc),
1016 patch_cmp);
1017 if (!tp)
1018 goto out_put;
1019 } else {
1020 tp = desc->vec;
1021 if (text_poke_addr(tp) != ip)
1022 goto out_put;
1023 }
1024
1025 len = text_opcode_size(tp->opcode);
1026 ip += len;
1027
1028 switch (tp->opcode) {
1029 case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
1034 goto out_put;
1035
1036 case RET_INSN_OPCODE:
1037 int3_emulate_ret(regs);
1038 break;
1039
1040 case CALL_INSN_OPCODE:
1041 int3_emulate_call(regs, (long)ip + tp->rel32);
1042 break;
1043
1044 case JMP32_INSN_OPCODE:
1045 case JMP8_INSN_OPCODE:
1046 int3_emulate_jmp(regs, (long)ip + tp->rel32);
1047 break;
1048
1049 default:
1050 BUG();
1051 }
1052
1053 ret = 1;
1054
1055out_put:
1056 put_desc(desc);
1057 return ret;
1058}
1059
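/*
 * Batching buffer used by text_poke_queue()/text_poke_finish(); entries must
 * stay sorted by address (see tp_order_fail()).
 */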
1060#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1061static struct text_poke_loc tp_vec[TP_VEC_MAX];
1062static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
1085static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1086{
1087 struct bp_patching_desc desc = {
1088 .vec = tp,
1089 .nr_entries = nr_entries,
1090 .refs = ATOMIC_INIT(1),
1091 };
1092 unsigned char int3 = INT3_INSN_OPCODE;
1093 unsigned int i;
1094 int do_sync;
1095
1096 lockdep_assert_held(&text_mutex);
1097
	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
1104 smp_wmb();

	/*
	 * First step: add a int3 trap to the address that will be patched.
	 */
1109 for (i = 0; i < nr_entries; i++) {
1110 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1111 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1112 }
1113
1114 text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
1119 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1120 u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1121 int len = text_opcode_size(tp[i].opcode);
1122
1123 if (len - INT3_INSN_SIZE > 0) {
1124 memcpy(old + INT3_INSN_SIZE,
1125 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1126 len - INT3_INSN_SIZE);
1127 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1128 (const char *)tp[i].text + INT3_INSN_SIZE,
1129 len - INT3_INSN_SIZE);
1130 do_sync++;
1131 }

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
1157 perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1158 tp[i].text, len);
1159 }
1160
1161 if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
1167 text_poke_sync();
1168 }

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
1174 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1175 if (tp[i].text[0] == INT3_INSN_OPCODE)
1176 continue;
1177
1178 text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1179 do_sync++;
1180 }
1181
1182 if (do_sync)
1183 text_poke_sync();

	/*
	 * Unpublish the descriptor and wait out any INT3 handlers that still
	 * hold a reference to it (a poor man's synchronize_rcu()).
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1190 if (!atomic_dec_and_test(&desc.refs))
1191 atomic_cond_read_acquire(&desc.refs, !VAL);
1192}
1193
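/*
 * Record one patch site in @tp: stash the new bytes, the site's offset from
 * _stext, and the opcode/displacement needed to emulate the instruction from
 * the INT3 handler while patching is in progress.
 */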
1194static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1195 const void *opcode, size_t len, const void *emulate)
1196{
1197 struct insn insn;
1198 int ret;
1199
1200 memcpy((void *)tp->text, opcode, len);
1201 if (!emulate)
1202 emulate = opcode;
1203
1204 ret = insn_decode_kernel(&insn, emulate);
1205
1206 BUG_ON(ret < 0);
1207 BUG_ON(len != insn.length);
1208
1209 tp->rel_addr = addr - (void *)_stext;
1210 tp->opcode = insn.opcode.bytes[0];
1211
1212 switch (tp->opcode) {
1213 case INT3_INSN_OPCODE:
1214 case RET_INSN_OPCODE:
1215 break;
1216
1217 case CALL_INSN_OPCODE:
1218 case JMP32_INSN_OPCODE:
1219 case JMP8_INSN_OPCODE:
1220 tp->rel32 = insn.immediate.value;
1221 break;
1222
1223 default:
1224 switch (len) {
1225 case 2:
1226 BUG_ON(memcmp(emulate, x86_nops[len], len));
1227 tp->opcode = JMP8_INSN_OPCODE;
1228 tp->rel32 = 0;
1229 break;
1230
1231 case 5:
1232 BUG_ON(memcmp(emulate, x86_nops[len], len));
1233 tp->opcode = JMP32_INSN_OPCODE;
1234 tp->rel32 = 0;
1235 break;
1236
1237 default:
1238 BUG();
1239 }
1240 break;
1241 }
1242}

/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
1248static bool tp_order_fail(void *addr)
1249{
1250 struct text_poke_loc *tp;
1251
1252 if (!tp_vec_nr)
1253 return false;
1254
1255 if (!addr)
1256 return true;
1257
1258 tp = &tp_vec[tp_vec_nr - 1];
1259 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1260 return true;
1261
1262 return false;
1263}
1264
1265static void text_poke_flush(void *addr)
1266{
1267 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1268 text_poke_bp_batch(tp_vec, tp_vec_nr);
1269 tp_vec_nr = 0;
1270 }
1271}
1272
1273void text_poke_finish(void)
1274{
1275 text_poke_flush(NULL);
1276}
1277
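/**
 * text_poke_queue() -- queue instructions to be patched
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Add a new instruction to the current queue of to-be-patched instructions.
 */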
1278void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1279{
1280 struct text_poke_loc *tp;
1281
1282 if (unlikely(system_state == SYSTEM_BOOTING)) {
1283 text_poke_early(addr, opcode, len);
1284 return;
1285 }
1286
1287 text_poke_flush(addr);
1288
1289 tp = &tp_vec[tp_vec_nr++];
1290 text_poke_loc_init(tp, addr, opcode, len, emulate);
1291}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
1304void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1305{
1306 struct text_poke_loc tp;
1307
1308 if (unlikely(system_state == SYSTEM_BOOTING)) {
1309 text_poke_early(addr, opcode, len);
1310 return;
1311 }
1312
1313 text_poke_loc_init(&tp, addr, opcode, len, emulate);
1314 text_poke_bp_batch(&tp, 1);
1315}
1316