linux/arch/x86/kernel/alternative.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) "SMP alternatives: " fmt
   3
   4#include <linux/module.h>
   5#include <linux/sched.h>
   6#include <linux/perf_event.h>
   7#include <linux/mutex.h>
   8#include <linux/list.h>
   9#include <linux/stringify.h>
  10#include <linux/highmem.h>
  11#include <linux/mm.h>
  12#include <linux/vmalloc.h>
  13#include <linux/memory.h>
  14#include <linux/stop_machine.h>
  15#include <linux/slab.h>
  16#include <linux/kdebug.h>
  17#include <linux/kprobes.h>
  18#include <linux/mmu_context.h>
  19#include <linux/bsearch.h>
  20#include <linux/sync_core.h>
  21#include <asm/text-patching.h>
  22#include <asm/alternative.h>
  23#include <asm/sections.h>
  24#include <asm/mce.h>
  25#include <asm/nmi.h>
  26#include <asm/cacheflush.h>
  27#include <asm/tlbflush.h>
  28#include <asm/insn.h>
  29#include <asm/io.h>
  30#include <asm/fixmap.h>
  31#include <asm/paravirt.h>
  32
  33int __read_mostly alternatives_patched;
  34
  35EXPORT_SYMBOL_GPL(alternatives_patched);
  36
  37#define MAX_PATCH_LEN (255-1)
  38
  39static int __initdata_or_module debug_alternative;
  40
  41static int __init debug_alt(char *str)
  42{
  43        debug_alternative = 1;
  44        return 1;
  45}
  46__setup("debug-alternative", debug_alt);
  47
  48static int noreplace_smp;
  49
  50static int __init setup_noreplace_smp(char *str)
  51{
  52        noreplace_smp = 1;
  53        return 1;
  54}
  55__setup("noreplace-smp", setup_noreplace_smp);
  56
  57#define DPRINTK(fmt, args...)                                           \
  58do {                                                                    \
  59        if (debug_alternative)                                          \
  60                printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);            \
  61} while (0)
  62
  63#define DUMP_BYTES(buf, len, fmt, args...)                              \
  64do {                                                                    \
  65        if (unlikely(debug_alternative)) {                              \
  66                int j;                                                  \
  67                                                                        \
  68                if (!(len))                                             \
  69                        break;                                          \
  70                                                                        \
  71                printk(KERN_DEBUG pr_fmt(fmt), ##args);                 \
  72                for (j = 0; j < (len) - 1; j++)                         \
  73                        printk(KERN_CONT "%02hhx ", buf[j]);            \
  74                printk(KERN_CONT "%02hhx\n", buf[j]);                   \
  75        }                                                               \
  76} while (0)
  77
  78static const unsigned char x86nops[] =
  79{
  80        BYTES_NOP1,
  81        BYTES_NOP2,
  82        BYTES_NOP3,
  83        BYTES_NOP4,
  84        BYTES_NOP5,
  85        BYTES_NOP6,
  86        BYTES_NOP7,
  87        BYTES_NOP8,
  88};
  89
  90const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
  91{
  92        NULL,
  93        x86nops,
  94        x86nops + 1,
  95        x86nops + 1 + 2,
  96        x86nops + 1 + 2 + 3,
  97        x86nops + 1 + 2 + 3 + 4,
  98        x86nops + 1 + 2 + 3 + 4 + 5,
  99        x86nops + 1 + 2 + 3 + 4 + 5 + 6,
 100        x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 101};
 102
 103/* Use this to add nops to a buffer, then text_poke the whole buffer. */
 104static void __init_or_module add_nops(void *insns, unsigned int len)
 105{
 106        while (len > 0) {
 107                unsigned int noplen = len;
 108                if (noplen > ASM_NOP_MAX)
 109                        noplen = ASM_NOP_MAX;
 110                memcpy(insns, x86_nops[noplen], noplen);
 111                insns += noplen;
 112                len -= noplen;
 113        }
 114}
 115
 116extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 117extern s32 __smp_locks[], __smp_locks_end[];
 118void text_poke_early(void *addr, const void *opcode, size_t len);
 119
 120/*
  121 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 122 */
 123static inline bool is_jmp(const u8 opcode)
 124{
 125        return opcode == 0xeb || opcode == 0xe9;
 126}
 127
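    /*
     * Fix up a near JMP copied from the replacement (only the 5-byte form is
     * handled): its displacement was encoded relative to the replacement's
     * location, so re-encode it relative to the original instruction and,
     * when the target is close enough, shrink it to a 2-byte JMP followed by
     * NOP padding.
     */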
 128static void __init_or_module
 129recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
 130{
 131        u8 *next_rip, *tgt_rip;
 132        s32 n_dspl, o_dspl;
 133        int repl_len;
 134
 135        if (a->replacementlen != 5)
 136                return;
 137
 138        o_dspl = *(s32 *)(insn_buff + 1);
 139
 140        /* next_rip of the replacement JMP */
 141        next_rip = repl_insn + a->replacementlen;
 142        /* target rip of the replacement JMP */
 143        tgt_rip  = next_rip + o_dspl;
 144        n_dspl = tgt_rip - orig_insn;
 145
 146        DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
 147
 148        if (tgt_rip - orig_insn >= 0) {
 149                if (n_dspl - 2 <= 127)
 150                        goto two_byte_jmp;
 151                else
 152                        goto five_byte_jmp;
 153        /* negative offset */
 154        } else {
 155                if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
 156                        goto two_byte_jmp;
 157                else
 158                        goto five_byte_jmp;
 159        }
 160
 161two_byte_jmp:
 162        n_dspl -= 2;
 163
 164        insn_buff[0] = 0xeb;
 165        insn_buff[1] = (s8)n_dspl;
 166        add_nops(insn_buff + 2, 3);
 167
 168        repl_len = 2;
 169        goto done;
 170
 171five_byte_jmp:
 172        n_dspl -= 5;
 173
 174        insn_buff[0] = 0xe9;
 175        *(s32 *)&insn_buff[1] = n_dspl;
 176
 177        repl_len = 5;
 178
 179done:
 180
 181        DPRINTK("final displ: 0x%08x, JMP 0x%lx",
 182                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 183}
 184
 185/*
 186 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
 187 *
 188 * @instr: instruction byte stream
 189 * @instrlen: length of the above
 190 * @off: offset within @instr where the first NOP has been detected
 191 *
 192 * Return: number of NOPs found (and replaced).
 193 */
 194static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
 195{
 196        unsigned long flags;
 197        int i = off, nnops;
 198
 199        while (i < instrlen) {
 200                if (instr[i] != 0x90)
 201                        break;
 202
 203                i++;
 204        }
 205
 206        nnops = i - off;
 207
 208        if (nnops <= 1)
 209                return nnops;
 210
 211        local_irq_save(flags);
 212        add_nops(instr + off, nnops);
 213        local_irq_restore(flags);
 214
 215        DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
 216
 217        return nnops;
 218}
 219
 220/*
 221 * "noinline" to cause control flow change and thus invalidate I$ and
 222 * cause refetch after modification.
 223 */
 224static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 225{
 226        struct insn insn;
 227        int i = 0;
 228
 229        /*
 230         * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
 231         * ones.
 232         */
 233        for (;;) {
 234                if (insn_decode_kernel(&insn, &instr[i]))
 235                        return;
 236
 237                /*
 238                 * See if this and any potentially following NOPs can be
 239                 * optimized.
 240                 */
 241                if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
 242                        i += optimize_nops_range(instr, a->instrlen, i);
 243                else
 244                        i += insn.length;
 245
 246                if (i >= a->instrlen)
 247                        return;
 248        }
 249}
 250
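    /*
     * For reference: patch sites are emitted by the ALTERNATIVE()/alternative()
     * macros from <asm/alternative.h>, roughly along the lines of
     *
     *     alternative("call old_helper", "call new_helper", X86_FEATURE_FOO);
     *
     * (illustrative names), which place the original bytes in .text and record
     * a struct alt_instr entry pointing at them, at the replacement bytes and
     * at the feature bit tested below.
     */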
 251/*
 252 * Replace instructions with better alternatives for this CPU type. This runs
 253 * before SMP is initialized to avoid SMP problems with self modifying code.
  254 * This implies that asymmetric systems where APs have fewer capabilities than
 255 * the boot processor are not handled. Tough. Make sure you disable such
 256 * features by hand.
 257 *
 258 * Marked "noinline" to cause control flow change and thus insn cache
 259 * to refetch changed I$ lines.
 260 */
 261void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 262                                                  struct alt_instr *end)
 263{
 264        struct alt_instr *a;
 265        u8 *instr, *replacement;
 266        u8 insn_buff[MAX_PATCH_LEN];
 267
 268        DPRINTK("alt table %px, -> %px", start, end);
 269        /*
 270         * The scan order should be from start to end. A later scanned
 271         * alternative code can overwrite previously scanned alternative code.
 272         * Some kernel functions (e.g. memcpy, memset, etc) use this order to
 273         * patch code.
 274         *
 275         * So be careful if you want to change the scan order to any other
 276         * order.
 277         */
 278        for (a = start; a < end; a++) {
 279                int insn_buff_sz = 0;
 280                /* Mask away "NOT" flag bit for feature to test. */
 281                u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
 282
 283                instr = (u8 *)&a->instr_offset + a->instr_offset;
 284                replacement = (u8 *)&a->repl_offset + a->repl_offset;
 285                BUG_ON(a->instrlen > sizeof(insn_buff));
 286                BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
 287
 288                /*
 289                 * Patch if either:
 290                 * - feature is present
 291                 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
 292                 *   patch if feature is *NOT* present.
 293                 */
 294                if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
 295                        goto next;
 296
 297                DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
 298                        (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
 299                        feature >> 5,
 300                        feature & 0x1f,
 301                        instr, instr, a->instrlen,
 302                        replacement, a->replacementlen);
 303
 304                DUMP_BYTES(instr, a->instrlen, "%px:   old_insn: ", instr);
 305                DUMP_BYTES(replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
 306
 307                memcpy(insn_buff, replacement, a->replacementlen);
 308                insn_buff_sz = a->replacementlen;
 309
 310                /*
  311                 * 0xe8 is a relative CALL; fix the offset.
 312                 *
 313                 * Instruction length is checked before the opcode to avoid
 314                 * accessing uninitialized bytes for zero-length replacements.
 315                 */
 316                if (a->replacementlen == 5 && *insn_buff == 0xe8) {
 317                        *(s32 *)(insn_buff + 1) += replacement - instr;
 318                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
 319                                *(s32 *)(insn_buff + 1),
 320                                (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
 321                }
 322
 323                if (a->replacementlen && is_jmp(replacement[0]))
 324                        recompute_jump(a, instr, replacement, insn_buff);
 325
 326                for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
 327                        insn_buff[insn_buff_sz] = 0x90;
 328
 329                DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 330
 331                text_poke_early(instr, insn_buff, insn_buff_sz);
 332
 333next:
 334                optimize_nops(a, instr);
 335        }
 336}
 337
 338#ifdef CONFIG_SMP
 339static void alternatives_smp_lock(const s32 *start, const s32 *end,
 340                                  u8 *text, u8 *text_end)
 341{
 342        const s32 *poff;
 343
 344        for (poff = start; poff < end; poff++) {
 345                u8 *ptr = (u8 *)poff + *poff;
 346
 347                if (!*poff || ptr < text || ptr >= text_end)
 348                        continue;
 349                /* turn DS segment override prefix into lock prefix */
 350                if (*ptr == 0x3e)
 351                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
 352        }
 353}
 354
 355static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 356                                    u8 *text, u8 *text_end)
 357{
 358        const s32 *poff;
 359
 360        for (poff = start; poff < end; poff++) {
 361                u8 *ptr = (u8 *)poff + *poff;
 362
 363                if (!*poff || ptr < text || ptr >= text_end)
 364                        continue;
 365                /* turn lock prefix into DS segment override prefix */
 366                if (*ptr == 0xf0)
 367                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
 368        }
 369}
 370
 371struct smp_alt_module {
  372        /* owning module, NULL for the core kernel */
 373        struct module   *mod;
 374        char            *name;
 375
 376        /* ptrs to lock prefixes */
 377        const s32       *locks;
 378        const s32       *locks_end;
 379
 380        /* .text segment, needed to avoid patching init code ;) */
 381        u8              *text;
 382        u8              *text_end;
 383
 384        struct list_head next;
 385};
 386static LIST_HEAD(smp_alt_modules);
 387static bool uniproc_patched = false;    /* protected by text_mutex */
 388
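    /*
     * Register a text range and its LOCK-prefix fixup sites.  While the
     * kernel is running UP-patched code, the LOCK prefixes in the new range
     * are downgraded to DS overrides as well and, unless only one CPU is
     * possible, the range is remembered so alternatives_enable_smp() can
     * restore them later.
     */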
 389void __init_or_module alternatives_smp_module_add(struct module *mod,
 390                                                  char *name,
 391                                                  void *locks, void *locks_end,
 392                                                  void *text,  void *text_end)
 393{
 394        struct smp_alt_module *smp;
 395
 396        mutex_lock(&text_mutex);
 397        if (!uniproc_patched)
 398                goto unlock;
 399
 400        if (num_possible_cpus() == 1)
 401                /* Don't bother remembering, we'll never have to undo it. */
 402                goto smp_unlock;
 403
 404        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
  405        if (!smp)
 406                /* we'll run the (safe but slow) SMP code then ... */
 407                goto unlock;
 408
 409        smp->mod        = mod;
 410        smp->name       = name;
 411        smp->locks      = locks;
 412        smp->locks_end  = locks_end;
 413        smp->text       = text;
 414        smp->text_end   = text_end;
  415        DPRINTK("locks %p -> %p, text %p -> %p, name %s",
 416                smp->locks, smp->locks_end,
 417                smp->text, smp->text_end, smp->name);
 418
 419        list_add_tail(&smp->next, &smp_alt_modules);
 420smp_unlock:
 421        alternatives_smp_unlock(locks, locks_end, text, text_end);
 422unlock:
 423        mutex_unlock(&text_mutex);
 424}
 425
 426void __init_or_module alternatives_smp_module_del(struct module *mod)
 427{
 428        struct smp_alt_module *item;
 429
 430        mutex_lock(&text_mutex);
 431        list_for_each_entry(item, &smp_alt_modules, next) {
 432                if (mod != item->mod)
 433                        continue;
 434                list_del(&item->next);
 435                kfree(item);
 436                break;
 437        }
 438        mutex_unlock(&text_mutex);
 439}
 440
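    /*
     * Undo the UP optimization when a second CPU is brought online: turn the
     * DS override prefixes in every registered range back into LOCK prefixes.
     */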
 441void alternatives_enable_smp(void)
 442{
 443        struct smp_alt_module *mod;
 444
 445        /* Why bother if there are no other CPUs? */
 446        BUG_ON(num_possible_cpus() == 1);
 447
 448        mutex_lock(&text_mutex);
 449
 450        if (uniproc_patched) {
 451                pr_info("switching to SMP code\n");
 452                BUG_ON(num_online_cpus() != 1);
 453                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 454                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 455                list_for_each_entry(mod, &smp_alt_modules, next)
 456                        alternatives_smp_lock(mod->locks, mod->locks_end,
 457                                              mod->text, mod->text_end);
 458                uniproc_patched = false;
 459        }
 460        mutex_unlock(&text_mutex);
 461}
 462
 463/*
 464 * Return 1 if the address range is reserved for SMP-alternatives.
 465 * Must hold text_mutex.
 466 */
 467int alternatives_text_reserved(void *start, void *end)
 468{
 469        struct smp_alt_module *mod;
 470        const s32 *poff;
 471        u8 *text_start = start;
 472        u8 *text_end = end;
 473
 474        lockdep_assert_held(&text_mutex);
 475
 476        list_for_each_entry(mod, &smp_alt_modules, next) {
 477                if (mod->text > text_end || mod->text_end < text_start)
 478                        continue;
 479                for (poff = mod->locks; poff < mod->locks_end; poff++) {
 480                        const u8 *ptr = (const u8 *)poff + *poff;
 481
 482                        if (text_start <= ptr && text_end > ptr)
 483                                return 1;
 484                }
 485        }
 486
 487        return 0;
 488}
 489#endif /* CONFIG_SMP */
 490
 491#ifdef CONFIG_PARAVIRT
 492void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 493                                     struct paravirt_patch_site *end)
 494{
 495        struct paravirt_patch_site *p;
 496        char insn_buff[MAX_PATCH_LEN];
 497
 498        for (p = start; p < end; p++) {
 499                unsigned int used;
 500
 501                BUG_ON(p->len > MAX_PATCH_LEN);
 502                /* prep the buffer with the original instructions */
 503                memcpy(insn_buff, p->instr, p->len);
 504                used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
 505
 506                BUG_ON(used > p->len);
 507
 508                /* Pad the rest with nops */
 509                add_nops(insn_buff + used, p->len - used);
 510                text_poke_early(p->instr, insn_buff, p->len);
 511        }
 512}
 513extern struct paravirt_patch_site __start_parainstructions[],
 514        __stop_parainstructions[];
 515#endif  /* CONFIG_PARAVIRT */
 516
 517/*
 518 * Self-test for the INT3 based CALL emulation code.
 519 *
 520 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 521 * properly and that there is a stack gap between the INT3 frame and the
 522 * previous context. Without this gap doing a virtual PUSH on the interrupted
 523 * stack would corrupt the INT3 IRET frame.
 524 *
 525 * See entry_{32,64}.S for more details.
 526 */
 527
 528/*
 529 * We define the int3_magic() function in assembly to control the calling
 530 * convention such that we can 'call' it from assembly.
 531 */
 532
 533extern void int3_magic(unsigned int *ptr); /* defined in asm */
 534
 535asm (
 536"       .pushsection    .init.text, \"ax\", @progbits\n"
 537"       .type           int3_magic, @function\n"
 538"int3_magic:\n"
 539"       movl    $1, (%" _ASM_ARG1 ")\n"
 540"       ret\n"
 541"       .size           int3_magic, .-int3_magic\n"
 542"       .popsection\n"
 543);
 544
 545extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
 546
 547static int __init
 548int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 549{
 550        struct die_args *args = data;
 551        struct pt_regs *regs = args->regs;
 552
 553        if (!regs || user_mode(regs))
 554                return NOTIFY_DONE;
 555
 556        if (val != DIE_INT3)
 557                return NOTIFY_DONE;
 558
 559        if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
 560                return NOTIFY_DONE;
 561
 562        int3_emulate_call(regs, (unsigned long)&int3_magic);
 563        return NOTIFY_STOP;
 564}
 565
 566static void __init int3_selftest(void)
 567{
 568        static __initdata struct notifier_block int3_exception_nb = {
 569                .notifier_call  = int3_exception_notify,
 570                .priority       = INT_MAX-1, /* last */
 571        };
 572        unsigned int val = 0;
 573
 574        BUG_ON(register_die_notifier(&int3_exception_nb));
 575
 576        /*
 577         * Basically: int3_magic(&val); but really complicated :-)
 578         *
 579         * Stick the address of the INT3 instruction into int3_selftest_ip,
 580         * then trigger the INT3, padded with NOPs to match a CALL instruction
 581         * length.
 582         */
 583        asm volatile ("1: int3; nop; nop; nop; nop\n\t"
 584                      ".pushsection .init.data,\"aw\"\n\t"
 585                      ".align " __ASM_SEL(4, 8) "\n\t"
 586                      ".type int3_selftest_ip, @object\n\t"
 587                      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
 588                      "int3_selftest_ip:\n\t"
 589                      __ASM_SEL(.long, .quad) " 1b\n\t"
 590                      ".popsection\n\t"
 591                      : ASM_CALL_CONSTRAINT
 592                      : __ASM_SEL_RAW(a, D) (&val)
 593                      : "memory");
 594
 595        BUG_ON(val != 1);
 596
 597        unregister_die_notifier(&int3_exception_nb);
 598}
 599
 600void __init alternative_instructions(void)
 601{
 602        int3_selftest();
 603
 604        /*
 605         * The patching is not fully atomic, so try to avoid local
  606         * interruptions that might execute the code being patched.
 607         * Other CPUs are not running.
 608         */
 609        stop_nmi();
 610
 611        /*
 612         * Don't stop machine check exceptions while patching.
 613         * MCEs only happen when something got corrupted and in this
 614         * case we must do something about the corruption.
 615         * Ignoring it is worse than an unlikely patching race.
 616         * Also machine checks tend to be broadcast and if one CPU
 617         * goes into machine check the others follow quickly, so we don't
  618         * expect a machine check to cause undue problems during code
 619         * patching.
 620         */
 621
 622        /*
 623         * Paravirt patching and alternative patching can be combined to
 624         * replace a function call with a short direct code sequence (e.g.
 625         * by setting a constant return value instead of doing that in an
 626         * external function).
 627         * In order to make this work the following sequence is required:
 628         * 1. set (artificial) features depending on used paravirt
 629         *    functions which can later influence alternative patching
 630         * 2. apply paravirt patching (generally replacing an indirect
 631         *    function call with a direct one)
 632         * 3. apply alternative patching (e.g. replacing a direct function
 633         *    call with a custom code sequence)
 634         * Doing paravirt patching after alternative patching would clobber
 635         * the optimization of the custom code with a function call again.
 636         */
 637        paravirt_set_cap();
 638
 639        /*
 640         * First patch paravirt functions, such that we overwrite the indirect
 641         * call with the direct call.
 642         */
 643        apply_paravirt(__parainstructions, __parainstructions_end);
 644
 645        /*
 646         * Then patch alternatives, such that those paravirt calls that are in
 647         * alternatives can be overwritten by their immediate fragments.
 648         */
 649        apply_alternatives(__alt_instructions, __alt_instructions_end);
 650
 651#ifdef CONFIG_SMP
  652        /* Patch to UP if other CPUs are not imminent. */
 653        if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
 654                uniproc_patched = true;
 655                alternatives_smp_module_add(NULL, "core kernel",
 656                                            __smp_locks, __smp_locks_end,
 657                                            _text, _etext);
 658        }
 659
 660        if (!uniproc_patched || num_possible_cpus() == 1) {
 661                free_init_pages("SMP alternatives",
 662                                (unsigned long)__smp_locks,
 663                                (unsigned long)__smp_locks_end);
 664        }
 665#endif
 666
 667        restart_nmi();
 668        alternatives_patched = 1;
 669}
 670
 671/**
 672 * text_poke_early - Update instructions on a live kernel at boot time
 673 * @addr: address to modify
 674 * @opcode: source of the copy
 675 * @len: length to copy
 676 *
 677 * When you use this code to patch more than one byte of an instruction
 678 * you need to make sure that other CPUs cannot execute this code in parallel.
  679 * Also, no thread may currently be preempted in the middle of these
 680 * instructions. And on the local CPU you need to be protected against NMI or
 681 * MCE handlers seeing an inconsistent instruction while you patch.
 682 */
 683void __init_or_module text_poke_early(void *addr, const void *opcode,
 684                                      size_t len)
 685{
 686        unsigned long flags;
 687
 688        if (boot_cpu_has(X86_FEATURE_NX) &&
 689            is_module_text_address((unsigned long)addr)) {
 690                /*
 691                 * Modules text is marked initially as non-executable, so the
 692                 * code cannot be running and speculative code-fetches are
 693                 * prevented. Just change the code.
 694                 */
 695                memcpy(addr, opcode, len);
 696        } else {
 697                local_irq_save(flags);
 698                memcpy(addr, opcode, len);
 699                local_irq_restore(flags);
 700                sync_core();
 701
 702                /*
 703                 * Could also do a CLFLUSH here to speed up CPU recovery; but
 704                 * that causes hangs on some VIA CPUs.
 705                 */
 706        }
 707}
 708
 709typedef struct {
 710        struct mm_struct *mm;
 711} temp_mm_state_t;
 712
 713/*
 714 * Using a temporary mm allows setting temporary mappings that are not accessible
 715 * by other CPUs. Such mappings are needed to perform sensitive memory writes
 716 * that override the kernel memory protections (e.g., W^X), without exposing the
 717 * temporary page-table mappings that are required for these write operations to
 718 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
 719 * mapping is torn down.
 720 *
 721 * Context: The temporary mm needs to be used exclusively by a single core. To
 722 *          harden security, IRQs must be disabled while the temporary mm is
 723 *          loaded, thereby preventing interrupt handler bugs from overriding
 724 *          the kernel memory protection.
 725 */
 726static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
 727{
 728        temp_mm_state_t temp_state;
 729
 730        lockdep_assert_irqs_disabled();
 731
 732        /*
 733         * Make sure not to be in TLB lazy mode, as otherwise we'll end up
 734         * with a stale address space WITHOUT being in lazy mode after
 735         * restoring the previous mm.
 736         */
 737        if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
 738                leave_mm(smp_processor_id());
 739
 740        temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 741        switch_mm_irqs_off(NULL, mm, current);
 742
 743        /*
 744         * If breakpoints are enabled, disable them while the temporary mm is
 745         * used. Userspace might set up watchpoints on addresses that are used
 746         * in the temporary mm, which would lead to wrong signals being sent or
 747         * crashes.
 748         *
 749         * Note that breakpoints are not disabled selectively, which also causes
 750         * kernel breakpoints (e.g., perf's) to be disabled. This might be
 751         * undesirable, but still seems reasonable as the code that runs in the
 752         * temporary mm should be short.
 753         */
 754        if (hw_breakpoint_active())
 755                hw_breakpoint_disable();
 756
 757        return temp_state;
 758}
 759
 760static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 761{
 762        lockdep_assert_irqs_disabled();
 763        switch_mm_irqs_off(NULL, prev_state.mm, current);
 764
 765        /*
 766         * Restore the breakpoints if they were disabled before the temporary mm
 767         * was loaded.
 768         */
 769        if (hw_breakpoint_active())
 770                hw_breakpoint_restore();
 771}
 772
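    /*
     * Temporary mm and fixed virtual address used by __text_poke() to map the
     * pages being patched; both are set up once at boot by poking_init().
     */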
 773__ro_after_init struct mm_struct *poking_mm;
 774__ro_after_init unsigned long poking_addr;
 775
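    /*
     * Write @len bytes at @addr by mapping the target page(s) into the
     * temporary poking_mm at poking_addr, copying through that alias with
     * IRQs disabled, and then tearing the mapping down again, so the kernel
     * mapping itself never has to be made writable.
     */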
 776static void *__text_poke(void *addr, const void *opcode, size_t len)
 777{
 778        bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
 779        struct page *pages[2] = {NULL};
 780        temp_mm_state_t prev;
 781        unsigned long flags;
 782        pte_t pte, *ptep;
 783        spinlock_t *ptl;
 784        pgprot_t pgprot;
 785
 786        /*
  787         * While the boot memory allocator is running we cannot use struct pages as
 788         * they are not yet initialized. There is no way to recover.
 789         */
 790        BUG_ON(!after_bootmem);
 791
 792        if (!core_kernel_text((unsigned long)addr)) {
 793                pages[0] = vmalloc_to_page(addr);
 794                if (cross_page_boundary)
 795                        pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
 796        } else {
 797                pages[0] = virt_to_page(addr);
 798                WARN_ON(!PageReserved(pages[0]));
 799                if (cross_page_boundary)
 800                        pages[1] = virt_to_page(addr + PAGE_SIZE);
 801        }
 802        /*
 803         * If something went wrong, crash and burn since recovery paths are not
 804         * implemented.
 805         */
 806        BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
 807
 808        /*
 809         * Map the page without the global bit, as TLB flushing is done with
 810         * flush_tlb_mm_range(), which is intended for non-global PTEs.
 811         */
 812        pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
 813
 814        /*
  815         * The lock is not really needed, but this avoids open-coding the lookup.
 816         */
 817        ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
 818
 819        /*
 820         * This must not fail; preallocated in poking_init().
 821         */
 822        VM_BUG_ON(!ptep);
 823
 824        local_irq_save(flags);
 825
 826        pte = mk_pte(pages[0], pgprot);
 827        set_pte_at(poking_mm, poking_addr, ptep, pte);
 828
 829        if (cross_page_boundary) {
 830                pte = mk_pte(pages[1], pgprot);
 831                set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
 832        }
 833
 834        /*
 835         * Loading the temporary mm behaves as a compiler barrier, which
 836         * guarantees that the PTE will be set at the time memcpy() is done.
 837         */
 838        prev = use_temporary_mm(poking_mm);
 839
 840        kasan_disable_current();
 841        memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
 842        kasan_enable_current();
 843
 844        /*
 845         * Ensure that the PTE is only cleared after the instructions of memcpy
 846         * were issued by using a compiler barrier.
 847         */
 848        barrier();
 849
 850        pte_clear(poking_mm, poking_addr, ptep);
 851        if (cross_page_boundary)
 852                pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
 853
 854        /*
 855         * Loading the previous page-table hierarchy requires a serializing
 856         * instruction that already allows the core to see the updated version.
 857         * Xen-PV is assumed to serialize execution in a similar manner.
 858         */
 859        unuse_temporary_mm(prev);
 860
 861        /*
 862         * Flushing the TLB might involve IPIs, which would require enabled
  863         * IRQs, but not if the mm is not used, as is the case at this point.
 864         */
 865        flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
 866                           (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
 867                           PAGE_SHIFT, false);
 868
 869        /*
 870         * If the text does not match what we just wrote then something is
 871         * fundamentally screwy; there's nothing we can really do about that.
 872         */
 873        BUG_ON(memcmp(addr, opcode, len));
 874
 875        local_irq_restore(flags);
 876        pte_unmap_unlock(ptep, ptl);
 877        return addr;
 878}
 879
 880/**
 881 * text_poke - Update instructions on a live kernel
 882 * @addr: address to modify
 883 * @opcode: source of the copy
 884 * @len: length to copy
 885 *
 886 * Only atomic text poke/set should be allowed when not doing early patching.
 887 * It means the size must be writable atomically and the address must be aligned
 888 * in a way that permits an atomic write. It also makes sure we fit on a single
 889 * page.
 890 *
 891 * Note that the caller must ensure that if the modified code is part of a
 892 * module, the module would not be removed during poking. This can be achieved
 893 * by registering a module notifier, and ordering module removal and patching
 894 * through a mutex.
 895 */
 896void *text_poke(void *addr, const void *opcode, size_t len)
 897{
 898        lockdep_assert_held(&text_mutex);
 899
 900        return __text_poke(addr, opcode, len);
 901}
 902
 903/**
 904 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 905 * @addr: address to modify
 906 * @opcode: source of the copy
 907 * @len: length to copy
 908 *
 909 * Only atomic text poke/set should be allowed when not doing early patching.
 910 * It means the size must be writable atomically and the address must be aligned
 911 * in a way that permits an atomic write. It also makes sure we fit on a single
 912 * page.
 913 *
 914 * Context: should only be used by kgdb, which ensures no other core is running,
 915 *          despite the fact it does not hold the text_mutex.
 916 */
 917void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
 918{
 919        return __text_poke(addr, opcode, len);
 920}
 921
 922static void do_sync_core(void *info)
 923{
 924        sync_core();
 925}
 926
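    /*
     * Make every online CPU (including the caller) execute a serializing
     * sync_core() so stale prefetched instruction bytes are discarded after
     * a text_poke().
     */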
 927void text_poke_sync(void)
 928{
 929        on_each_cpu(do_sync_core, NULL, 1);
 930}
 931
 932struct text_poke_loc {
 933        s32 rel_addr; /* addr := _stext + rel_addr */
 934        s32 rel32;
 935        u8 opcode;
 936        const u8 text[POKE_MAX_OPCODE_SIZE];
 937        u8 old;
 938};
 939
 940struct bp_patching_desc {
 941        struct text_poke_loc *vec;
 942        int nr_entries;
 943        atomic_t refs;
 944};
 945
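    /*
     * bp_desc points at the descriptor of the batch currently being patched.
     * poke_int3_handler() pins it with try_get_desc()/put_desc() so that
     * text_poke_bp_batch() can wait for all in-flight INT3 handlers to drop
     * their references before its on-stack descriptor goes out of scope.
     */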
 946static struct bp_patching_desc *bp_desc;
 947
 948static __always_inline
 949struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
 950{
 951        struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
 952
 953        if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
 954                return NULL;
 955
 956        return desc;
 957}
 958
 959static __always_inline void put_desc(struct bp_patching_desc *desc)
 960{
 961        smp_mb__before_atomic();
 962        arch_atomic_dec(&desc->refs);
 963}
 964
 965static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
 966{
 967        return _stext + tp->rel_addr;
 968}
 969
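    /* __inline_bsearch() comparator: order text_poke_loc entries by address. */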
 970static __always_inline int patch_cmp(const void *key, const void *elt)
 971{
 972        struct text_poke_loc *tp = (struct text_poke_loc *) elt;
 973
 974        if (key < text_poke_addr(tp))
 975                return -1;
 976        if (key > text_poke_addr(tp))
 977                return 1;
 978        return 0;
 979}
 980
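    /*
     * #BP handler for the text_poke_bp() machinery: if the trapping INT3
     * belongs to the batch currently being patched, emulate the target
     * instruction (RET/CALL/JMP) instead of letting the half-written bytes
     * execute; explicit INT3 pokes are left for their own handlers.
     */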
 981noinstr int poke_int3_handler(struct pt_regs *regs)
 982{
 983        struct bp_patching_desc *desc;
 984        struct text_poke_loc *tp;
 985        int len, ret = 0;
 986        void *ip;
 987
 988        if (user_mode(regs))
 989                return 0;
 990
 991        /*
 992         * Having observed our INT3 instruction, we now must observe
 993         * bp_desc:
 994         *
 995         *      bp_desc = desc                  INT3
 996         *      WMB                             RMB
 997         *      write INT3                      if (desc)
 998         */
 999        smp_rmb();
1000
1001        desc = try_get_desc(&bp_desc);
1002        if (!desc)
1003                return 0;
1004
1005        /*
1006         * Discount the INT3. See text_poke_bp_batch().
1007         */
1008        ip = (void *) regs->ip - INT3_INSN_SIZE;
1009
1010        /*
1011         * Skip the binary search if there is a single member in the vector.
1012         */
1013        if (unlikely(desc->nr_entries > 1)) {
1014                tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1015                                      sizeof(struct text_poke_loc),
1016                                      patch_cmp);
1017                if (!tp)
1018                        goto out_put;
1019        } else {
1020                tp = desc->vec;
1021                if (text_poke_addr(tp) != ip)
1022                        goto out_put;
1023        }
1024
1025        len = text_opcode_size(tp->opcode);
1026        ip += len;
1027
1028        switch (tp->opcode) {
1029        case INT3_INSN_OPCODE:
1030                /*
1031                 * Someone poked an explicit INT3, they'll want to handle it,
1032                 * do not consume.
1033                 */
1034                goto out_put;
1035
1036        case RET_INSN_OPCODE:
1037                int3_emulate_ret(regs);
1038                break;
1039
1040        case CALL_INSN_OPCODE:
1041                int3_emulate_call(regs, (long)ip + tp->rel32);
1042                break;
1043
1044        case JMP32_INSN_OPCODE:
1045        case JMP8_INSN_OPCODE:
1046                int3_emulate_jmp(regs, (long)ip + tp->rel32);
1047                break;
1048
1049        default:
1050                BUG();
1051        }
1052
1053        ret = 1;
1054
1055out_put:
1056        put_desc(desc);
1057        return ret;
1058}
1059
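    /*
     * Batching state for text_poke_queue()/text_poke_finish(): up to
     * TP_VEC_MAX address-ordered patch locations are collected in tp_vec and
     * then applied in a single text_poke_bp_batch() call.
     */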
1060#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1061static struct text_poke_loc tp_vec[TP_VEC_MAX];
1062static int tp_vec_nr;
1063
1064/**
1065 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1066 * @tp:                 vector of instructions to patch
1067 * @nr_entries:         number of entries in the vector
1068 *
1069 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
1070 * We completely avoid stop_machine() here, and achieve the
1071 * synchronization using an int3 breakpoint.
1072 *
1073 * The way it is done:
1074 *      - For each entry in the vector:
1075 *              - add an int3 trap to the address that will be patched
1076 *      - sync cores
1077 *      - For each entry in the vector:
1078 *              - update all but the first byte of the patched range
1079 *      - sync cores
1080 *      - For each entry in the vector:
1081 *              - replace the first byte (int3) by the first byte of
1082 *                replacing opcode
1083 *      - sync cores
1084 */
1085static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1086{
1087        struct bp_patching_desc desc = {
1088                .vec = tp,
1089                .nr_entries = nr_entries,
1090                .refs = ATOMIC_INIT(1),
1091        };
1092        unsigned char int3 = INT3_INSN_OPCODE;
1093        unsigned int i;
1094        int do_sync;
1095
1096        lockdep_assert_held(&text_mutex);
1097
1098        smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1099
1100        /*
1101         * Corresponding read barrier in int3 notifier for making sure the
1102         * nr_entries and handler are correctly ordered wrt. patching.
1103         */
1104        smp_wmb();
1105
1106        /*
1107         * First step: add an int3 trap to the address that will be patched.
1108         */
1109        for (i = 0; i < nr_entries; i++) {
1110                tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1111                text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1112        }
1113
1114        text_poke_sync();
1115
1116        /*
1117         * Second step: update all but the first byte of the patched range.
1118         */
1119        for (do_sync = 0, i = 0; i < nr_entries; i++) {
1120                u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1121                int len = text_opcode_size(tp[i].opcode);
1122
1123                if (len - INT3_INSN_SIZE > 0) {
1124                        memcpy(old + INT3_INSN_SIZE,
1125                               text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1126                               len - INT3_INSN_SIZE);
1127                        text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1128                                  (const char *)tp[i].text + INT3_INSN_SIZE,
1129                                  len - INT3_INSN_SIZE);
1130                        do_sync++;
1131                }
1132
1133                /*
1134                 * Emit a perf event to record the text poke, primarily to
1135                 * support Intel PT decoding which must walk the executable code
1136                 * to reconstruct the trace. The flow up to here is:
1137                 *   - write INT3 byte
1138                 *   - IPI-SYNC
1139                 *   - write instruction tail
1140                 * At this point the actual control flow will be through the
1141                 * INT3 and handler and not hit the old or new instruction.
1142                 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1143                 * can still be decoded. Subsequently:
1144                 *   - emit RECORD_TEXT_POKE with the new instruction
1145                 *   - IPI-SYNC
1146                 *   - write first byte
1147                 *   - IPI-SYNC
1148                 * So before the text poke event timestamp, the decoder will see
1149                 * either the old instruction flow or FUP/TIP of INT3. After the
1150                 * text poke event timestamp, the decoder will see either the
1151                 * new instruction flow or FUP/TIP of INT3. Thus decoders can
1152                 * use the timestamp as the point at which to modify the
1153                 * executable code.
1154                 * The old instruction is recorded so that the event can be
1155                 * processed forwards or backwards.
1156                 */
1157                perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1158                                     tp[i].text, len);
1159        }
1160
1161        if (do_sync) {
1162                /*
1163                 * According to Intel, this core syncing is very likely
1164                 * not necessary and we'd be safe even without it. But
1165                 * better safe than sorry (plus there's not only Intel).
1166                 */
1167                text_poke_sync();
1168        }
1169
1170        /*
1171         * Third step: replace the first byte (int3) by the first byte of
1172         * replacing opcode.
1173         */
1174        for (do_sync = 0, i = 0; i < nr_entries; i++) {
1175                if (tp[i].text[0] == INT3_INSN_OPCODE)
1176                        continue;
1177
1178                text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1179                do_sync++;
1180        }
1181
1182        if (do_sync)
1183                text_poke_sync();
1184
1185        /*
1186         * Remove and synchronize_rcu(), except we have a very primitive
1187         * refcount-based completion.
1188         */
1189        WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1190        if (!atomic_dec_and_test(&desc.refs))
1191                atomic_cond_read_acquire(&desc.refs, !VAL);
1192}
1193
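    /*
     * Encode one patch location: copy the new instruction bytes, decode
     * @emulate (which defaults to @opcode) and record the opcode and relative
     * displacement that poke_int3_handler() needs in order to emulate the
     * instruction while the INT3 byte is in place.  Plain NOP2/NOP5
     * replacements are emulated as JMP8/JMP32 with a zero displacement.
     */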
1194static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1195                               const void *opcode, size_t len, const void *emulate)
1196{
1197        struct insn insn;
1198        int ret;
1199
1200        memcpy((void *)tp->text, opcode, len);
1201        if (!emulate)
1202                emulate = opcode;
1203
1204        ret = insn_decode_kernel(&insn, emulate);
1205
1206        BUG_ON(ret < 0);
1207        BUG_ON(len != insn.length);
1208
1209        tp->rel_addr = addr - (void *)_stext;
1210        tp->opcode = insn.opcode.bytes[0];
1211
1212        switch (tp->opcode) {
1213        case INT3_INSN_OPCODE:
1214        case RET_INSN_OPCODE:
1215                break;
1216
1217        case CALL_INSN_OPCODE:
1218        case JMP32_INSN_OPCODE:
1219        case JMP8_INSN_OPCODE:
1220                tp->rel32 = insn.immediate.value;
1221                break;
1222
1223        default: /* assume NOP */
1224                switch (len) {
1225                case 2: /* NOP2 -- emulate as JMP8+0 */
1226                        BUG_ON(memcmp(emulate, x86_nops[len], len));
1227                        tp->opcode = JMP8_INSN_OPCODE;
1228                        tp->rel32 = 0;
1229                        break;
1230
1231                case 5: /* NOP5 -- emulate as JMP32+0 */
1232                        BUG_ON(memcmp(emulate, x86_nops[len], len));
1233                        tp->opcode = JMP32_INSN_OPCODE;
1234                        tp->rel32 = 0;
1235                        break;
1236
1237                default: /* unknown instruction */
1238                        BUG();
1239                }
1240                break;
1241        }
1242}
1243
1244/*
1245 * We rely on tp_vec being address-ordered; ensure this is so by flushing
1246 * early if needed.
1247 */
1248static bool tp_order_fail(void *addr)
1249{
1250        struct text_poke_loc *tp;
1251
1252        if (!tp_vec_nr)
1253                return false;
1254
1255        if (!addr) /* force */
1256                return true;
1257
1258        tp = &tp_vec[tp_vec_nr - 1];
1259        if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1260                return true;
1261
1262        return false;
1263}
1264
1265static void text_poke_flush(void *addr)
1266{
1267        if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1268                text_poke_bp_batch(tp_vec, tp_vec_nr);
1269                tp_vec_nr = 0;
1270        }
1271}
1272
1273void text_poke_finish(void)
1274{
1275        text_poke_flush(NULL);
1276}
1277
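    /*
     * Queue one patch for a later text_poke_bp_batch(); the pending vector is
     * flushed first when it is full or when @addr would break its address
     * order.  Callers must terminate the sequence with text_poke_finish().
     */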
1278void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1279{
1280        struct text_poke_loc *tp;
1281
1282        if (unlikely(system_state == SYSTEM_BOOTING)) {
1283                text_poke_early(addr, opcode, len);
1284                return;
1285        }
1286
1287        text_poke_flush(addr);
1288
1289        tp = &tp_vec[tp_vec_nr++];
1290        text_poke_loc_init(tp, addr, opcode, len, emulate);
1291}
1292
1293/**
1294 * text_poke_bp() -- update instructions on live kernel on SMP
1295 * @addr:       address to patch
1296 * @opcode:     opcode of new instruction
1297 * @len:        length to copy
1298 * @emulate:    instruction to be emulated
1299 *
1300 * Update a single instruction with an on-stack vector, avoiding
1301 * dynamically allocated memory. This function should be used when it is
1302 * not possible to allocate memory.
1303 */
1304void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1305{
1306        struct text_poke_loc tp;
1307
1308        if (unlikely(system_state == SYSTEM_BOOTING)) {
1309                text_poke_early(addr, opcode, len);
1310                return;
1311        }
1312
1313        text_poke_loc_init(&tp, addr, opcode, len, emulate);
1314        text_poke_bp_batch(&tp, 1);
1315}
1316