linux/arch/x86/kernel/alternative.c
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)                                           \
do {                                                                    \
        if (debug_alternative)                                          \
                printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);   \
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)                              \
do {                                                                    \
        if (unlikely(debug_alternative)) {                              \
                int j;                                                  \
                                                                        \
                if (!(len))                                             \
                        break;                                          \
                                                                        \
                printk(KERN_DEBUG fmt, ##args);                         \
                for (j = 0; j < (len) - 1; j++)                         \
                        printk(KERN_CONT "%02hhx ", buf[j]);            \
                printk(KERN_CONT "%02hhx\n", buf[j]);                   \
        }                                                               \
} while (0)
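
/*
 * With "debug-alternative" on the kernel command line, DUMP_BYTES() emits
 * one KERN_DEBUG line per dump; e.g. dumping a 5-byte NOP at an
 * (illustrative) address would print:
 *
 *      ffffffff81000000: old_insn: 0f 1f 44 00 00
 */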

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
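/*
 * For example, the 3-byte NOP starts at intelnops + 1 + 2: the 1-byte and
 * 2-byte NOPs precede it, so their sizes sum to its offset. Indexing
 * *_nops[n] therefore yields a pointer to an n-byte NOP sequence.
 */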
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
        GENERIC_NOP1,
        GENERIC_NOP2,
        GENERIC_NOP3,
        GENERIC_NOP4,
        GENERIC_NOP5,
        GENERIC_NOP6,
        GENERIC_NOP7,
        GENERIC_NOP8,
        GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
        K8_NOP1,
        K8_NOP2,
        K8_NOP3,
        K8_NOP4,
        K8_NOP5,
        K8_NOP6,
        K8_NOP7,
        K8_NOP8,
        K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
        K7_NOP1,
        K7_NOP2,
        K7_NOP3,
        K7_NOP4,
        K7_NOP5,
        K7_NOP6,
        K7_NOP7,
        K7_NOP8,
        K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
        P6_NOP1,
        P6_NOP2,
        P6_NOP3,
        P6_NOP4,
        P6_NOP5,
        P6_NOP6,
        P6_NOP7,
        P6_NOP8,
        P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * Due to a decoder implementation quirk, some
                 * specific Intel CPUs actually perform better with
                 * the "k8_nops" than with the SDM-recommended NOPs.
                 */
                if (boot_cpu_data.x86 == 6 &&
                    boot_cpu_data.x86_model >= 0x0f &&
                    boot_cpu_data.x86_model != 0x1c &&
                    boot_cpu_data.x86_model != 0x26 &&
                    boot_cpu_data.x86_model != 0x27 &&
                    boot_cpu_data.x86_model < 0x30) {
                        ideal_nops = k8_nops;
                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
                        ideal_nops = p6_nops;
                } else {
#ifdef CONFIG_X86_64
                        ideal_nops = k8_nops;
#else
                        ideal_nops = intel_nops;
#endif
                }
                break;

        case X86_VENDOR_HYGON:
                ideal_nops = p6_nops;
                return;

        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 > 0xf) {
                        ideal_nops = p6_nops;
                        return;
                }

                /* fall through */

        default:
#ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
#else
                if (boot_cpu_has(X86_FEATURE_K8))
                        ideal_nops = k8_nops;
                else if (boot_cpu_has(X86_FEATURE_K7))
                        ideal_nops = k7_nops;
                else
                        ideal_nops = intel_nops;
#endif
        }
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, ideal_nops[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
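
/*
 * For instance, with ASM_NOP_MAX == 8 an 11-byte hole becomes one 8-byte
 * NOP followed by one 3-byte NOP, i.e. two instructions instead of eleven
 * 0x90 bytes.
 */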

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement
 * (opcode 0xeb or 0xe9, respectively)?
 */
static inline bool is_jmp(const u8 opcode)
{
        return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
        u8 *next_rip, *tgt_rip;
        s32 n_dspl, o_dspl;
        int repl_len;

        if (a->replacementlen != 5)
                return;

        o_dspl = *(s32 *)(insn_buff + 1);

        /* next_rip of the replacement JMP */
        next_rip = repl_insn + a->replacementlen;
        /* target rip of the replacement JMP */
        tgt_rip  = next_rip + o_dspl;
        n_dspl = tgt_rip - orig_insn;

        DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

        if (tgt_rip - orig_insn >= 0) {
                if (n_dspl - 2 <= 127)
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        /* negative offset */
        } else {
                if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        }

two_byte_jmp:
        n_dspl -= 2;

        insn_buff[0] = 0xeb;
        insn_buff[1] = (s8)n_dspl;
        add_nops(insn_buff + 2, 3);

        repl_len = 2;
        goto done;

five_byte_jmp:
        n_dspl -= 5;

        insn_buff[0] = 0xe9;
        *(s32 *)&insn_buff[1] = n_dspl;

        repl_len = 5;

done:

        DPRINTK("final displ: 0x%08x, JMP 0x%lx",
                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
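
/*
 * Worked example (illustrative addresses): with orig_insn at 0x1000,
 * repl_insn at 0x2000 and a replacement JMP whose disp32 is 0x30:
 * next_rip = 0x2005, tgt_rip = 0x2035, so n_dspl = 0x1035. That cannot
 * be encoded as a rel8 (n_dspl - 2 > 127), so the five-byte form is
 * used: 0xe9 with rel32 = 0x1030, landing on 0x1000 + 5 + 0x1030 = 0x2035.
 */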

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
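/*
 * If an alternative's padding consists solely of single-byte 0x90 NOPs,
 * rewrite that padding in place using this CPU's ideal (longest-form) NOPs.
 */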
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
        unsigned long flags;
        int i;

        for (i = 0; i < a->padlen; i++) {
                if (instr[i] != 0x90)
                        return;
        }

        local_irq_save(flags);
        add_nops(instr + (a->instrlen - a->padlen), a->padlen);
        local_irq_restore(flags);

        DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
                   instr, a->instrlen - a->padlen, a->instrlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
                                                  struct alt_instr *end)
{
        struct alt_instr *a;
        u8 *instr, *replacement;
        u8 insn_buff[MAX_PATCH_LEN];

        DPRINTK("alt table %px, -> %px", start, end);
        /*
         * The scan order should be from start to end. A later scanned
         * alternative code can overwrite previously scanned alternative code.
         * Some kernel functions (e.g. memcpy, memset, etc) use this order to
         * patch code.
         *
         * So be careful if you want to change the scan order to any other
         * order.
         */
        for (a = start; a < end; a++) {
                int insn_buff_sz = 0;

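                /*
                 * instr_offset and repl_offset are relative to their own
                 * location in the table, which keeps struct alt_instr
                 * position-independent.
                 */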
                instr = (u8 *)&a->instr_offset + a->instr_offset;
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->instrlen > sizeof(insn_buff));
                BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
                if (!boot_cpu_has(a->cpuid)) {
                        if (a->padlen > 1)
                                optimize_nops(a, instr);

                        continue;
                }

                DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
                        a->cpuid >> 5,
                        a->cpuid & 0x1f,
                        instr, instr, a->instrlen,
                        replacement, a->replacementlen, a->padlen);

                DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
                DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

                memcpy(insn_buff, replacement, a->replacementlen);
                insn_buff_sz = a->replacementlen;

                /*
                 * 0xe8 is a relative CALL; fix the offset.
                 *
                 * Instruction length is checked before the opcode to avoid
                 * accessing uninitialized bytes for zero-length replacements.
                 */
                if (a->replacementlen == 5 && *insn_buff == 0xe8) {
                        *(s32 *)(insn_buff + 1) += replacement - instr;
                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
                                *(s32 *)(insn_buff + 1),
                                (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
                }

                if (a->replacementlen && is_jmp(replacement[0]))
                        recompute_jump(a, instr, replacement, insn_buff);

                if (a->instrlen > a->replacementlen) {
                        add_nops(insn_buff + a->replacementlen,
                                 a->instrlen - a->replacementlen);
                        insn_buff_sz += a->instrlen - a->replacementlen;
                }
                DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

                text_poke_early(instr, insn_buff, insn_buff_sz);
        }
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
                                  u8 *text, u8 *text_end)
{
        const s32 *poff;

        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                if (*ptr == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
                                    u8 *text, u8 *text_end)
{
        const s32 *poff;

        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                if (*ptr == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
}
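
/*
 * LOCK (0xf0) and the DS override (0x3e) are both single-byte prefixes,
 * so each rewrite above is a one-byte, atomic text_poke() either way.
 */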

struct smp_alt_module {
        /* the module that owns these lock prefixes (NULL for the core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        const s32       *locks;
        const s32       *locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;    /* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text,  void *text_end)
{
        struct smp_alt_module *smp;

        mutex_lock(&text_mutex);
        if (!uniproc_patched)
                goto unlock;

        if (num_possible_cpus() == 1)
                /* Don't bother remembering, we'll never have to undo it. */
                goto smp_unlock;

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (!smp)
                /* we'll run the (safe but slow) SMP code then ... */
                goto unlock;

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("locks %p -> %p, text %p -> %p, name %s",
                smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
        alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
        mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        mutex_lock(&text_mutex);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                kfree(item);
                break;
        }
        mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
        struct smp_alt_module *mod;

        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);

        mutex_lock(&text_mutex);

        if (uniproc_patched) {
                pr_info("switching to SMP code\n");
                BUG_ON(num_online_cpus() != 1);
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
                uniproc_patched = false;
        }
        mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        const s32 *poff;
        u8 *text_start = start;
        u8 *text_end = end;

        lockdep_assert_held(&text_mutex);

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (poff = mod->locks; poff < mod->locks_end; poff++) {
                        const u8 *ptr = (const u8 *)poff + *poff;

                        if (text_start <= ptr && text_end > ptr)
                                return 1;
                }
        }

        return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insn_buff[MAX_PATCH_LEN];

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insn_buff, p->instr, p->len);
                used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insn_buff + used, p->len - used);
                text_poke_early(p->instr, insn_buff, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif  /* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"       .pushsection    .init.text, \"ax\", @progbits\n"
"       .type           int3_magic, @function\n"
"int3_magic:\n"
"       movl    $1, (%" _ASM_ARG1 ")\n"
"       ret\n"
"       .size           int3_magic, .-int3_magic\n"
"       .popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
        struct die_args *args = data;
        struct pt_regs *regs = args->regs;

        if (!regs || user_mode(regs))
                return NOTIFY_DONE;

        if (val != DIE_INT3)
                return NOTIFY_DONE;

        if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
                return NOTIFY_DONE;

        int3_emulate_call(regs, (unsigned long)&int3_magic);
        return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
        static __initdata struct notifier_block int3_exception_nb = {
                .notifier_call  = int3_exception_notify,
                .priority       = INT_MAX-1, /* last */
        };
        unsigned int val = 0;

        BUG_ON(register_die_notifier(&int3_exception_nb));

        /*
         * Basically: int3_magic(&val); but really complicated :-)
         *
         * Stick the address of the INT3 instruction into int3_selftest_ip,
         * then trigger the INT3, padded with NOPs to match a CALL instruction
         * length.
         */
        asm volatile ("1: int3; nop; nop; nop; nop\n\t"
                      ".pushsection .init.data,\"aw\"\n\t"
                      ".align " __ASM_SEL(4, 8) "\n\t"
                      ".type int3_selftest_ip, @object\n\t"
                      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
                      "int3_selftest_ip:\n\t"
                      __ASM_SEL(.long, .quad) " 1b\n\t"
                      ".popsection\n\t"
                      : ASM_CALL_CONSTRAINT
                      : __ASM_SEL_RAW(a, D) (&val)
                      : "memory");

        BUG_ON(val != 1);

        unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
        int3_selftest();

        /*
         * The patching is not fully atomic, so try to avoid local
         * interruptions that might execute the code while it is being
         * patched. Other CPUs are not running.
         */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
        /* Patch to UP if other CPUs are not imminent. */
        if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
                uniproc_patched = true;
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
        }

        if (!uniproc_patched || num_possible_cpus() == 1) {
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);
        }
#endif

        apply_paravirt(__parainstructions, __parainstructions_end);

        restart_nmi();
        alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
                                      size_t len)
{
        unsigned long flags;

        if (boot_cpu_has(X86_FEATURE_NX) &&
            is_module_text_address((unsigned long)addr)) {
                /*
                 * Modules text is marked initially as non-executable, so the
                 * code cannot be running and speculative code-fetches are
                 * prevented. Just change the code.
                 */
                memcpy(addr, opcode, len);
        } else {
                local_irq_save(flags);
                memcpy(addr, opcode, len);
                local_irq_restore(flags);
                sync_core();

                /*
                 * Could also do a CLFLUSH here to speed up CPU recovery; but
                 * that causes hangs on some VIA CPUs.
                 */
        }
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
        bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
        struct page *pages[2] = {NULL};
        temp_mm_state_t prev;
        unsigned long flags;
        pte_t pte, *ptep;
        spinlock_t *ptl;
        pgprot_t pgprot;

        /*
         * While boot memory allocator is running we cannot use struct pages as
         * they are not yet initialized. There is no way to recover.
         */
        BUG_ON(!after_bootmem);

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                if (cross_page_boundary)
                        pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                if (cross_page_boundary)
                        pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        /*
         * If something went wrong, crash and burn since recovery paths are not
         * implemented.
         */
        BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

        local_irq_save(flags);

        /*
         * Map the page without the global bit, as TLB flushing is done with
         * flush_tlb_mm_range(), which is intended for non-global PTEs.
         */
        pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

        /*
         * The lock is not really needed, but using get_locked_pte() lets us
         * avoid open-coding the page-table walk.
         */
        ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

        /*
         * This must not fail; preallocated in poking_init().
         */
        VM_BUG_ON(!ptep);

        pte = mk_pte(pages[0], pgprot);
        set_pte_at(poking_mm, poking_addr, ptep, pte);

        if (cross_page_boundary) {
                pte = mk_pte(pages[1], pgprot);
                set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
        }

        /*
         * Loading the temporary mm behaves as a compiler barrier, which
         * guarantees that the PTE will be set at the time memcpy() is done.
         */
        prev = use_temporary_mm(poking_mm);

        kasan_disable_current();
        memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
        kasan_enable_current();

        /*
         * Ensure that the PTE is only cleared after the instructions of memcpy
         * were issued by using a compiler barrier.
         */
        barrier();

        pte_clear(poking_mm, poking_addr, ptep);
        if (cross_page_boundary)
                pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

        /*
         * Loading the previous page-table hierarchy requires a serializing
         * instruction that already allows the core to see the updated version.
         * Xen-PV is assumed to serialize execution in a similar manner.
         */
        unuse_temporary_mm(prev);

        /*
         * Flushing the TLB might involve IPIs, which would require enabled
         * IRQs, but none are needed while the mm is not in use, as is the
         * case at this point.
         */
        flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
                           (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
                           PAGE_SHIFT, false);

        /*
         * If the text does not match what we just wrote then something is
         * fundamentally screwy; there's nothing we can really do about that.
         */
        BUG_ON(memcmp(addr, opcode, len));

        pte_unmap_unlock(ptep, ptl);
        local_irq_restore(flags);
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
        lockdep_assert_held(&text_mutex);

        return __text_poke(addr, opcode, len);
}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
        return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
        sync_core();
}

static struct bp_patching_desc {
        struct text_poke_loc *vec;
        int nr_entries;
} bp_patching;

static int patch_cmp(const void *key, const void *elt)
{
        struct text_poke_loc *tp = (struct text_poke_loc *) elt;

        if (key < tp->addr)
                return -1;
        if (key > tp->addr)
                return 1;
        return 0;
}
NOKPROBE_SYMBOL(patch_cmp);
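
/*
 * Note: patch_cmp() is used with bsearch(), which assumes bp_patching.vec
 * is sorted by ->addr; text_poke_bp_batch() callers are expected to hand
 * in entries in ascending address order.
 */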

int poke_int3_handler(struct pt_regs *regs)
{
        struct text_poke_loc *tp;
        unsigned char int3 = 0xcc;
        void *ip;

        /*
         * Having observed our INT3 instruction, we now must observe
         * bp_patching.nr_entries.
         *
         *      nr_entries != 0                 INT3
         *      WMB                             RMB
         *      write INT3                      if (nr_entries)
         *
         * Idem for other elements in bp_patching.
         */
        smp_rmb();

        if (likely(!bp_patching.nr_entries))
                return 0;

        if (user_mode(regs))
                return 0;

        /*
         * Discount the sizeof(int3). See text_poke_bp_batch().
         */
        ip = (void *) regs->ip - sizeof(int3);

        /*
         * Skip the binary search if there is a single member in the vector.
         */
        if (unlikely(bp_patching.nr_entries > 1)) {
                tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
                             sizeof(struct text_poke_loc),
                             patch_cmp);
                if (!tp)
                        return 0;
        } else {
                tp = bp_patching.vec;
                if (tp->addr != ip)
                        return 0;
        }

        /* set up the specified breakpoint detour */
        regs->ip = (unsigned long) tp->detour;

        return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:                 vector of instructions to patch
 * @nr_entries:         number of entries in the vector
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoints.
 *
 * The way it is done:
 *      - For each entry in the vector:
 *              - add an int3 trap to the address that will be patched
 *      - sync cores
 *      - For each entry in the vector:
 *              - update all but the first byte of the patched range
 *      - sync cores
 *      - For each entry in the vector:
 *              - replace the first byte (int3) by the first byte of the
 *                replacement opcode
 *      - sync cores
 */
void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
        int patched_all_but_first = 0;
        unsigned char int3 = 0xcc;
        unsigned int i;

        lockdep_assert_held(&text_mutex);

        bp_patching.vec = tp;
        bp_patching.nr_entries = nr_entries;

        /*
         * Corresponding read barrier in int3 notifier for making sure the
         * nr_entries and handler are correctly ordered wrt. patching.
         */
        smp_wmb();

        /*
         * First step: add an int3 trap to each address that will be patched.
         */
        for (i = 0; i < nr_entries; i++)
                text_poke(tp[i].addr, &int3, sizeof(int3));

        on_each_cpu(do_sync_core, NULL, 1);

        /*
         * Second step: update all but the first byte of the patched range.
         */
        for (i = 0; i < nr_entries; i++) {
                if (tp[i].len - sizeof(int3) > 0) {
                        text_poke((char *)tp[i].addr + sizeof(int3),
                                  (const char *)tp[i].opcode + sizeof(int3),
                                  tp[i].len - sizeof(int3));
                        patched_all_but_first++;
                }
        }

        if (patched_all_but_first) {
                /*
                 * According to Intel, this core syncing is very likely
                 * not necessary and we'd be safe even without it. But
                 * better safe than sorry (plus there's not only Intel).
                 */
                on_each_cpu(do_sync_core, NULL, 1);
        }

        /*
         * Third step: replace the first byte (int3) by the first byte of the
         * replacement opcode.
         */
        for (i = 0; i < nr_entries; i++)
                text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));

        on_each_cpu(do_sync_core, NULL, 1);
        /*
         * sync_core() implies an smp_mb() and orders this store against
         * the writing of the new instruction.
         */
        bp_patching.vec = NULL;
        bp_patching.nr_entries = 0;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:       address to patch
 * @opcode:     opcode of new instruction
 * @len:        length to copy
 * @handler:    address to jump to when the temporary breakpoint is hit
 *
 * Update a single instruction with the vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
        struct text_poke_loc tp = {
                .detour = handler,
                .addr = addr,
                .len = len,
        };

        if (len > POKE_MAX_OPCODE_SIZE) {
                WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
                return;
        }

        memcpy((void *)tp.opcode, opcode, len);

        text_poke_bp_batch(&tp, 1);
}
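
/*
 * Example usage (hypothetical site/target, in the style of
 * arch/x86/kernel/jump_label.c): turning a 5-byte NOP at @site into a
 * near JMP to @target. While the site temporarily holds an int3, a CPU
 * hitting it is diverted to @target via the detour, matching what the
 * finished JMP will do. The caller must hold text_mutex.
 *
 *      unsigned char jmp[5] = { 0xe9, };
 *
 *      *(s32 *)&jmp[1] = (s32)(target - (site + 5));
 *      text_poke_bp(site, jmp, 5, (void *)target);
 */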