linux/arch/x86/mm/pti.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *      https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *                     Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK   0
#endif
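
/*
 * __GFP_NOTRACK was removed upstream along with kmemcheck; defining it
 * to 0 keeps this file buildable when backported to kernels on either
 * side of that removal.
 */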

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PTE
#endif
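
/*
 * The 64-bit kernel image is mapped with 2MB pages where possible, so
 * cloning at PMD level shares those mappings wholesale.  On 32 bit the
 * image is cloned with 4k granularity, which keeps anything that merely
 * shares a large mapping with the image out of the user page-tables.
 */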

static void __init pti_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                pr_info("%s\n", reason);
}

static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
        PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
        char arg[5];
        int ret;

        /* Assume mode is auto unless overridden. */
        pti_mode = PTI_AUTO;

        if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on XEN PV.");
                return;
        }

        ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
        if (ret > 0) {
                if (ret == 3 && !strncmp(arg, "off", 3)) {
                        pti_mode = PTI_FORCE_OFF;
                        pti_print_if_insecure("disabled on command line.");
                        return;
                }
                if (ret == 2 && !strncmp(arg, "on", 2)) {
                        pti_mode = PTI_FORCE_ON;
                        pti_print_if_secure("force enabled on command line.");
                        goto enable;
                }
                if (ret == 4 && !strncmp(arg, "auto", 4)) {
                        pti_mode = PTI_AUTO;
                        goto autosel;
                }
        }

        if (cmdline_find_option_bool(boot_command_line, "nopti") ||
            cpu_mitigations_off()) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on command line.");
                return;
        }

autosel:
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return;
enable:
        setup_force_cpu_cap(X86_FEATURE_PTI);
}
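
/*
 * Summary of the knobs parsed above:
 *
 *   pti=off         - force PTI off
 *   pti=on          - force PTI on, even without X86_BUG_CPU_MELTDOWN
 *   pti=auto        - enable PTI only on affected CPUs (the default)
 *   nopti           - legacy spelling of pti=off
 *   mitigations=off - turns PTI off along with the other CPU mitigations
 */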

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
        /*
         * Changes to the high (kernel) portion of the kernelmode page
         * tables are not automatically propagated to the usermode tables.
         *
         * Users should keep in mind that, unlike the kernelmode tables,
         * there is no vmalloc_fault equivalent for the usermode tables.
         * Top-level entries added to init_mm's usermode pgd after boot
         * will not be automatically propagated to other mms.
         */
        if (!pgdp_maps_userspace(pgdp))
                return pgd;

        /*
         * The user page tables get the full PGD, accessible from
         * userspace:
         */
        kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

        /*
         * If this is normal user memory, make it NX in the kernel
         * pagetables so that, if we somehow screw up and return to
         * usermode with the kernel CR3 loaded, we'll get a page fault
         * instead of allowing user code to execute with the wrong CR3.
         *
         * As exceptions, we don't set NX if:
         *  - _PAGE_USER is not set.  This could be an executable
         *     EFI runtime mapping or something similar, and the kernel
         *     may execute from it
         *  - we don't have NX support
         *  - we're clearing the PGD (i.e. the new pgd is not present).
         */
        if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
            (__supported_pte_mask & _PAGE_NX))
                pgd.pgd |= _PAGE_NX;

        /* return the copy of the PGD we want the kernel to use: */
        return pgd;
}
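
/*
 * For reference, the layout that kernel_to_user_pgdp() relies on: with
 * PTI enabled, each PGD is an order-1 allocation.  The kernelmode PGD
 * sits in the first 4k page and the usermode copy in the page right
 * after it, so converting a kernel pgd pointer to its user counterpart
 * is just a matter of setting one address bit (see kernel_to_user_pgdp()
 * in asm/pgtable.h):
 *
 *     kernel PGD:  pgd
 *     user PGD:    pgd + PAGE_SIZE
 */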

/*
 * Walk the user copy of the page tables, allocating page table pages
 * on the way down where needed.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
        pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

        if (address < PAGE_OFFSET) {
                WARN_ONCE(1, "attempt to walk user address\n");
                return NULL;
        }

        if (pgd_none(*pgd)) {
                unsigned long new_p4d_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_p4d_page))
                        return NULL;

                set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
        }
        BUILD_BUG_ON(pgd_large(*pgd) != 0);

        return p4d_offset(pgd, address);
}
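
/*
 * On kernels with 4-level paging the p4d level is folded: pgd_none()
 * above is compile-time false, the pgd-level allocation is optimized
 * away, and p4d_offset() just hands back the pgd entry recast as a
 * p4d.  The top-level allocation for the user tables then happens at
 * the p4d step in pti_user_pagetable_walk_pmd() below.
 */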

/*
 * Walk the user copy of the page tables, allocating page table pages
 * on the way down where needed.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        p4d_t *p4d;
        pud_t *pud;

        p4d = pti_user_pagetable_walk_p4d(address);
        if (!p4d)
                return NULL;

        BUILD_BUG_ON(p4d_large(*p4d) != 0);
        if (p4d_none(*p4d)) {
                unsigned long new_pud_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_pud_page))
                        return NULL;

                set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
        }

        pud = pud_offset(p4d, address);
        /* The user page tables do not use large mappings: */
        if (pud_large(*pud)) {
                WARN_ON(1);
                return NULL;
        }
        if (pud_none(*pud)) {
                unsigned long new_pmd_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_pmd_page))
                        return NULL;

                set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
        }

        return pmd_offset(pud, address);
}

/*
 * Walk the user (shadow) copy of the page tables, allocating page table
 * pages on the way down where needed.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pmd_t *pmd;
        pte_t *pte;

        pmd = pti_user_pagetable_walk_pmd(address);
        if (!pmd)
                return NULL;

        /* We can't do anything sensible if we hit a large mapping. */
        if (pmd_large(*pmd)) {
                WARN_ON(1);
                return NULL;
        }

        if (pmd_none(*pmd)) {
                unsigned long new_pte_page = __get_free_page(gfp);
                if (!new_pte_page)
                        return NULL;

                set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
        }

        pte = pte_offset_kernel(pmd, address);
        if (pte_flags(*pte) & _PAGE_USER) {
                WARN_ONCE(1, "attempt to walk to user pte\n");
                return NULL;
        }
        return pte;
}
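
/*
 * All three walkers install intermediate entries with _KERNPG_TABLE,
 * i.e. without _PAGE_USER.  That is sufficient for nearly everything
 * mapped into the user tables (entry text, per-cpu TSS, espfix): it is
 * only touched at CPL0 while the user CR3 happens to be loaded.  The
 * one mapping that must also be reachable at CPL3, the vsyscall page,
 * has its user bits set separately via set_vsyscall_pgtable_user_bits()
 * below.
 */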

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
        pte_t *pte, *target_pte;
        unsigned int level;

        pte = lookup_address(VSYSCALL_ADDR, &level);
        if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
                return;

        target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
        if (WARN_ON(!target_pte))
                return;

        *target_pte = *pte;
        set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
        PTI_CLONE_PMD,
        PTI_CLONE_PTE,
};
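
/*
 * PTI_CLONE_PMD copies whole PMD entries, so the kernel and user tables
 * end up sharing the page-table pages underneath (or duplicating the
 * 2MB mapping itself when the PMD is large).  PTI_CLONE_PTE copies
 * individual 4k entries instead.
 */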

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
                  enum pti_clone_level level)
{
        unsigned long addr;

        /*
         * Clone the populated PMDs which cover start to end. These PMD areas
         * can have holes.
         */
        for (addr = start; addr < end;) {
                pte_t *pte, *target_pte;
                pmd_t *pmd, *target_pmd;
                pgd_t *pgd;
                p4d_t *p4d;
                pud_t *pud;

                /* Overflow check */
                if (addr < start)
                        break;

                pgd = pgd_offset_k(addr);
                if (WARN_ON(pgd_none(*pgd)))
                        return;
                p4d = p4d_offset(pgd, addr);
                if (WARN_ON(p4d_none(*p4d)))
                        return;

                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
                        WARN_ON_ONCE(addr & ~PUD_MASK);
                        addr = round_up(addr + 1, PUD_SIZE);
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        WARN_ON_ONCE(addr & ~PMD_MASK);
                        addr = round_up(addr + 1, PMD_SIZE);
                        continue;
                }

                if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
                        target_pmd = pti_user_pagetable_walk_pmd(addr);
                        if (WARN_ON(!target_pmd))
                                return;

                        /*
                         * Only clone present PMDs.  This ensures only setting
                         * _PAGE_GLOBAL on present PMDs.  This should only be
                         * called on well-known addresses anyway, so a
                         * non-present PMD would be a surprise.
                         */
                        if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
                                return;

                        /*
                         * Setting 'target_pmd' below creates a mapping in both
                         * the user and kernel page tables.  It is effectively
                         * global, so set it as global in both copies.  Note:
                         * the X86_FEATURE_PGE check is not _required_ because
                         * the CPU ignores _PAGE_GLOBAL when PGE is not
                         * supported.  The check keeps consistency with code
                         * that only sets this bit when it is supported.
                         */
                        if (boot_cpu_has(X86_FEATURE_PGE))
                                *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

                        /*
                         * Copy the PMD.  That is, the kernelmode and usermode
                         * tables will share the last-level page tables of this
                         * address range.
                         */
                        *target_pmd = *pmd;

                        addr += PMD_SIZE;

                } else if (level == PTI_CLONE_PTE) {

                        /* Walk the page-table down to the pte level */
                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte)) {
                                addr += PAGE_SIZE;
                                continue;
                        }

                        /* Only clone present PTEs */
                        if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
                                return;

                        /* Allocate PTE in the user page-table */
                        target_pte = pti_user_pagetable_walk_pte(addr);
                        if (WARN_ON(!target_pte))
                                return;

                        /* Set GLOBAL bit in both PTEs */
                        if (boot_cpu_has(X86_FEATURE_PGE))
                                *pte = pte_set_flags(*pte, _PAGE_GLOBAL);

                        /* Clone the PTE */
                        *target_pte = *pte;

                        addr += PAGE_SIZE;

                } else {
                        BUG();
                }
        }
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
        p4d_t *kernel_p4d, *user_p4d;
        pgd_t *kernel_pgd;

        user_p4d = pti_user_pagetable_walk_p4d(addr);
        if (!user_p4d)
                return;

        kernel_pgd = pgd_offset_k(addr);
        kernel_p4d = p4d_offset(kernel_pgd, addr);
        *user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
        unsigned int cpu;

        pti_clone_p4d(CPU_ENTRY_AREA_BASE);

        for_each_possible_cpu(cpu) {
                /*
                 * The SYSCALL64 entry code needs to be able to find the
                 * thread stack and needs one word of scratch space in which
                 * to spill a register.  All of this lives in the TSS, in
                 * the sp1 and sp2 slots.
                 *
                 * This is done for all possible CPUs during boot to ensure
                 * that it's propagated to all mms.
                 */

                unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
                phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
                pte_t *target_pte;

                target_pte = pti_user_pagetable_walk_pte(va);
                if (WARN_ON(!target_pte))
                        return;

                *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
        }
}
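
/*
 * Note that the per-cpu TSS is deliberately mapped with PAGE_KERNEL,
 * i.e. writable: the SYSCALL64 entry path spills the user stack
 * pointer into the sp2 scratch slot before it has switched to the
 * kernel CR3, so the user page-tables must allow that store.
 */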

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
        unsigned long start, end;

        start = CPU_ENTRY_AREA_BASE;
        end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

        pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
        pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}
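
/*
 * The espfix64 area has to be visible in the user page-tables because
 * the IRET-to-16-bit-stack fixup runs on the espfix stack after the
 * exit path has already switched to the user CR3.
 */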

/*
 * Clone the populated PMDs of the entry text into the user page-tables.
 */
static void pti_clone_entry_text(void)
{
        pti_clone_pgtable((unsigned long) __entry_text_start,
                          (unsigned long) __entry_text_end,
                          PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
        /*
         * Systems with PCIDs get little benefit from global
         * kernel text, so it is not worth the downsides.
         */
        if (cpu_feature_enabled(X86_FEATURE_PCID))
                return false;

        /*
         * Only do global kernel image for pti=auto.  Do the most
         * secure thing (not global) if pti=on is specified.
         */
        if (pti_mode != PTI_AUTO)
                return false;

        /*
         * K8 may not tolerate the cleared _PAGE_RW on the userspace
         * global kernel image pages.  Do the safe thing (disable
         * global kernel image).  This is unlikely to ever be
         * noticed because PTI is disabled by default on AMD CPUs.
         */
        if (boot_cpu_has(X86_FEATURE_K8))
                return false;

        /*
         * RANDSTRUCT derives its hardening benefits from the
         * attacker's lack of knowledge about the layout of kernel
         * data structures.  Keep the kernel image non-global in
         * cases where RANDSTRUCT is in use to help keep the layout a
         * secret.
         */
        if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
                return false;

        return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
        /*
         * rodata is part of the kernel image and is normally
         * readable on the filesystem or on the web.  But, do not
         * clone the areas past rodata; they might contain secrets.
         */
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
        unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

        if (!pti_kernel_image_global_ok())
                return;

        pr_debug("mapping partial kernel image into user address space\n");

        /*
         * Note that this will undo _some_ of the work that
         * pti_set_kernel_image_nonglobal() did to clear the
         * global bit.
         */
        pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

        /*
         * pti_clone_pgtable() will set the global bit in any PMDs
         * that it clones, but we also need to get any PTEs in
         * the last level for areas that are not huge-page-aligned.
         */

        /* Set the global bit for normal non-__init kernel text: */
        set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
        /*
         * The identity map is created with PMDs, regardless of the
         * actual length of the kernel.  We need to clear
         * _PAGE_GLOBAL up to a PMD boundary, not just to the end
         * of the image.
         */
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

        /*
         * This clears _PAGE_GLOBAL from the entire kernel image.
         * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
         * areas that are mapped to userspace.
         */
        set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("enabled\n");

#ifdef CONFIG_X86_32
        /*
         * We would like to check X86_FEATURE_PCID here, but the init
         * code clears the feature flag on 32 bit because PCID is not
         * supported there anyway. To print the warning we need to
         * query cpuid directly again.
         */
        if (cpuid_ecx(0x1) & BIT(17)) {
                /* Use printk to work around pr_fmt() */
                printk(KERN_WARNING "\n");
                printk(KERN_WARNING "************************************************************\n");
                printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
                printk(KERN_WARNING "**                                                        **\n");
                printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
                printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
                printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
                printk(KERN_WARNING "**                                                        **\n");
                printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
                printk(KERN_WARNING "************************************************************\n");
        }
#endif

        pti_clone_user_shared();

        /* Undo all global bits from the init pagetables in head_64.S: */
        pti_set_kernel_image_nonglobal();
        /* Replace some of the global bits just for shared entry text: */
        pti_clone_entry_text();
        pti_setup_espfix64();
        pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them, because parts of the kernel image get remapped RO and/or
 * NX late in boot.  These changes need to be cloned again into the
 * userspace page-table.
 */
void pti_finalize(void)
{
        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;
        /*
         * We need to clone everything (again) that maps parts of the
         * kernel image.
         */
        pti_clone_entry_text();
        pti_clone_kernel_text();

        debug_checkwx_user();
}