/* linux/arch/arm64/mm/proc.S */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

  21#include <linux/init.h>
  22#include <linux/linkage.h>
  23#include <asm/assembler.h>
  24#include <asm/asm-offsets.h>
  25#include <asm/hwcap.h>
  26#include <asm/pgtable.h>
  27#include <asm/pgtable-hwdef.h>
  28#include <asm/cpufeature.h>
  29#include <asm/alternative.h>
  30
  31#ifdef CONFIG_ARM64_64K_PAGES
  32#define TCR_TG_FLAGS    TCR_TG0_64K | TCR_TG1_64K
  33#elif defined(CONFIG_ARM64_16K_PAGES)
  34#define TCR_TG_FLAGS    TCR_TG0_16K | TCR_TG1_16K
  35#else /* CONFIG_ARM64_4K_PAGES */
  36#define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K
  37#endif
  38
  39#ifdef CONFIG_RANDOMIZE_BASE
  40#define TCR_KASLR_FLAGS TCR_NFD1
  41#else
  42#define TCR_KASLR_FLAGS 0
  43#endif
  44
  45#define TCR_SMP_FLAGS   TCR_SHARED
  46
  47/* PTWs cacheable, inner/outer WBWA */
  48#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
  49
  50#ifdef CONFIG_KASAN_SW_TAGS
  51#define TCR_KASAN_FLAGS TCR_TBI1
  52#else
  53#define TCR_KASAN_FLAGS 0
  54#endif
  55
  56#define MAIR(attr, mt)  ((attr) << ((mt) * 8))
  57
  58#ifdef CONFIG_CPU_PM
  59/**
  60 * cpu_do_suspend - save CPU registers context
  61 *
  62 * x0: virtual address of context pointer
  63 */
  64ENTRY(cpu_do_suspend)
  65        mrs     x2, tpidr_el0
  66        mrs     x3, tpidrro_el0
  67        mrs     x4, contextidr_el1
  68        mrs     x5, cpacr_el1
  69        mrs     x6, tcr_el1
  70        mrs     x7, vbar_el1
  71        mrs     x8, mdscr_el1
  72        mrs     x9, oslsr_el1
  73        mrs     x10, sctlr_el1
  74alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
  75        mrs     x11, tpidr_el1
  76alternative_else
  77        mrs     x11, tpidr_el2
  78alternative_endif
  79        mrs     x12, sp_el0
  80        stp     x2, x3, [x0]
  81        stp     x4, xzr, [x0, #16]
  82        stp     x5, x6, [x0, #32]
  83        stp     x7, x8, [x0, #48]
  84        stp     x9, x10, [x0, #64]
  85        stp     x11, x12, [x0, #80]
  86        ret
  87ENDPROC(cpu_do_suspend)
  88
  89/**
  90 * cpu_do_resume - restore CPU register context
  91 *
  92 * x0: Address of context pointer
  93 */
  94        .pushsection ".idmap.text", "awx"
  95ENTRY(cpu_do_resume)
  96        ldp     x2, x3, [x0]
  97        ldp     x4, x5, [x0, #16]
  98        ldp     x6, x8, [x0, #32]
  99        ldp     x9, x10, [x0, #48]
 100        ldp     x11, x12, [x0, #64]
 101        ldp     x13, x14, [x0, #80]
 102        msr     tpidr_el0, x2
 103        msr     tpidrro_el0, x3
 104        msr     contextidr_el1, x4
 105        msr     cpacr_el1, x6
 106
 107        /* Don't change t0sz here, mask those bits when restoring */
 108        mrs     x5, tcr_el1
 109        bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 110
 111        msr     tcr_el1, x8
 112        msr     vbar_el1, x9
 113
 114        /*
 115         * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
 116         * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
 117         * exception. Mask them until local_daif_restore() in cpu_suspend()
 118         * resets them.
 119         */
 120        disable_daif
 121        msr     mdscr_el1, x10
 122
 123        msr     sctlr_el1, x12
 124alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 125        msr     tpidr_el1, x13
 126alternative_else
 127        msr     tpidr_el2, x13
 128alternative_endif
 129        msr     sp_el0, x14
 130        /*
 131         * Restore oslsr_el1 by writing oslar_el1
 132         */
 133        ubfx    x11, x11, #1, #1
 134        msr     oslar_el1, x11
 135        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
 136
 137alternative_if ARM64_HAS_RAS_EXTN
 138        msr_s   SYS_DISR_EL1, xzr
 139alternative_else_nop_endif
 140
 141        isb
 142        ret
 143ENDPROC(cpu_do_resume)
 144        .popsection
 145#endif
 146
 147/*
 148 *      cpu_do_switch_mm(pgd_phys, tsk)
 149 *
 150 *      Set the translation table base pointer to be pgd_phys.
 151 *
 152 *      - pgd_phys - physical address of new TTB
 153 */
 154ENTRY(cpu_do_switch_mm)
 155        mrs     x2, ttbr1_el1
 156        mmid    x1, x1                          // get mm->context.id
 157        phys_to_ttbr x3, x0
 158
 159alternative_if ARM64_HAS_CNP
 160        cbz     x1, 1f                          // skip CNP for reserved ASID
 161        orr     x3, x3, #TTBR_CNP_BIT
 1621:
 163alternative_else_nop_endif
 164#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 165        bfi     x3, x1, #48, #16                // set the ASID field in TTBR0
 166#endif
 167        bfi     x2, x1, #48, #16                // set the ASID
 168        msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
 169        isb
 170        msr     ttbr0_el1, x3                   // now update TTBR0
 171        isb
 172        b       post_ttbr_update_workaround     // Back to C code...
 173ENDPROC(cpu_do_switch_mm)
 174
 175        .pushsection ".idmap.text", "awx"
 176
 177.macro  __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
 178        adrp    \tmp1, empty_zero_page
 179        phys_to_ttbr \tmp2, \tmp1
 180        offset_ttbr1 \tmp2
 181        msr     ttbr1_el1, \tmp2
 182        isb
 183        tlbi    vmalle1
 184        dsb     nsh
 185        isb
 186.endm
 187
 188/*
 189 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 190 *
 191 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 192 * called by anything else. It can only be executed from a TTBR0 mapping.
 193 */
 194ENTRY(idmap_cpu_replace_ttbr1)
 195        save_and_disable_daif flags=x2
 196
 197        __idmap_cpu_set_reserved_ttbr1 x1, x3
 198
 199        offset_ttbr1 x0
 200        msr     ttbr1_el1, x0
 201        isb
 202
 203        restore_daif x2
 204
 205        ret
 206ENDPROC(idmap_cpu_replace_ttbr1)
 207        .popsection
 208
 209#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 210        .pushsection ".idmap.text", "awx"
 211
 212        .macro  __idmap_kpti_get_pgtable_ent, type
 213        dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
 214        dmb     sy                              // lines are written back before
 215        ldr     \type, [cur_\()\type\()p]       // loading the entry
 216        tbz     \type, #0, skip_\()\type        // Skip invalid and
 217        tbnz    \type, #11, skip_\()\type       // non-global entries
 218        .endm
 219
 220        .macro __idmap_kpti_put_pgtable_ent_ng, type
 221        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
 222        str     \type, [cur_\()\type\()p]       // Update the entry and ensure
 223        dmb     sy                              // that it is visible to all
 224        dc      civac, cur_\()\type\()p         // CPUs.
 225        .endm
 226
 227/*
 228 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
 229 *
 230 * Called exactly once from stop_machine context by each CPU found during boot.
 231 */
 232__idmap_kpti_flag:
 233        .long   1
 234ENTRY(idmap_kpti_install_ng_mappings)
 235        cpu             .req    w0
 236        num_cpus        .req    w1
 237        swapper_pa      .req    x2
 238        swapper_ttb     .req    x3
 239        flag_ptr        .req    x4
 240        cur_pgdp        .req    x5
 241        end_pgdp        .req    x6
 242        pgd             .req    x7
 243        cur_pudp        .req    x8
 244        end_pudp        .req    x9
 245        pud             .req    x10
 246        cur_pmdp        .req    x11
 247        end_pmdp        .req    x12
 248        pmd             .req    x13
 249        cur_ptep        .req    x14
 250        end_ptep        .req    x15
 251        pte             .req    x16
 252
 253        mrs     swapper_ttb, ttbr1_el1
 254        restore_ttbr1   swapper_ttb
 255        adr     flag_ptr, __idmap_kpti_flag
 256
 257        cbnz    cpu, __idmap_kpti_secondary
 258
 259        /* We're the boot CPU. Wait for the others to catch up */
 260        sevl
 2611:      wfe
 262        ldaxr   w18, [flag_ptr]
 263        eor     w18, w18, num_cpus
 264        cbnz    w18, 1b
 265
 266        /* We need to walk swapper, so turn off the MMU. */
 267        pre_disable_mmu_workaround
 268        mrs     x18, sctlr_el1
 269        bic     x18, x18, #SCTLR_ELx_M
 270        msr     sctlr_el1, x18
 271        isb
 272
 273        /* Everybody is enjoying the idmap, so we can rewrite swapper. */
 274        /* PGD */
 275        mov     cur_pgdp, swapper_pa
 276        add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
 277do_pgd: __idmap_kpti_get_pgtable_ent    pgd
 278        tbnz    pgd, #1, walk_puds
 279next_pgd:
 280        __idmap_kpti_put_pgtable_ent_ng pgd
 281skip_pgd:
 282        add     cur_pgdp, cur_pgdp, #8
 283        cmp     cur_pgdp, end_pgdp
 284        b.ne    do_pgd
 285
 286        /* Publish the updated tables and nuke all the TLBs */
 287        dsb     sy
 288        tlbi    vmalle1is
 289        dsb     ish
 290        isb
 291
 292        /* We're done: fire up the MMU again */
 293        mrs     x18, sctlr_el1
 294        orr     x18, x18, #SCTLR_ELx_M
 295        msr     sctlr_el1, x18
 296        isb
 297
 298        /* Set the flag to zero to indicate that we're all done */
 299        str     wzr, [flag_ptr]
 300        ret
 301
 302        /* PUD */
 303walk_puds:
 304        .if CONFIG_PGTABLE_LEVELS > 3
 305        pte_to_phys     cur_pudp, pgd
 306        add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
 307do_pud: __idmap_kpti_get_pgtable_ent    pud
 308        tbnz    pud, #1, walk_pmds
 309next_pud:
 310        __idmap_kpti_put_pgtable_ent_ng pud
 311skip_pud:
 312        add     cur_pudp, cur_pudp, 8
 313        cmp     cur_pudp, end_pudp
 314        b.ne    do_pud
 315        b       next_pgd
 316        .else /* CONFIG_PGTABLE_LEVELS <= 3 */
 317        mov     pud, pgd
 318        b       walk_pmds
 319next_pud:
 320        b       next_pgd
 321        .endif
 322
 323        /* PMD */
 324walk_pmds:
 325        .if CONFIG_PGTABLE_LEVELS > 2
 326        pte_to_phys     cur_pmdp, pud
 327        add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
 328do_pmd: __idmap_kpti_get_pgtable_ent    pmd
 329        tbnz    pmd, #1, walk_ptes
 330next_pmd:
 331        __idmap_kpti_put_pgtable_ent_ng pmd
 332skip_pmd:
 333        add     cur_pmdp, cur_pmdp, #8
 334        cmp     cur_pmdp, end_pmdp
 335        b.ne    do_pmd
 336        b       next_pud
 337        .else /* CONFIG_PGTABLE_LEVELS <= 2 */
 338        mov     pmd, pud
 339        b       walk_ptes
 340next_pmd:
 341        b       next_pud
 342        .endif
 343
 344        /* PTE */
 345walk_ptes:
 346        pte_to_phys     cur_ptep, pmd
 347        add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
 348do_pte: __idmap_kpti_get_pgtable_ent    pte
 349        __idmap_kpti_put_pgtable_ent_ng pte
 350skip_pte:
 351        add     cur_ptep, cur_ptep, #8
 352        cmp     cur_ptep, end_ptep
 353        b.ne    do_pte
 354        b       next_pmd
 355
 356        /* Secondary CPUs end up here */
 357__idmap_kpti_secondary:
 358        /* Uninstall swapper before surgery begins */
 359        __idmap_cpu_set_reserved_ttbr1 x18, x17
 360
 361        /* Increment the flag to let the boot CPU we're ready */
 3621:      ldxr    w18, [flag_ptr]
 363        add     w18, w18, #1
 364        stxr    w17, w18, [flag_ptr]
 365        cbnz    w17, 1b
 366
 367        /* Wait for the boot CPU to finish messing around with swapper */
 368        sevl
 3691:      wfe
 370        ldxr    w18, [flag_ptr]
 371        cbnz    w18, 1b
 372
 373        /* All done, act like nothing happened */
 374        offset_ttbr1 swapper_ttb
 375        msr     ttbr1_el1, swapper_ttb
 376        isb
 377        ret
 378
 379        .unreq  cpu
 380        .unreq  num_cpus
 381        .unreq  swapper_pa
 382        .unreq  swapper_ttb
 383        .unreq  flag_ptr
 384        .unreq  cur_pgdp
 385        .unreq  end_pgdp
 386        .unreq  pgd
 387        .unreq  cur_pudp
 388        .unreq  end_pudp
 389        .unreq  pud
 390        .unreq  cur_pmdp
 391        .unreq  end_pmdp
 392        .unreq  pmd
 393        .unreq  cur_ptep
 394        .unreq  end_ptep
 395        .unreq  pte
 396ENDPROC(idmap_kpti_install_ng_mappings)
 397        .popsection
 398#endif
 399
 400/*
 401 *      __cpu_setup
 402 *
 403 *      Initialise the processor for turning the MMU on.  Return in x0 the
 404 *      value of the SCTLR_EL1 register.
 405 */
 406        .pushsection ".idmap.text", "awx"
 407ENTRY(__cpu_setup)
 408        tlbi    vmalle1                         // Invalidate local TLB
 409        dsb     nsh
 410
 411        mov     x0, #3 << 20
 412        msr     cpacr_el1, x0                   // Enable FP/ASIMD
 413        mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
 414        msr     mdscr_el1, x0                   // access to the DCC from EL0
 415        isb                                     // Unmask debug exceptions now,
 416        enable_dbg                              // since this is per-cpu
 417        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
 418        /*
 419         * Memory region attributes for LPAE:
 420         *
 421         *   n = AttrIndx[2:0]
 422         *                      n       MAIR
 423         *   DEVICE_nGnRnE      000     00000000
 424         *   DEVICE_nGnRE       001     00000100
 425         *   DEVICE_GRE         010     00001100
 426         *   NORMAL_NC          011     01000100
 427         *   NORMAL             100     11111111
 428         *   NORMAL_WT          101     10111011
 429         */
 430        ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
 431                     MAIR(0x04, MT_DEVICE_nGnRE) | \
 432                     MAIR(0x0c, MT_DEVICE_GRE) | \
 433                     MAIR(0x44, MT_NORMAL_NC) | \
 434                     MAIR(0xff, MT_NORMAL) | \
 435                     MAIR(0xbb, MT_NORMAL_WT)
 436        msr     mair_el1, x5
 437        /*
 438         * Prepare SCTLR
 439         */
 440        mov_q   x0, SCTLR_EL1_SET
 441        /*
 442         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 443         * both user and kernel.
 444         */
 445        mov_q   x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 446                        TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 447                        TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
 448        tcr_clear_errata_bits x10, x9, x5
 449
 450#ifdef CONFIG_ARM64_USER_VA_BITS_52
 451        ldr_l           x9, vabits_user
 452        sub             x9, xzr, x9
 453        add             x9, x9, #64
 454#else
 455        ldr_l           x9, idmap_t0sz
 456#endif
 457        tcr_set_t0sz    x10, x9
 458
 459        /*
 460         * Set the IPS bits in TCR_EL1.
 461         */
 462        tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 463#ifdef CONFIG_ARM64_HW_AFDBM
 464        /*
 465         * Enable hardware update of the Access Flags bit.
 466         * Hardware dirty bit management is enabled later,
 467         * via capabilities.
 468         */
 469        mrs     x9, ID_AA64MMFR1_EL1
 470        and     x9, x9, #0xf
 471        cbz     x9, 1f
 472        orr     x10, x10, #TCR_HA               // hardware Access flag update
 4731:
 474#endif  /* CONFIG_ARM64_HW_AFDBM */
 475        msr     tcr_el1, x10
 476        ret                                     // return to head.S
 477ENDPROC(__cpu_setup)
 478