/* linux/arch/arm64/mm/proc.S */
   1/*
   2 * Based on arch/arm/mm/proc.S
   3 *
   4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
   5 * Copyright (C) 2012 ARM Ltd.
   6 * Author: Catalin Marinas <catalin.marinas@arm.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include <linux/init.h>
  22#include <linux/linkage.h>
  23#include <asm/assembler.h>
  24#include <asm/asm-offsets.h>
  25#include <asm/hwcap.h>
  26#include <asm/pgtable.h>
  27#include <asm/pgtable-hwdef.h>
  28#include <asm/cpufeature.h>
  29#include <asm/alternative.h>
  30
/*
 * TCR_EL1.TG0/TG1 translation granule size, selected by the page-size
 * Kconfig option (64K, 16K or the default 4K).
 */
   31#ifdef CONFIG_ARM64_64K_PAGES
   32#define TCR_TG_FLAGS    TCR_TG0_64K | TCR_TG1_64K
   33#elif defined(CONFIG_ARM64_16K_PAGES)
   34#define TCR_TG_FLAGS    TCR_TG0_16K | TCR_TG1_16K
   35#else /* CONFIG_ARM64_4K_PAGES */
   36#define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K
   37#endif
   38
/* Page-table walks are marked shareable for SMP */
   39#define TCR_SMP_FLAGS   TCR_SHARED
   40
   41/* PTWs cacheable, inner/outer WBWA */
   42#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
   43
/* Place an 8-bit attribute encoding into the MAIR_EL1 byte for memory type mt */
   44#define MAIR(attr, mt)  ((attr) << ((mt) * 8))
  45
  46/*
  47 *      cpu_do_idle()
  48 *
  49 *      Idle the processor (wait for interrupt).
  50 */
   51ENTRY(cpu_do_idle)
   52        dsb     sy                              // WFI may enter a low-power mode
   53        wfi                                     // wait for interrupt (idle until a wakeup event)
   54        ret
   55ENDPROC(cpu_do_idle)
  56
  57#ifdef CONFIG_CPU_PM
  58/**
  59 * cpu_do_suspend - save CPU registers context
  60 *
  61 * x0: virtual address of context pointer
  62 */
   63ENTRY(cpu_do_suspend)
        // Snapshot the EL0/EL1 register state that is lost across power-down.
        // The slot layout in *x0 must match the loads in cpu_do_resume exactly.
   64        mrs     x2, tpidr_el0                   // EL0 thread ID register
   65        mrs     x3, tpidrro_el0                 // EL0 read-only thread ID register
   66        mrs     x4, contextidr_el1              // context ID
   67        mrs     x5, cpacr_el1                   // FP/SIMD access control
   68        mrs     x6, tcr_el1                     // translation control
   69        mrs     x7, vbar_el1                    // exception vector base
   70        mrs     x8, mdscr_el1                   // monitor debug system control
   71        mrs     x9, oslsr_el1                   // OS lock status
   72        mrs     x10, sctlr_el1                  // system control
   73        stp     x2, x3, [x0]                    // [#0]:  tpidr_el0,      tpidrro_el0
   74        stp     x4, xzr, [x0, #16]              // [#16]: contextidr_el1, zero padding (keeps pairs aligned)
   75        stp     x5, x6, [x0, #32]               // [#32]: cpacr_el1,      tcr_el1
   76        stp     x7, x8, [x0, #48]               // [#48]: vbar_el1,       mdscr_el1
   77        stp     x9, x10, [x0, #64]              // [#64]: oslsr_el1,      sctlr_el1
   78        ret
   79ENDPROC(cpu_do_suspend)
  80
  81/**
  82 * cpu_do_resume - restore CPU register context
  83 *
  84 * x0: Address of context pointer
  85 */
   86ENTRY(cpu_do_resume)
        // Reload the context stored by cpu_do_suspend (same slot layout).
   87        ldp     x2, x3, [x0]                    // tpidr_el0, tpidrro_el0
   88        ldp     x4, x5, [x0, #16]               // contextidr_el1, padding (x5 value unused)
   89        ldp     x6, x8, [x0, #32]               // cpacr_el1, tcr_el1
   90        ldp     x9, x10, [x0, #48]              // vbar_el1, mdscr_el1
   91        ldp     x11, x12, [x0, #64]             // oslsr_el1, sctlr_el1
   92        msr     tpidr_el0, x2
   93        msr     tpidrro_el0, x3
   94        msr     contextidr_el1, x4
   95        msr     cpacr_el1, x6
   96
   97        /* Don't change t0sz here, mask those bits when restoring */
   98        mrs     x5, tcr_el1
   99        bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH // keep the live T0SZ in the restored TCR value
  100
  101        msr     tcr_el1, x8
  102        msr     vbar_el1, x9
  103
  104        /*
  105         * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
  106         * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
  107         * exception. Mask them until local_dbg_restore() in cpu_suspend()
  108         * resets them.
  109         */
  110        disable_dbg
  111        msr     mdscr_el1, x10
  112
  113        msr     sctlr_el1, x12
  114        /*
  115         * Restore oslsr_el1 by writing oslar_el1
  116         */
  117        ubfx    x11, x11, #1, #1                // extract the saved OS-lock bit (OSLSR_EL1[1])
  118        msr     oslar_el1, x11                  // re-lock/unlock via OSLAR_EL1
  119        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
  120        isb                                     // make all the system register writes visible
  121        ret
  122ENDPROC(cpu_do_resume)
 123#endif
 124
 125/*
 126 *      cpu_do_switch_mm(pgd_phys, tsk)
 127 *
 128 *      Set the translation table base pointer to be pgd_phys.
 129 *
 130 *      - pgd_phys - physical address of new TTB
 131 */
  132ENTRY(cpu_do_switch_mm)
  133        mmid    x1, x1                          // get mm->context.id
  134        bfi     x0, x1, #48, #16                // set the ASID (TTBR0_EL1[63:48], TCR_EL1.AS=1)
  135        msr     ttbr0_el1, x0                   // set TTBR0
  136        isb                                     // new translation regime visible before returning
        // Both alternative sequences must be the same size; the nops pad the
        // default (no-workaround) path to match the erratum path below.
  137alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
  138        ret
  139        nop
  140        nop
  141        nop
  142alternative_else
        // Cavium erratum 27456 workaround: invalidate the I-cache after the
        // TTBR0/ASID update — NOTE(review): exact erratum scope per Cavium docs.
  143        ic      iallu                           // invalidate all I-cache to PoU
  144        dsb     nsh
  145        isb
  146        ret
  147alternative_endif
  148ENDPROC(cpu_do_switch_mm)
 149
 150        .pushsection ".idmap.text", "ax"
 151/*
 152 * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
 153 *
 154 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 155 * called by anything else. It can only be executed from a TTBR0 mapping.
 156 */
  157ENTRY(idmap_cpu_replace_ttbr1)
  158        mrs     x2, daif                        // save the current exception mask
  159        msr     daifset, #0xf                   // mask D/A/I/F while TTBR1 is in flux
  160
  161        adrp    x1, empty_zero_page             // zeroed page: contains no valid entries
  162        msr     ttbr1_el1, x1                   // park TTBR1 on it so no new walks succeed
  163        isb
  164
  165        tlbi    vmalle1                         // drop all stale local TLB entries
  166        dsb     nsh
  167        isb
  168
  169        msr     ttbr1_el1, x0                   // install the replacement page tables
  170        isb
  171
  172        msr     daif, x2                        // restore the caller's exception mask
  173
  174        ret
  175ENDPROC(idmap_cpu_replace_ttbr1)
 176        .popsection
 177
 178/*
 179 *      __cpu_setup
 180 *
 181 *      Initialise the processor for turning the MMU on.  Return in x0 the
 182 *      value of the SCTLR_EL1 register.
 183 */
  184ENTRY(__cpu_setup)
  185        tlbi    vmalle1                         // Invalidate local TLB
  186        dsb     nsh
  187
  188        mov     x0, #3 << 20                    // CPACR_EL1.FPEN = 0b11: don't trap FP/SIMD
  189        msr     cpacr_el1, x0                   // Enable FP/ASIMD
  190        mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
  191        msr     mdscr_el1, x0                   // access to the DCC from EL0
  192        isb                                     // Unmask debug exceptions now,
  193        enable_dbg                              // since this is per-cpu
  194        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
  195        /*
  196         * Memory region attributes for LPAE:
  197         *
  198         *   n = AttrIndx[2:0]
  199         *                      n       MAIR
  200         *   DEVICE_nGnRnE      000     00000000
  201         *   DEVICE_nGnRE       001     00000100
  202         *   DEVICE_GRE         010     00001100
  203         *   NORMAL_NC          011     01000100
  204         *   NORMAL             100     11111111
  205         *   NORMAL_WT          101     10111011
  206         */
  207        ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
  208                     MAIR(0x04, MT_DEVICE_nGnRE) | \
  209                     MAIR(0x0c, MT_DEVICE_GRE) | \
  210                     MAIR(0x44, MT_NORMAL_NC) | \
  211                     MAIR(0xff, MT_NORMAL) | \
  212                     MAIR(0xbb, MT_NORMAL_WT)
  213        msr     mair_el1, x5
  214        /*
  215         * Prepare SCTLR
  216         */
  217        adr     x5, crval                       // crval = {clear mask, set mask} data words
  218        ldp     w5, w6, [x5]                    // w5 = bits to clear, w6 = bits to set
  219        mrs     x0, sctlr_el1
  220        bic     x0, x0, x5                      // clear bits
  221        orr     x0, x0, x6                      // set bits
  222        /*
  223         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
  224         * both user and kernel.
  225         */
  226        ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
  227                        TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
  228        tcr_set_idmap_t0sz      x10, x9
  229
  230        /*
  231         * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
  232         * TCR_EL1.
  233         */
  234        mrs     x9, ID_AA64MMFR0_EL1
  235        bfi     x10, x9, #32, #3                // TCR_EL1.IPS := ID_AA64MMFR0_EL1.PARange
  236#ifdef CONFIG_ARM64_HW_AFDBM
  237        /*
  238         * Hardware update of the Access and Dirty bits.
  239         */
  240        mrs     x9, ID_AA64MMFR1_EL1
  241        and     x9, x9, #0xf                    // HADBS field (bits [3:0])
  242        cbz     x9, 2f                          // 0: no hardware AF/DBM support
  243        cmp     x9, #2
  244        b.lt    1f                              // 1: Access flag only; >=2 adds Dirty state
  245        orr     x10, x10, #TCR_HD               // hardware Dirty flag update
  2461:      orr     x10, x10, #TCR_HA               // hardware Access flag update
  2472:
  248#endif  /* CONFIG_ARM64_HW_AFDBM */
  249        msr     tcr_el1, x10
  250        ret                                     // return to head.S
  251ENDPROC(__cpu_setup)
 252
 253        /*
 254         * We set the desired value explicitly, including those of the
 255         * reserved bits. The values of bits EE & E0E were set early in
 256         * el2_setup, which are left untouched below.
 257         *
 258         *                 n n            T
 259         *       U E      WT T UD     US IHBS
 260         *       CE0      XWHW CZ     ME TEEA S
 261         * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
 262         * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
 263         * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
 264         */
        // SCTLR_EL1 clear/set masks consumed by __cpu_setup (see table above).
  265        .type   crval, #object
  266crval:
  267        .word   0xfcffffff                      // clear
  268        .word   0x34d5d91d                      // set
 269