linux/arch/arm64/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

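/*
 * With CONFIG_PID_IN_CONTEXTIDR, CONTEXTIDR_EL1 is updated on every context
 * switch with the PID of the incoming task, so external trace and debug
 * tools can attribute activity to the task that was running.
 */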
static inline void contextidr_thread_switch(struct task_struct *next)
{
        if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
                return;

        write_sysreg(task_pid_nr(next), contextidr_el1);
        isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

        write_sysreg(ttbr, ttbr0_el1);
        isb();
}

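/*
 * Install a user pgd in TTBR0_EL1. swapper_pg_dir maps the kernel and only
 * ever belongs in TTBR1_EL1, hence the BUG_ON() below; kernel page table
 * replacement goes through cpu_replace_ttbr1() instead.
 */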
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUG_ON(pgd == swapper_pg_dir);
        cpu_set_reserved_ttbr0();
        cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

static inline bool __cpu_uses_extended_idmap(void)
{
        if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
                return false;

        return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
        return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}
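/*
 * Worked example (illustrative only): with 4K pages, a 48-bit input range
 * needs four translation levels while a 39-bit range needs three. A 39-bit
 * VA kernel (CONFIG_PGTABLE_LEVELS == 3) whose ID map must cover 48 bits
 * therefore sees ARM64_HW_PGTABLE_LEVELS(48) == 4 > 3 and has to configure
 * the extra level.
 */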

/*
 * Set TCR.T0SZ to the given value. This is a no-op unless the extended ID
 * map is in use (see __cpu_uses_extended_idmap()).
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
        unsigned long tcr;

        if (!__cpu_uses_extended_idmap())
                return;

        tcr = read_sysreg(tcr_el1);
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        write_sysreg(tcr, tcr_el1);
        isb();
}

#define cpu_set_default_tcr_t0sz()      __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()        __cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
        struct mm_struct *mm = current->active_mm;

        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}

static inline void cpu_install_idmap(void)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();

        cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
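/*
 * Code that must run from its physical alias (because the virtual mappings
 * are about to change under it) is bracketed with cpu_install_idmap() and
 * cpu_uninstall_idmap(); cpu_replace_ttbr1() below is one such user.
 */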

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;

        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
                /*
                 * cpu_replace_ttbr1() is used either when only the boot CPU
                 * is up (i.e. the cpufeature framework is not up yet), or
                 * later once CNP has been enabled via cpufeature's enable()
                 * callback. We also rely on the cpu_hwcap bit being set
                 * before the enable() function is called.
                 */
                ttbr1 |= TTBR_CNP_BIT;
        }

        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

        cpu_install_idmap();
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
}
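/*
 * Illustrative sketch only (not part of this header): a caller that has
 * built a temporary set of kernel page tables, e.g. during early boot, can
 * use cpu_replace_ttbr1() to run on them while swapper_pg_dir is repopulated
 * and then switch back. The tmp_pg_dir name below is hypothetical:
 *
 *      static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 *
 *      cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 *      ... repopulate swapper_pg_dir while it is not live ...
 *      cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 */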

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)             do { } while (0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

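/*
 * A fresh mm starts with context.id == 0, i.e. no ASID and no generation, so
 * check_and_switch_context() allocates an ASID the first time the mm is
 * scheduled in.
 */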
#define init_new_context(tsk,mm)        ({ atomic64_set(&(mm)->context.id, 0); 0; })

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
                return;

        if (mm == &init_mm)
                ttbr = __pa_symbol(empty_zero_page);
        else
                ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
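/*
 * The value saved above mirrors the TTBR0_EL1 layout: the pgd physical
 * address (or the zero page for kernel threads) in the lower bits and the
 * ASID in bits [63:48]. The uaccess/entry code loads it into TTBR0_EL1
 * whenever userspace accesses are re-enabled.
 */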
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /*
         * We don't actually care about the ttbr0 mapping, so point it at the
         * zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
        unsigned int cpu = smp_processor_id();

        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
         */
        if (next == &init_mm) {
                cpu_set_reserved_ttbr0();
                return;
        }

        check_and_switch_context(next, cpu);
}
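/*
 * check_and_switch_context() picks up (or allocates) the mm's ASID, handles
 * generation rollover, and, unless SW PAN defers the TTBR0 update to
 * uaccess_enable(), installs the new translation via cpu_switch_mm().
 */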

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        if (prev != next)
                __switch_mm(next);

        /*
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may not have been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
         * of another thread of the same process).
         */
        update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(prev,next)  switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */