linux/arch/arm64/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

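/*
 * If CONFIG_PID_IN_CONTEXTIDR is enabled, publish the incoming task's PID in
 * CONTEXTIDR_EL1 so that external debug and trace tools can identify the
 * task currently running on this CPU.
 */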
static inline void contextidr_thread_switch(struct task_struct *next)
{
        if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
                return;

        write_sysreg(task_pid_nr(next), contextidr_el1);
        isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

        write_sysreg(ttbr, ttbr0_el1);
        isb();
}

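/* Switch TTBR0_EL1 to @pgd_phys and update the active ASID to that of @mm. */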
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

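/*
 * Switch TTBR0_EL1 to @pgd for @mm. swapper_pg_dir must only ever live in
 * TTBR1_EL1, hence the BUG_ON. The reserved tables are installed first so
 * that no translations can be fetched via TTBR0 while the page table
 * pointer and ASID are being updated.
 */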
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUG_ON(pgd == swapper_pg_dir);
        cpu_set_reserved_ttbr0();
        cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
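/*
 * Number of entries in the ID map's top-level table; may exceed PTRS_PER_PGD
 * when the ID map needs more VA bits than the kernel mapping.
 */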
extern u64 idmap_ptrs_per_pgd;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
        unsigned long tcr = read_sysreg(tcr_el1);

        if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
                return;

        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        write_sysreg(tcr, tcr_el1);
        isb();
}

#define cpu_set_default_tcr_t0sz()      __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()        __cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
        struct mm_struct *mm = current->active_mm;

        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}

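/*
 * Install the identity map in TTBR0_EL1. As above, the reserved tables are
 * installed and the local TLB invalidated first, and TCR_EL1.T0SZ is set to
 * idmap_t0sz, which may cover a larger VA range than the default.
 */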
static inline void cpu_install_idmap(void)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();

        cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Load our new page tables. A strict break-before-make (BBM) approach
 * requires that we ensure that TLBs are free of any entries that may overlap
 * with the global mappings we are about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        __cpu_set_tcr_t0sz(t0sz);

        /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
{
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;

        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
                /*
                 * cpu_replace_ttbr1() is used when only the boot CPU is up
                 * (i.e. the cpufeature framework is not up yet) and, later,
                 * only when we enable CNP via cpufeature's enable() callback.
                 * We also rely on the cpu_hwcap bit being set before calling
                 * the enable() function.
                 */
                ttbr1 |= TTBR_CNP_BIT;
        }

        replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

        cpu_install_idmap();
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

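/*
 * Start a new mm with no ASID; a real ASID is only allocated lazily, the
 * first time the mm is switched in via check_and_switch_context().
 */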
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        atomic64_set(&mm->context.id, 0);
        refcount_set(&mm->context.pinned, 0);
        return 0;
}

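/*
 * With software PAN (CONFIG_ARM64_SW_TTBR0_PAN), each thread caches the
 * TTBR0 value of its mm (pgd plus ASID in bits 63:48) in thread_info so the
 * uaccess routines can reinstall it when user access is enabled. Kernel
 * threads get the reserved tables instead, keeping userspace unmapped.
 */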
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
                return;

        if (mm == &init_mm)
                ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
                ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /*
         * We don't actually care about the ttbr0 mapping, so point it at the
         * zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
         */
        if (next == &init_mm) {
                cpu_set_reserved_ttbr0();
                return;
        }

        check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        if (prev != next)
                __switch_mm(next);

        /*
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may not have been initialised yet (activate_mm caller) or the
         * ASID may have changed since the last run (following the context
         * switch of another thread of the same process).
         */
        update_saved_ttbr0(tsk, next);
}

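/*
 * On systems where only a subset of CPUs can run 32-bit EL0 tasks, restrict
 * compat tasks to that subset; everything else may run on any possible CPU.
 */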
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
        if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
                return cpu_possible_mask;

        if (!is_compat_thread(task_thread_info(p)))
                return cpu_possible_mask;

        return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask  task_cpu_possible_mask

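/*
 * verify_cpu_asid_bits() panics if a secondary CPU implements fewer ASID
 * bits than the boot CPU; post_ttbr_update_workaround() applies any CPU
 * erratum workaround required after a TTBR change.
 */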
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

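/*
 * Pin/unpin the ASID of an mm so that it keeps the same hardware ASID across
 * rollover, e.g. while the page tables are shared with a device (SVA).
 */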
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */