linux/arch/arm64/include/asm/mmu_context.h
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

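/*
 * Set when the kernel runs with rodata=full: the linear map is then created
 * with page-granular mappings so that the permissions of individual pages
 * (e.g. linear aliases of read-only regions) can be changed later.
 */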
extern bool rodata_full;

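/*
 * With CONFIG_PID_IN_CONTEXTIDR, publish the incoming task's PID in
 * CONTEXTIDR_EL1 so that external debug and trace tools can identify
 * the thread currently running on this CPU.
 */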
static inline void contextidr_thread_switch(struct task_struct *next)
{
        if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
                return;

        write_sysreg(task_pid_nr(next), contextidr_el1);
        isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

        write_sysreg(ttbr, ttbr0_el1);
        isb();
}

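/*
 * Switch TTBR0_EL1 to the given mm's pgd. TTBR0 is parked on the reserved
 * (zero page) tables first, since the ASID (held in TTBR1_EL1) and the table
 * address (in TTBR0_EL1) cannot be updated in a single step; this avoids
 * speculative walks through a stale table/ASID combination. swapper_pg_dir
 * holds only kernel mappings and must never be installed in TTBR0_EL1, hence
 * the BUG_ON().
 */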
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUG_ON(pgd == swapper_pg_dir);
        cpu_set_reserved_ttbr0();
        cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

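/*
 * With 52-bit user VAs, TCR_EL1.T0SZ is programmed for the full 52-bit
 * range, which is wide enough for any supported physical address, so the
 * ID map never needs to be treated as "extended" here.
 */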
static inline bool __cpu_uses_extended_idmap(void)
{
        if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
                return false;

        return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
        return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to the given value (the wrapper macros below pass either the
 * default, VA_BITS-based, value or the ID map's value). This is a no-op
 * unless the ID map actually uses an extended T0SZ.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
        unsigned long tcr;

        if (!__cpu_uses_extended_idmap())
                return;

        tcr = read_sysreg(tcr_el1);
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        write_sysreg(tcr, tcr_el1);
        isb();
}

#define cpu_set_default_tcr_t0sz()      __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()        __cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
        struct mm_struct *mm = current->active_mm;

        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}

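/*
 * Counterpart of cpu_uninstall_idmap(): park TTBR0_EL1 on the reserved
 * tables, invalidate the local TLB, widen T0SZ if the ID map needs it,
 * and then point TTBR0_EL1 at idmap_pg_dir (via its linear-map alias).
 */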
static inline void cpu_install_idmap(void)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();

        cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;

        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
                /*
                 * cpu_replace_ttbr1() is used when only the boot CPU is up
                 * (i.e. the cpufeature framework is not up yet) and later
                 * only when we enable CNP via cpufeature's enable()
                 * callback.
                 * We also rely on the cpu_hwcap bit being set before
                 * calling the enable() function.
                 */
                ttbr1 |= TTBR_CNP_BIT;
        }

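        /*
         * idmap_cpu_replace_ttbr1 is called through its physical address:
         * it must run from the ID map (installed in TTBR0_EL1 below), as
         * the TTBR1_EL1 tables it replaces cannot be relied upon while the
         * switch is in progress.
         */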
        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

        cpu_install_idmap();
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)             do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

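/*
 * A context.id of zero means "no ASID allocated yet"; the first
 * check_and_switch_context() call for this mm allocates a real ASID.
 */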
#define init_new_context(tsk,mm)        ({ atomic64_set(&(mm)->context.id, 0); 0; })

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
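/*
 * With software PAN the kernel normally runs with the reserved (zero page)
 * tables in TTBR0_EL1. The task's real TTBR0 value, i.e. its pgd plus the
 * ASID in bits 63:48, is stashed in thread_info so that the uaccess
 * routines can install it temporarily around user accesses.
 */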
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
                return;

        if (mm == &init_mm)
                ttbr = __pa_symbol(empty_zero_page);
        else
                ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /*
         * We don't actually care about the ttbr0 mapping, so point it at the
         * zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
}

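/*
 * Install the address space of 'next' in TTBR0_EL1. The real work,
 * including ASID allocation and rollover handling, happens in
 * check_and_switch_context(); the init_mm case only needs the reserved
 * TTBR0 since it has no user mappings.
 */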
static inline void __switch_mm(struct mm_struct *next)
{
        unsigned int cpu = smp_processor_id();

        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
         */
        if (next == &init_mm) {
                cpu_set_reserved_ttbr0();
                return;
        }

        check_and_switch_context(next, cpu);
}
 239
 240static inline void
 241switch_mm(struct mm_struct *prev, struct mm_struct *next,
 242          struct task_struct *tsk)
 243{
 244        if (prev != next)
 245                __switch_mm(next);
 246
 247        /*
 248         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
 249         * value may have not been initialised yet (activate_mm caller) or the
 250         * ASID has changed since the last run (following the context switch
 251         * of another thread of the same process).
 252         */
 253        update_saved_ttbr0(tsk, next);
 254}

#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(prev,next)  switch_mm(prev, next, current)

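/*
 * verify_cpu_asid_bits() checks that a newly onlined CPU implements at
 * least as many ASID bits as the boot CPU, and post_ttbr_update_workaround()
 * applies any erratum workarounds that must follow a TTBR change; both are
 * implemented alongside the ASID allocator in arch/arm64/mm/context.c.
 */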
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */