linux/arch/arm64/include/asm/mmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#ifndef __ASSEMBLY__

#include <linux/refcount.h>

typedef struct {
	atomic64_t	id;		/* ASID in the low 16 bits, allocator generation above */
#ifdef CONFIG_COMPAT
	void		*sigpage;	/* AArch32 signal-return trampoline page */
#endif
	refcount_t	pinned;		/* pins preventing ASID reallocation at rollover */
	void		*vdso;		/* base of this mm's vDSO mapping */
	unsigned long	flags;		/* MMCF_* flags, e.g. MMCF_AARCH32 */
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                                                  <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic64_set(&mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)

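/*
 * Editor's sketch (illustrative, not part of this header): the TLBI side
 * of the race described above lives in asm/tlbflush.h. Loosely
 * paraphrased, a per-page flush samples the ASID once and orders the
 * TLBI after the PTE write with DSB ISHST, along the lines of:
 *
 *	static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 *						 unsigned long uaddr)
 *	{
 *		unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 *
 *		dsb(ishst);			// order the prior PTE update
 *		__tlbi(vale1is, addr);		// invalidate by VA + ASID
 *		__tlbi_user(vale1is, addr);	// ditto for the user ASID (kpti)
 *	}
 *
 * Even if ASID(mm) returns a stale value here, the reasoning above
 * guarantees the new ASID is already clean in the TLB.
 */
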
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
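
/*
 * Editor's note (illustrative, not part of this header): when the kernel
 * is unmapped at EL0 (kpti), ASIDs are managed as {kernel, user} pairs
 * and the user half is tagged with USER_ASID_FLAG. asm/tlbflush.h keys
 * off this helper to flush the user ASID as well, roughly:
 *
 *	#define __tlbi_user(op, arg) do {			\
 *		if (arm64_kernel_unmapped_at_el0())		\
 *			__tlbi(op, (arg) | USER_ASID_FLAG);	\
 *	} while (0)
 */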

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

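/*
 * Editor's note (illustrative): the generic init_mm definition in
 * mm/init-mm.c expands INIT_MM_CONTEXT() inside its initializer, so the
 * initial mm starts out on the boot page tables, roughly:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */
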
#endif	/* !__ASSEMBLY__ */
#endif