linux/arch/arm64/include/asm/mmu_context.h
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>

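/*
 * Note (added): the low MAX_ASID_BITS of mm->context.id hold the hardware
 * ASID; the bits above act as a rollover "generation" counter which
 * check_and_switch_context() compares against cpu_last_asid (see the shift
 * by MAX_ASID_BITS below).
 */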
#define MAX_ASID_BITS   16

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

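/*
 * Note (added): with CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task
 * is written to CONTEXTIDR_EL1 on every thread switch so that external debug
 * and hardware trace tools can attribute activity to a process. The empty
 * stub keeps callers unconditional when the option is disabled.
 */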
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
        asm(
        "       msr     contextidr_el1, %0\n"
        "       isb"
        :
        : "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = page_to_phys(empty_zero_page);

        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
        "       isb"
        :
        : "r" (ttbr));
}

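/*
 * Note (added): allocate a fresh ASID for the mm via __new_context() (which
 * must be called with interrupts enabled) and then install its page tables,
 * disabling interrupts only around the actual TTBR/ASID switch.
 */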
static inline void switch_new_context(struct mm_struct *mm)
{
        unsigned long flags;

        __new_context(mm);

        local_irq_save(flags);
        cpu_switch_mm(mm->pgd, mm);
        local_irq_restore(flags);
}

static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        /*
         * Required during context switch to avoid speculative page table
         * walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

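        /*
         * Note (added): the bits of context.id above MAX_ASID_BITS encode
         * the ASID generation; XORing with cpu_last_asid and shifting the
         * low ASID bits out is zero only when both were allocated in the
         * same generation. Illustrative (hypothetical) values: context.id
         * == 0x20042 and cpu_last_asid == 0x21234 give (0x20042 ^ 0x21234)
         * >> 16 == 0, so ASID 0x42 is still valid; once cpu_last_asid has
         * rolled over to 0x30010 the result is non-zero and a new ASID
         * must be allocated.
         */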
        if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
                /*
                 * The ASID is from the current generation, just switch to the
                 * new pgd. This condition is only true for calls from
                 * context_switch() and interrupts are already disabled.
                 */
                cpu_switch_mm(mm->pgd, mm);
        else if (irqs_disabled())
                /*
                 * Defer the new ASID allocation until after the context
                 * switch critical region since __new_context() cannot be
                 * called with interrupts disabled.
                 */
                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
        else
                /*
                 * That is a direct call to switch_mm() or activate_mm() with
                 * interrupts enabled and a new context.
                 */
                switch_new_context(mm);
}

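/*
 * Note (added): init_new_context() evaluates to 0 (success) via the comma
 * expression; destroy_context() is a no-op since stale ASIDs are simply
 * reclaimed at the next rollover.
 */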
#define init_new_context(tsk,mm)        (__init_new_context(tsk,mm),0)
#define destroy_context(mm)             do { } while(0)

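/*
 * Note (added): completes a deferred ASID allocation once the context-switch
 * critical region is over. If check_and_switch_context() set TIF_SWITCH_MM
 * because interrupts were disabled, the new ASID is allocated here and the
 * page tables are switched.
 */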
#define finish_arch_post_lock_switch \
        finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
                struct mm_struct *mm = current->mm;
                unsigned long flags;

                __new_context(mm);

                local_irq_save(flags);
                cpu_switch_mm(mm->pgd, mm);
                local_irq_restore(flags);
        }
}

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

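        /*
         * Note (added): cpumask_test_and_set_cpu() records that this CPU
         * has run "next" and returns whether it had already done so; the
         * context is only switched when "next" is new to this CPU or
         * differs from "prev".
         */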
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
}

#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(prev,next)  switch_mm(prev, next, NULL)

#endif