linux/arch/sh/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H

#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>

#include <asm/io.h>
#include <asm-generic/mm_hooks.h>

/*
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or "round"/"cycle", whichever term you prefer)
 *    (b) ASID (Address Space IDentifier)
 */
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK           0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK           0x000000ff
#endif

#define MMU_CONTEXT_VERSION_MASK        (~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION       (MMU_CONTEXT_ASID_MASK + 1)
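
/*
 * Illustrative example (assuming the 8-bit ASID case): a context value
 * of 0x00000203 decodes as version 0x200 plus hardware ASID 0x03:
 *
 *     ctx & MMU_CONTEXT_ASID_MASK     == 0x003  (hardware ASID)
 *     ctx & MMU_CONTEXT_VERSION_MASK  == 0x200  (allocation round)
 */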

/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID                     MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT                      0UL

#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)

#ifdef CONFIG_MMU
#define cpu_context(cpu, mm)    ((mm)->context.id[cpu])

#define cpu_asid(cpu, mm)       \
        (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

/*
 * Virtual Page Number mask: the upper 20 bits of a 32-bit virtual
 * address; the low 12 bits are the offset within a 4 KiB base page.
 */
#define MMU_VPN_MASK    0xfffff000

#include <asm/mmu_context_32.h>

/*
 * Get MMU context if needed.
 */
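/*
 * Worked example (8-bit ASID case, illustrative): if asid_cache(cpu)
 * holds 0x1ff, the increment below yields 0x200, whose low byte is 0;
 * every ASID of version 0x100 has been handed out, so the whole TLB
 * is flushed and the mm gets context 0x200 (version 0x200, ASID 0).
 * Only if the counter wraps all the way around to 0 is it bumped to
 * MMU_CONTEXT_FIRST_VERSION, so a live context never equals
 * NO_CONTEXT.
 */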
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = asid_cache(cpu);

        /* Check if we have an old version of the context. */
        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                /* It's up to date, do nothing */
                return;

        /* It's old; we need a new context with the new version. */
        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                /*
                 * We have exhausted the ASIDs of this version.
                 * Flush the whole TLB and start a new cycle.
                 */
                local_flush_tlb_all();

                /*
                 * Fix the version; note that we avoid version #0
                 * to distinguish it from NO_CONTEXT.
                 */
                if (!asid)
                        asid = MMU_CONTEXT_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                cpu_context(i, mm) = NO_CONTEXT;

        return 0;
}
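
/*
 * Note that no ASID is allocated above; each CPU assigns one lazily
 * the first time the mm is activated there, via get_mmu_context().
 */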

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_asid(cpu_asid(cpu, mm));
}

static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (likely(prev != next)) {
                cpumask_set_cpu(cpu, mm_cpumask(next));
                set_TTB(next->pgd);
                activate_context(next, cpu);
        } else if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
                /*
                 * Same mm, but this CPU was not yet in its cpumask:
                 * it still needs a valid context and ASID here.
                 */
                activate_context(next, cpu);
        }
}

#define activate_mm(prev, next)         switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)          do { } while (0)
#define enter_lazy_tlb(mm, tsk)         do { } while (0)

#else

#define set_asid(asid)                  do { } while (0)
#define get_asid()                      (0)
#define cpu_asid(cpu, mm)               ({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid)      (0)
#define set_TTB(pgd)                    do { } while (0)
#define get_TTB()                       (0)

#include <asm-generic/mmu_context.h>

#endif /* CONFIG_MMU */
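
/*
 * With the !CONFIG_MMU stubs above, callers can use the ASID/TTB
 * helpers unconditionally; on nommu parts they simply compile away.
 */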

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on.
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
        unsigned int cpu = smp_processor_id();

        /* Enable the MMU with its initial control settings. */
        __raw_writel(MMU_CONTROL_INIT, MMUCR);
        ctrl_barrier();

        /* Make sure this CPU has a valid ASID generation to start from. */
        if (asid_cache(cpu) == NO_CONTEXT)
                asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

        set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}

static inline void disable_mmu(void)
{
        unsigned long cr;

        /* Clear the init-time control bits to turn translation back off. */
        cr = __raw_readl(MMUCR);
        cr &= ~MMU_CONTROL_INIT;
        __raw_writel(cr, MMUCR);

        ctrl_barrier();
}
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()    do { } while (0)
#define disable_mmu()   do { } while (0)
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */