linux/arch/sh/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H

#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>

#include <asm/io.h>
#include <asm-generic/mm_hooks.h>

/*
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or round, cycle, whatever expression you like)
 *    (b) ASID (Address Space IDentifier)
 */
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK           0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK           0x000000ff
#endif

#define MMU_CONTEXT_VERSION_MASK        (~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION       (MMU_CONTEXT_ASID_MASK + 1)
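
/*
 * Illustrative helpers, not part of the original header: they only
 * demonstrate how a context value decomposes under the masks above.
 * E.g. with the 8-bit ASID layout (CONFIG_CPU_HAS_PTEAEX unset),
 * 0x00000302 yields version 0x00000300 and ASID 0x02, and
 * MMU_CONTEXT_FIRST_VERSION is then 0x100, the first value whose
 * version field is non-zero.
 */
static inline unsigned long mmu_context_version(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_VERSION_MASK;  /* high bits: generation */
}

static inline unsigned long mmu_context_asid(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_ASID_MASK;     /* low bits: hardware ASID */
}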

/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID                     MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT                      0UL

#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)

#ifdef CONFIG_MMU
#define cpu_context(cpu, mm)    ((mm)->context.id[cpu])

#define cpu_asid(cpu, mm)       \
        (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK    0xfffff000

#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
#else
#include <asm/mmu_context_64.h>
#endif

/*
 * Get the MMU context if needed.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = asid_cache(cpu);

        /* Check if we have an old version of the context. */
        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                /* It's up to date, do nothing */
                return;

        /* It's old; we need a new context with the current version. */
        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                /*
                 * We have exhausted the ASIDs of this version.
                 * Flush the whole TLB and start a new cycle.
                 */
                local_flush_tlb_all();

#ifdef CONFIG_SUPERH64
                /*
                 * The SH-5 cache uses the ASIDs, requiring both the I and D
                 * cache to be flushed when the ASID is exhausted. Weak.
                 */
                flush_cache_all();
#endif

                /*
                 * Fix the version; note that we avoid version #0
                 * to distinguish it from NO_CONTEXT.
                 */
                if (!asid)
                        asid = MMU_CONTEXT_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
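
/*
 * Rollover illustration (assuming the 8-bit ASID layout): if
 * asid_cache(cpu) is 0x1ff (version 0x100, ASID 0xff), the increment
 * above yields 0x200, whose ASID field is zero. The TLB is flushed,
 * 0x200 is already non-zero so it becomes the new version, and the mm
 * is assigned context 0x200 (version 0x200, ASID 0x00).
 */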

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                cpu_context(i, mm) = NO_CONTEXT;

        return 0;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_asid(cpu_asid(cpu, mm));
}

static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (likely(prev != next)) {
                /* New address space: switch page tables and ASID. */
                cpumask_set_cpu(cpu, mm_cpumask(next));
                set_TTB(next->pgd);
                activate_context(next, cpu);
        } else {
                /*
                 * Same mm: activate the context only if this CPU is
                 * not yet marked in the mm's cpumask.
                 */
                if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
                        activate_context(next, cpu);
        }
}
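
/*
 * Note on the two paths above: for prev != next, set_TTB() repoints
 * the hardware page-table base and activate_context() supplies a live
 * ASID, so switching address spaces normally costs a register write
 * plus an ASID update; a full TLB flush happens only when
 * get_mmu_context() wraps the ASID space.
 */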

#define activate_mm(prev, next)         switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)          do { } while (0)
#define enter_lazy_tlb(mm, tsk)         do { } while (0)

#else

#define set_asid(asid)                  do { } while (0)
#define get_asid()                      (0)
#define cpu_asid(cpu, mm)               ({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid)      (0)
#define set_TTB(pgd)                    do { } while (0)
#define get_TTB()                       (0)

#include <asm-generic/mmu_context.h>

#endif /* CONFIG_MMU */

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off and on.
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
        unsigned int cpu = smp_processor_id();

        /* Enable the MMU */
        __raw_writel(MMU_CONTROL_INIT, MMUCR);
        ctrl_barrier();

        if (asid_cache(cpu) == NO_CONTEXT)
                asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

        set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}
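
/*
 * On first enable, asid_cache(cpu) is seeded with
 * MMU_CONTEXT_FIRST_VERSION, so with the 8-bit layout the set_asid()
 * call above programs hardware ASID 0 (0x100 & 0xff), and subsequent
 * get_mmu_context() calls hand out ASIDs within that version.
 */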

static inline void disable_mmu(void)
{
        unsigned long cr;

        /*
         * Clear the control bits set by MMU_CONTROL_INIT to turn
         * translation back off.
         */
        cr = __raw_readl(MMUCR);
        cr &= ~MMU_CONTROL_INIT;
        __raw_writel(cr, MMUCR);

        ctrl_barrier();
}
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()    do { } while (0)
#define disable_mmu()   do { } while (0)
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */