linux/arch/alpha/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif


static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
        register unsigned long a0 __asm__("$16");
        register unsigned long v0 __asm__("$0");

        a0 = virt_to_phys(pcb);
        __asm__ __volatile__(
                "call_pal %2 #__reload_thread"
                : "=r"(v0), "=r"(a0)
                : "i"(PAL_swpctx), "r"(a0)
                : "$1", "$22", "$23", "$24", "$25");

        return v0;
}

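/*
 * Illustration only; the helper below is a made-up name for this
 * sketch and nothing in the tree uses it.  __reload_thread wraps the
 * PAL_swpctx call: $16 carries the physical address of the new PCB in,
 * $0 returns the old PCB base.  The sketch shows how a caller could
 * push a freshly chosen ASN and page-table pointer into the hardware;
 * the body is an assumption made for this example (the real
 * __load_new_mm_context() is declared further down and defined
 * elsewhere in arch/alpha).
 */
static inline void
example_reload_context(struct task_struct *tsk, unsigned long asn,
                       unsigned long ptbr)
{
        struct pcb_struct *pcb = &task_thread_info(tsk)->pcb;

        pcb->asn = asn;         /* hardware ASN to run with */
        pcb->ptbr = ptbr;       /* PFN of the page directory */
        __reload_thread(pcb);   /* PAL_swpctx makes both take effect */
}
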
/*
 * The maximum number of ASNs the processor supports.  On the EV4 this
 * is 63, but the PAL-code doesn't actually use this information.  On
 * the EV5 this is 127, and on the EV6 it is 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASNs also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASNs don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't
 * actually work correctly and thus cannot be used (which explains the
 * lack of PAL-code support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN        (alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN       EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN       EV5_MAX_ASN
# else
#  define MAX_ASN       EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)     (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)     last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN      8
#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)

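/*
 * Illustration only; the helpers below are made-up names for this
 * sketch and are not used by anything here.  They spell out how a
 * per-CPU context value decomposes under the masks above: with
 * WIDTH_HARDWARE_ASN == 8, a value of 0x305 carries hardware ASN 0x05,
 * while the remaining bits (0x300) are what the switch code compares
 * to decide whether the context is stale.
 */
static inline unsigned long example_hardware_asn(unsigned long mmc)
{
        return mmc & HARDWARE_ASN_MASK;         /* low 8 bits */
}

static inline unsigned long example_asn_version(unsigned long mmc)
{
        return mmc & ~HARDWARE_ASN_MASK;        /* bits ev5_switch_mm compares */
}
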
/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASNs than the processor has, we invalidate the old
 * user TLB entries (tbiap()) and start a new ASN version. That will
 * automatically force a new ASN for any other processes the next time
 * they want to run.
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
        unsigned long asn = cpu_last_asn(cpu);
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                tbiap();
                imb();
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn(cpu) = next;
        return next;
}

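/*
 * Worked example of the wrap case above (numbers are illustrative,
 * assuming an EV5 with MAX_ASN == 127 and WIDTH_HARDWARE_ASN == 8):
 * if cpu_last_asn() holds (3 << 8) | 127, the hardware field is
 * exhausted, so the user TB is flushed with tbiap() and the next
 * context handed out is (3 << 8) + ASN_FIRST_VERSION == (4 << 8),
 * i.e. hardware ASN 0 under a new version.  Every mm still stamped
 * with version 3 now mismatches in ev5_switch_mm() below and gets a
 * fresh ASN the next time it is switched in.
 */
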
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* Check if our ASN is of an older version, and thus invalid. */
        unsigned long asn;
        unsigned long mmc;
        long cpu = smp_processor_id();

#ifdef CONFIG_SMP
        cpu_data[cpu].asn_lock = 1;
        barrier();
#endif
        asn = cpu_last_asn(cpu);
        mmc = next_mm->context[cpu];
        if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
                mmc = __get_new_mm_context(next_mm, cpu);
                next_mm->context[cpu] = mmc;
        }
#ifdef CONFIG_SMP
        else
                cpu_data[cpu].need_new_asn = 1;
#endif

        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
        task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* As described above, ASNs are broken for TLB usage.  But we can
           optimize for switching between threads -- if the mm is
           unchanged from current we needn't flush.  */
        /* ??? May not be needed because EV4 PALcode recognizes that
           ASNs are broken and does a tbiap itself on swpctx, under
           the "Must set ASN or flush" rule.  At least this is true
           for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
           I'm going to leave this here anyway, just to Be Sure.  -- r~  */
        if (prev_mm != next_mm)
                tbiap();

        /* Do continue to allocate ASNs, because we can still use them
           to avoid flushing the icache.  */
        ev5_switch_mm(prev_mm, next_mm, next);
}

extern void __load_new_mm_context(struct mm_struct *);

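/*
 * On SMP, ev5_switch_mm() above raises asn_lock around its read of the
 * context and, when it keeps the old one, records that fact in
 * need_new_asn.  The expectation (an assumption about the arch SMP
 * code, not something defined in this header) is that a cross-CPU
 * flush arriving while the lock is held only zeroes this CPU's
 * mm->context[] instead of reloading it; check_mmu_context(), run once
 * the switch has completed, drops the lock and reloads a context if
 * such a flush zapped it in the meantime.
 */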
#ifdef CONFIG_SMP
#define check_mmu_context()                                     \
do {                                                            \
        int cpu = smp_processor_id();                           \
        cpu_data[cpu].asn_lock = 0;                             \
        barrier();                                              \
        if (cpu_data[cpu].need_new_asn) {                       \
                struct mm_struct * mm = current->active_mm;     \
                cpu_data[cpu].need_new_asn = 0;                 \
                if (!mm->context[cpu])                          \
                        __load_new_mm_context(mm);              \
        }                                                       \
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif

__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
        tbiap();
}

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)       alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)       alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)      ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)      ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev5_activate_mm((x),(y))
# endif
#endif

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                mm->context[i] = 0;
        if (tsk != current)
                task_thread_info(tsk)->pcb.ptbr
                  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
}

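/*
 * The ptbr written above and in enter_lazy_tlb() below is the physical
 * page frame number of the page directory: mm->pgd points into the
 * kernel's identity mapping, so subtracting IDENT_ADDR gives its
 * physical address and shifting by PAGE_SHIFT converts that to a PFN.
 * (Illustrative numbers: with 8KB pages, a pgd at physical 0x4000
 * yields ptbr == 2.)
 */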
#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        task_thread_info(tsk)->pcb.ptbr
          = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#include <asm-generic/mmu_context.h>

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */