linux/arch/ia64/include/asm/mmu_context.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL   0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)      (((ctx) << 3) | (addr >> 61))
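/*
 * Illustrative sketch (not part of the original header, values chosen for
 * the example): for a task whose context number is 5, an address in user
 * region 2 (top three address bits 010, e.g. 0x4000000000000000 >> 61 == 2)
 * yields
 *
 *      ia64_rid(5, 0x4000000000000000) == (5 << 3) | 2 == 42
 *
 * so each of the eight regions of that task gets its own region id while
 * all of them share the task's single context number.
 */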

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
        spinlock_t lock;
        unsigned int next;      /* next context number to use */
        unsigned int limit;     /* available free range */
        unsigned int max_ctx;   /* max. context value supported by all CPUs */
                                /* call wrap_mmu_context when next >= max */
        unsigned long *bitmap;  /* bitmap size is max_ctx+1 */
        unsigned long *flushmap;/* pending rid to be flushed */
};
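/*
 * Rough invariant of the allocator state above (a reading aid, not from the
 * original source): while "next" < "limit", every context number in
 * [next, limit) is free in "bitmap", so get_mmu_context() below can hand out
 * ia64_ctx.next++ without rescanning; once "next" reaches "limit" the bitmap
 * is scanned for the next free range, and wrap_mmu_context() is called when
 * no free number below "max_ctx" remains.
 */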

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
        extern void local_flush_tlb_all (void);
        unsigned long flags;

        if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
                spin_lock_irqsave(&ia64_ctx.lock, flags);
                if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
                        local_flush_tlb_all();
                        __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
                }
                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
        }
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
        unsigned long flags;
        nv_mm_context_t context = mm->context;

        if (likely(context))
                goto out;

        spin_lock_irqsave(&ia64_ctx.lock, flags);
        /* re-check, now that we've got the lock: */
        context = mm->context;
        if (context == 0) {
                cpumask_clear(mm_cpumask(mm));
                if (ia64_ctx.next >= ia64_ctx.limit) {
                        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                        ia64_ctx.max_ctx, ia64_ctx.next);
                        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
                                        ia64_ctx.max_ctx, ia64_ctx.next);
                        if (ia64_ctx.next >= ia64_ctx.max_ctx)
                                wrap_mmu_context(mm);
                }
                mm->context = context = ia64_ctx.next++;
                __set_bit(context, ia64_ctx.bitmap);
        }
        spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
        /*
         * Ensure we're not starting to use "context" before any old
         * uses of it are gone from our TLB.
         */
        delayed_tlb_flush();

        return context;
}
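/*
 * Worked example for the range scan above (hypothetical bitmap contents):
 * assume max_ctx == 15 and bits 0-4 and 9 of ia64_ctx.bitmap are set.  With
 * next == limit == 5, find_next_zero_bit() leaves next at 5 and
 * find_next_bit() moves limit to 9, so contexts 5-8 can be handed out with a
 * plain ia64_ctx.next++ before the bitmap has to be scanned again; only if
 * the scan pushed next up to max_ctx would wrap_mmu_context() run.
 */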

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
        mm->context = 0;
        return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
        /* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
        unsigned long rid;
        unsigned long rid_incr = 0;
        unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

        old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
        rid = context << 3;     /* make space for encoding the region number */
        rid_incr = 1 << 8;

        /* encode the region id, preferred page size, and VHPT enable bit: */
        rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
        rr1 = rr0 + 1*rid_incr;
        rr2 = rr0 + 2*rid_incr;
        rr3 = rr0 + 3*rid_incr;
        rr4 = rr0 + 4*rid_incr;
#ifdef  CONFIG_HUGETLB_PAGE
        rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

        ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
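/*
 * Sketch of the region-register value built above (values assumed purely for
 * illustration): bit 0 is the VHPT-enable bit, bits 2-7 hold the preferred
 * page size, and the region id starts at bit 8.  With 16KB pages
 * (PAGE_SHIFT == 14) and rid == 0x2a:
 *
 *      rr0 = (0x2a << 8) | (14 << 2) | 1 == 0x2a39
 *
 * and rr1..rr4 follow at rid_incr (1 << 8) steps, i.e. rids 0x2b..0x2e, one
 * per region of the address space.
 */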

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
        nv_mm_context_t context;

        do {
                context = get_mmu_context(mm);
                if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                reload_context(context);
                /*
                 * in the unlikely event of a TLB-flush by another thread,
                 * redo the load.
                 */
        } while (unlikely(context != mm->context));
}
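/*
 * Added commentary on the loop above: if another thread flushes this address
 * space concurrently, mm->context may change (or be cleared) after
 * get_mmu_context() returns; comparing against mm->context at the bottom of
 * the loop and retrying ensures the region registers end up loaded with a
 * context number that is still current for this mm.
 */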

#define deactivate_mm(tsk,mm)   do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
        /*
         * We may get interrupts here, but that's OK because interrupt
         * handlers cannot touch user-space.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
        activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)    activate_mm(prev_mm, next_mm)
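/*
 * Putting it together (a reading aid, not from the original source): on a
 * context switch, switch_mm()/activate_mm() point the IA64_KR_PT_BASE kernel
 * register at the new page-table root and then call activate_context(),
 * which obtains or re-uses a context number via get_mmu_context() and
 * programs it into rr0-rr4 via reload_context().
 */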

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */