linux/arch/arm/mm/context.c
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
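
/*
 * mm->context.id and cpu_last_asid share one encoding: the low ASID_BITS
 * bits are the hardware ASID programmed into the CPU, and the bits above
 * them form a generation (version) counter. With the usual ARMv6/v7 value
 * of ASID_BITS == 8 (so ASID_FIRST_VERSION == 0x100), an id of 0x203 means
 * generation 2, hardware ASID 0x03; two ids belong to the same generation
 * exactly when they agree in all bits above ASID_BITS.
 */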
/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.  We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	spin_lock_init(&mm->context.id_lock);
}
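
/*
 * flush_context() below switches the CPU to the reserved ASID 0 (the MCR
 * writes CONTEXTIDR, CP15 c13/c0/1) and only then flushes the TLB, so
 * nothing can be speculatively filled under an ASID that is about to be
 * recycled. ASID-tagged VIVT I-caches tag their entries with the ASID as
 * well, hence the extra __flush_icache_all() in that configuration.
 */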
static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
	isb();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP
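
/*
 * Worked example of the generation test used below (and again in
 * __new_context()), assuming the usual ASID_BITS == 8:
 *
 *	mm->context.id == 0x203, cpu_last_asid == 0x304:
 *		0x203 ^ 0x304 == 0x107, >> 8 == 1  -> old generation, retag
 *	mm->context.id == 0x304, cpu_last_asid == 0x35a:
 *		0x304 ^ 0x35a == 0x05e, >> 8 == 0  -> current generation, keep
 *
 * XOR clears every bit on which the two values agree, so shifting out the
 * low ASID_BITS leaves a non-zero result exactly when the generation
 * (version) bits differ.
 */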
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
	isb();
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif
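
/*
 * Allocate a fresh ASID for @mm. When the current generation runs out of
 * hardware ASIDs, the allocating CPU takes cpu_last_asid +
 * smp_processor_id() + 1 for @mm, every other CPU is told via IPI
 * (reset_context) to retag its current mm with cpu_last_asid + cpu + 1,
 * and cpu_last_asid then advances by NR_CPUS.
 *
 * Worked example, assuming ASID_BITS == 8 and NR_CPUS == 4: bumping
 * cpu_last_asid to 0x200 clears its low 8 bits, so a new generation
 * starts; CPUs 0..3 end up with ids 0x201..0x204 and cpu_last_asid
 * finishes at 0x204. The value 0x200 itself (hardware ASID 0) is never
 * handed to an mm, keeping ASID 0 reserved.
 */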
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	spin_unlock(&cpu_asid_lock);
}
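
/*
 * For reference: generic code does not call __new_context() directly; the
 * switch_mm() path in asm/mmu_context.h performs the same generation test
 * and only falls back to this allocator on a mismatch, before programming
 * the new page tables and ASID with cpu_switch_mm(). A rough sketch of
 * that caller (not a verbatim copy of the header):
 *
 *	static inline void check_context(struct mm_struct *mm)
 *	{
 *		if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 *			__new_context(mm);
 *	}
 *
 * Keeping per-mm ASIDs this way lets switch_mm() skip the TLB flush that
 * an un-tagged TLB would otherwise need on every context switch.
 */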