linux/arch/arm/mm/context.c
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big-endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
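/*
 * The 64-bit context.id value combines the hardware ASID in its low
 * ASID_BITS with a generation (version) number in the bits above it.
 * ASID_FIRST_VERSION is therefore both the initial generation value
 * and the number of ASIDs per generation: with the usual ASID_BITS of
 * 8, that is 256 ASIDs, and the generation is bumped by 0x100 on every
 * rollover.
 */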
#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
#define NUM_USER_ASIDS          ASID_FIRST_VERSION

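/*
 * Allocator state: asid_generation holds the current generation,
 * asid_map tracks which ASIDs are in use within it, active_asids is
 * the ASID each CPU is currently running with, reserved_asids keeps
 * each CPU's ASID alive across a rollover, and tlb_flush_pending
 * marks CPUs that still owe a local TLB flush after the last
 * rollover. cpu_asid_lock serialises the slow path; asid_generation
 * and active_asids are also accessed locklessly on the fast path.
 */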
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
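/*
 * Workaround hook for Cortex-A15 erratum 798181: collect the other
 * online CPUs that may currently be using this mm's ASID, either as
 * their active ASID or as the ASID reserved for them across a
 * rollover, so the caller only has to target those CPUs with its TLB
 * maintenance IPIs.
 */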
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask)
{
        int cpu;
        unsigned long flags;
        u64 context_id, asid;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        context_id = mm->context.id.counter;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are
                 * running the same ASID as the one being invalidated.
                 */
                asid = per_cpu(active_asids, cpu).counter;
                if (asid == 0)
                        asid = per_cpu(reserved_asids, cpu);
                if (context_id == asid)
                        cpumask_set_cpu(cpu, mask);
        }
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
        u32 ttb;
        /*
         * Copy TTBR1 into TTBR0.
         * This points at swapper_pg_dir, which contains only global
         * entries so any speculative walks are perfectly safe.
         */
        asm volatile(
        "       mrc     p15, 0, %0, c2, c0, 1           @ read TTBR1\n"
        "       mcr     p15, 0, %0, c2, c0, 0           @ set TTBR0\n"
        : "=r" (ttb));
        isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
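/*
 * On each context switch, publish the incoming task's PID in the
 * CONTEXTIDR bits above the ASID field so that external debug and
 * trace tools can identify the running process (see the layout
 * comment at the top of this file).
 */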
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
                               void *t)
{
        u32 contextidr;
        pid_t pid;
        struct thread_info *thread = t;

        if (cmd != THREAD_NOTIFY_SWITCH)
                return NOTIFY_DONE;

        pid = task_pid_nr(thread->task) << ASID_BITS;
        asm volatile(
        "       mrc     p15, 0, %0, c13, c0, 1\n"
        "       and     %0, %0, %2\n"
        "       orr     %0, %0, %1\n"
        "       mcr     p15, 0, %0, c13, c0, 1\n"
        : "=r" (contextidr), "+r" (pid)
        : "I" (~ASID_MASK));
        isb();

        return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
        .notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
        return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

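/*
 * Called with cpu_asid_lock held when the generation rolls over:
 * rebuild asid_map from scratch, preserving (and re-marking as used)
 * the ASID each CPU was last running, and queue a deferred local TLB
 * flush on every CPU.
 */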
static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
                asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

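/*
 * Allocate an ASID for @mm under cpu_asid_lock. Prefer the ASID
 * number the mm held in a previous generation; failing that, scan
 * the bitmap from cur_idx (a simple next-fit hint) and, if it is
 * full, bump the generation and flush_context() before retrying.
 * Returns the combined generation | ASID value for mm->context.id.
 */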
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we reserve ASID #0 to switch
         * via TTBR0 and to avoid speculative page table walks from hitting
         * in any partial walk caches, which could be populated from
         * overlapping level-1 descriptors used to map both the module
         * area and the userspace stack.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid == NUM_USER_ASIDS) {
                generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                 &asid_generation);
                flush_context(cpu);
                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
        }

        __set_bit(asid, asid_map);
        cur_idx = asid;
        cpumask_clear(mm_cpumask(mm));
        return asid | generation;
}

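/*
 * Make sure @mm has an ASID that is valid in the current generation
 * before switching to its page tables. The common case is handled
 * without taking any locks; the slow path takes cpu_asid_lock to
 * allocate a new ASID and performs any TLB invalidation deferred by
 * a rollover on this CPU.
 */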
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
        u64 asid;

        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);

        /*
         * We cannot update the pgd and the ASID atomically with classic
         * MMU, so switch exclusively to global mappings to avoid
         * speculative page table walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

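        /*
         * Fast path: if our ASID is from the current generation and we
         * can re-install it as this CPU's active ASID, there is no
         * need to take the lock. An xchg() result of zero means a
         * concurrent rollover cleared active_asids, so fall through
         * and revalidate under cpu_asid_lock.
         */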
        asid = atomic64_read(&mm->context.id);
        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
        }

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}