linux/arch/arm64/mm/context.c
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
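/*
 * CPUs flagged here were running tasks from the old generation when a
 * rollover occurred; each must invalidate its TLB before installing an
 * ASID from the new generation.
 */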
static cpumask_t tlb_flush_pending;

#define ASID_MASK               (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION      (1UL << asid_bits)
#define NUM_USER_ASIDS          ASID_FIRST_VERSION

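/*
 * A context ID packs a generation count above the hardware ASID. With
 * asid_bits == 16, for example: ASID_MASK == ~0xffffUL, so "id & ~ASID_MASK"
 * extracts the 16-bit ASID; ASID_FIRST_VERSION == 0x10000 is both the
 * generation increment and NUM_USER_ASIDS; and "id >> asid_bits" recovers
 * the generation.
 */
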
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
        u32 asid;
        int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
                                                ID_AA64MMFR0_ASID_SHIFT);

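        /*
         * ID_AA64MMFR0_EL1.ASIDBits: 0b0000 means 8-bit ASIDs and 0b0010
         * means 16-bit ASIDs; other encodings are reserved, so fall back
         * to the architectural minimum of 8 bits.
         */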
        switch (fld) {
        default:
                pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
                                        smp_processor_id(), fld);
                /* Fallthrough */
        case 0:
                asid = 8;
                break;
        case 2:
                asid = 16;
        }

        return asid;
}

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
        u32 asid = get_cpu_asid_bits();

        if (asid < asid_bits) {
                /*
                 * We cannot decrease the ASID size at runtime, so panic if
                 * we support fewer ASID bits than the boot CPU.
                 */
                pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
                                smp_processor_id(), asid, asid_bits);
                update_cpu_boot_status(CPU_PANIC_KERNEL);
                cpu_park_loop();
        }
}

static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

        /*
         * Ensure the generation bump is observed before we xchg the
         * active_asids.
         */
        smp_wmb();

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_aivivt())
                __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
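        /*
         * cur_idx remembers where the last successful search of the ASID
         * bitmap ended, so allocations walk the map round-robin instead of
         * rescanning from the bottom every time.
         */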
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we use ASID #0 when setting a
         * reserved TTBR0 for the init_mm.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid != NUM_USER_ASIDS)
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
                                                 &asid_generation);
        flush_context(cpu);

        /*
         * We have at least 1 ASID per CPU, so this will always succeed. The
         * search restarts from ASID #1 rather than cur_idx because
         * flush_context() has just cleared the map, freeing the low ASIDs.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
        __set_bit(asid, asid_map);
        cur_idx = asid;
        return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long flags;
        u64 asid;

        asid = atomic64_read(&mm->context.id);

        /*
         * The memory ordering here is subtle. We rely on the control
         * dependency between the generation read and the update of
         * active_asids to ensure that we are synchronised with a
         * parallel rollover (i.e. this pairs with the smp_wmb() in
         * flush_context).
         */
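        /*
         * Fast path: the XOR/shift checks that the generation stored in
         * context.id still matches asid_generation, and the xchg() marks
         * the ASID active on this CPU. The xchg() returns the previous
         * value, which is 0 if a concurrent rollover has been through
         * flush_context(), forcing us down the slow path below.
         */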
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}

static int asids_init(void)
{
        asid_bits = get_cpu_asid_bits();
        /*
         * Expect allocation after rollover to fail if we don't have at
         * least one more ASID than CPUs. ASID #0 is reserved for init_mm.
         */
        WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
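        /*
         * Start the generation counter at ASID_FIRST_VERSION rather than 0
         * so that a freshly initialised mm (context.id == 0) always fails
         * the generation check and takes the slow path to allocate a real
         * ASID on first use.
         */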
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);

        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
}
early_initcall(asids_init);