linux/arch/powerpc/mm/mmu_context_nohash.c
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to always be compiled in nowadays
 * and this would generate far too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)        printk(KERN_DEBUG args)
#define pr_hardcont(args...)    printk(KERN_CONT args)
#else
#define pr_hard(args...)        do { } while (0)
#define pr_hardcont(args...)    do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_SPINLOCK(context_lock);

#define CTX_MAP_SIZE    \
        (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
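
/*
 * CTX_MAP_SIZE is the size in bytes of a bitmap with one bit per context
 * id from 0 to last_context. As an illustration only: with 256 contexts
 * (last_context = 255) and 32-bit longs this is 4 * (255 / 32 + 1) = 32
 * bytes. The same size is used both for context_map (which contexts are
 * allocated) and for each per-CPU stale_map (which contexts may still
 * have stale TLB entries on that CPU and must be flushed before re-use).
 */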


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
        struct mm_struct *mm;
        unsigned int cpu, max, i;

        max = last_context - first_context;

        /* Attempt to free next_context first and then loop until we manage */
        while (max--) {
                /* Pick up the victim mm */
                mm = context_mm[id];

                /* We have a candidate victim, check if it's active, on SMP
                 * we cannot steal active contexts
                 */
                if (mm->context.active) {
                        id++;
                        if (id > last_context)
                                id = first_context;
                        continue;
                }
                pr_hardcont(" | steal %d from 0x%p", id, mm);

                /* Mark this mm as having no context anymore */
                mm->context.id = MMU_NO_CONTEXT;

                /* Mark it stale on all CPUs that used this mm. For threaded
                 * implementations, we set it on all threads on each core
                 * represented in the mask. A future implementation will use
                 * a core map instead but this will do for now.
                 */
                for_each_cpu(cpu, mm_cpumask(mm)) {
                        for (i = cpu_first_thread_in_core(cpu);
                             i <= cpu_last_thread_in_core(cpu); i++)
                                __set_bit(id, stale_map[i]);
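                /* Skip the remaining threads of this core: i is now one
                 * past the core's last thread, so setting cpu to i - 1
                 * makes for_each_cpu() continue with the next core in
                 * the mask.
                 */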
                cpu = i - 1;
                }
                return id;
        }

        /* This will happen if you have more CPUs than available contexts,
         * all we can do here is wait a bit and try again
         */
        spin_unlock(&context_lock);
        cpu_relax();
        spin_lock(&context_lock);

        /* This will cause the caller to try again */
        return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
        struct mm_struct *mm;
        int cpu = smp_processor_id();

        /* Pick up the victim mm */
        mm = context_mm[id];

        pr_hardcont(" | steal %d from 0x%p", id, mm);

        /* Flush the TLB for that context */
        local_flush_tlb_mm(mm);

        /* Mark this mm as having no context anymore */
        mm->context.id = MMU_NO_CONTEXT;

        /* XXX This clear should ultimately be part of local_flush_tlb_mm */
        __clear_bit(id, stale_map[cpu]);

        return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
        unsigned int id, nrf, nact;

        nrf = nact = 0;
        for (id = first_context; id <= last_context; id++) {
                int used = test_bit(id, context_map);
                if (!used)
                        nrf++;
                if (used != (context_mm[id] != NULL))
                        pr_err("MMU: Context %d is %s and MM is %p !\n",
                               id, used ? "used" : "free", context_mm[id]);
                if (context_mm[id] != NULL)
                        nact += context_mm[id]->context.active;
        }
        if (nrf != nr_free_contexts) {
                pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
                       nr_free_contexts, nrf);
                nr_free_contexts = nrf;
        }
        if (nact > num_online_cpus())
                pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
                       nact, num_online_cpus());
        if (first_context > 0 && !test_bit(0, context_map))
                pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

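/* Switch the MMU context to 'next' on the current CPU: re-use its id if it
 * already has a valid one, otherwise allocate a free id from context_map
 * (stealing one when none are free), flush the local TLB if that id is
 * marked stale for this CPU, and finally point the hardware at the new
 * context with set_context().
 */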
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned int i, id, cpu = smp_processor_id();
        unsigned long *map;

        /* No lockless fast path .. yet */
        spin_lock(&context_lock);

        pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
                cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
        /* Mark us active and the previous one not anymore */
        next->context.active++;
        if (prev) {
                pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
                WARN_ON(prev->context.active < 1);
                prev->context.active--;
        }

 again:
#endif /* CONFIG_SMP */

        /* If we already have a valid assigned context, skip all that */
        id = next->context.id;
        if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
                if (context_mm[id] != next)
                        pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
                               next, id, id, context_mm[id]);
#endif
                goto ctxt_ok;
        }

        /* We really don't have a context, let's try to acquire one */
        id = next_context;
        if (id > last_context)
                id = first_context;
        map = context_map;

        /* No more free contexts, let's try to steal one */
        if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
                if (num_online_cpus() > 1) {
                        id = steal_context_smp(id);
                        if (id == MMU_NO_CONTEXT)
                                goto again;
                        goto stolen;
                }
#endif /* CONFIG_SMP */
                id = steal_context_up(id);
                goto stolen;
        }
        nr_free_contexts--;

        /* We know there's at least one free context, try to find it */
        while (__test_and_set_bit(id, map)) {
                id = find_next_zero_bit(map, last_context+1, id);
                if (id > last_context)
                        id = first_context;
        }
 stolen:
        next_context = id + 1;
        context_mm[id] = next;
        next->context.id = id;
        pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

        context_check_map();
 ctxt_ok:

        /* If that context got marked stale on this CPU, then flush the
         * local TLB for it and unmark it before we use it
         */
        if (test_bit(id, stale_map[cpu])) {
                pr_hardcont(" | stale flush %d [%d..%d]",
                            id, cpu_first_thread_in_core(cpu),
                            cpu_last_thread_in_core(cpu));

                local_flush_tlb_mm(next);

                /* XXX This clear should ultimately be part of local_flush_tlb_mm */
                for (i = cpu_first_thread_in_core(cpu);
                     i <= cpu_last_thread_in_core(cpu); i++) {
                        __clear_bit(id, stale_map[i]);
                }
        }

        /* Flick the MMU and release lock */
        pr_hardcont(" -> %d\n", id);
        set_context(id, next->pgd);
        spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
        pr_hard("initing context for mm @%p\n", mm);

        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;

        return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int id;

        if (mm->context.id == MMU_NO_CONTEXT)
                return;

        WARN_ON(mm->context.active != 0);

        spin_lock_irqsave(&context_lock, flags);
        id = mm->context.id;
        if (id != MMU_NO_CONTEXT) {
                __clear_bit(id, context_map);
                mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
                mm->context.active = 0;
#endif
                context_mm[id] = NULL;
                nr_free_contexts++;
        }
        spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
        struct task_struct *p;
#endif
        /* We don't touch CPU 0 map, it's allocated at boot and kept
         * around forever
         */
        if (cpu == 0)
                return NOTIFY_OK;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
                stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
                kfree(stale_map[cpu]);
                stale_map[cpu] = NULL;

                /* We also clear the cpu_vm_mask bits of CPUs going away */
                read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->mm)
                                cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
                }
                read_unlock(&tasklist_lock);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
        .notifier_call  = mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
        /* Mark init_mm as being active on all possible CPUs since
         * we'll get called with prev == init_mm the first time
         * we schedule on a given CPU
         */
        init_mm.context.active = NR_CPUS;

        /*
         *   The MPC8xx has only 16 contexts.  We rotate through them on each
         * task switch.  A better way would be to keep track of tasks that
         * own contexts, and implement an LRU usage.  That way very active
         * tasks don't always have to pay the TLB reload overhead.  The
         * kernel pages are mapped shared, so the kernel can run on behalf
         * of any task that makes a kernel entry.  Shared does not mean they
         * are not protected, just that the ASID comparison is not performed.
         *      -- Dan
         *
         * The IBM4xx has 256 contexts, so we can just rotate through these
         * as a way of "switching" contexts.  If the TID of the TLB is zero,
         * the PID/TID comparison is disabled, so we can use a TID of zero
         * to represent all kernel pages as shared among all contexts.
         *      -- Dan
         */
        if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
                first_context = 0;
                last_context = 15;
        } else {
                first_context = 1;
                last_context = 255;
        }

#ifdef DEBUG_CLAMP_LAST_CONTEXT
        last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
        /*
         * Allocate the maps used by context management
         */
        context_map = alloc_bootmem(CTX_MAP_SIZE);
        context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
        stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
        register_cpu_notifier(&mmu_context_cpu_nb);
#endif

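        /*
         * Worked example of the sizes reported below (illustration only,
         * assuming a 32-bit build with 256 contexts):
         *   CTX_MAP_SIZE = 4 * (255 / 32 + 1)  =   32 bytes per bitmap
         *   context_mm   = 4 * 256             = 1024 bytes
         *   total        = 2 * 32 + 1024       = 1088 bytes
         */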
        printk(KERN_INFO
               "MMU: Allocated %zu bytes of context maps for %d contexts\n",
               2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
               last_context - first_context + 1);

        /*
         * Some processors have too few contexts to reserve one for
         * init_mm, and require using context 0 for a normal task.
         * Other processors reserve the use of context zero for the kernel.
         * This code assumes first_context < 32.
         */
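        /*
         * For example, with first_context == 1 this sets bit 0 only, so
         * context 0 stays permanently reserved and is never handed out to
         * a user mm; with first_context == 0 (8xx) no bit is set and
         * context 0 is allocated like any other.
         */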
        context_map[0] = (1 << first_context) - 1;
        next_context = first_context;
        nr_free_contexts = last_context - first_context + 1;
}