linux/arch/powerpc/mm/tlb_64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

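/*
 * Per-CPU batch of pending hash table invalidations.  hpte_need_flush()
 * fills it in while Linux PTEs are being changed and __flush_tlb_pending()
 * drains it, either when it fills up or when the batch is terminated.
 */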
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
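/*
 * Page table pages that may still be referenced by other CPUs (see
 * pgtable_free_tlb() below) are not freed immediately.  They are queued
 * on a per-CPU pte_freelist_batch and only really freed from an RCU
 * callback once a grace period has elapsed.
 */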
struct pte_freelist_batch
{
        struct rcu_head rcu;
        unsigned int    index;
        pgtable_free_t  tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

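/*
 * Number of pgtable_free_t entries that fit in the remainder of the page
 * holding a struct pte_freelist_batch.  For example, with a 4K PAGE_SIZE
 * and (assuming) an 8-byte pgtable_free_t plus the small batch header,
 * this works out to roughly 500 entries per batch; the exact value
 * depends on the configuration.
 */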
#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
          / sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 0, 1);

        pgtable_free(pgf);
}

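/*
 * RCU callback, run once every CPU has passed through a grace period:
 * at that point no CPU can still be referencing the queued page table
 * pages, so free them all and then free the batch page itself.
 */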
static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                pgtable_free(batch->tables[i]);

        free_page((unsigned long)batch);
}

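/* Hand a full (or final partial) batch over to RCU for deferred freeing. */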
static void pte_free_submit(struct pte_freelist_batch *batch)
{
        INIT_RCU_HEAD(&batch->rcu);
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}

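/*
 * Queue a page table page for freeing.  If no other CPU can be using
 * this mm (single user, or the mm has only ever run on the local CPU),
 * the page is freed immediately.  Otherwise it is queued on the per-CPU
 * freelist batch and freed after an RCU grace period; if no batch page
 * can be allocated, fall back to a synchronous IPI-based free.
 */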
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
                pgtable_free(pgf);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        pgtable_free_now(pgf);
                        return;
                }
                (*batchp)->index = 0;
        }
        (*batchp)->tables[(*batchp)->index++] = pgf;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
        int i;

        i = batch->index;

        /* We mask the address for the base page size. Huge pages will
         * have applied their own masking already.
         */
        addr &= PAGE_MASK;

        /* Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in a 4K environment, as
         * for SPEs, we obtain the page size from the slice, which must
         * therefore still exist (and thus the VMA not have been reused)
         * at the time of this call.
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = mmu_huge_psize;
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
        } else
                psize = pte_pagesize_index(mm, addr, pte);

        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
        vaddr = hpt_va(addr, vsid, ssize);
        rpte = __real_pte(__pte(pte), ptep);

        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return. For now, we do global invalidates
         * in that case; it might be worth testing the mm cpu mask
         * and deciding to use local invalidates instead...
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
                return;
        }

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (e.g. copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid-stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch.
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                       batch->ssize != ssize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
                batch->ssize = ssize;
        }
        batch->pte[i] = rpte;
        batch->vaddr[i] = vaddr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        cpumask_t tmp;
        int i, local = 0;

        i = batch->index;
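        /*
         * If this mm has only ever run on the local CPU, a local
         * invalidate is sufficient; otherwise the flush must be made
         * visible to every CPU that may have used this mm.
         */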
        tmp = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                local = 1;
        if (i == 1)
                flush_hash_page(batch->vaddr[0], batch->pte[0],
                                batch->psize, batch->ssize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}

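/*
 * Submit whatever partial freelist batch is still pending on this CPU,
 * so its pages are freed once the RCU grace period has elapsed.  Called
 * when a TLB gather sequence is finished (see include/asm-powerpc/tlb.h).
 */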
void pte_free_finish(void)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB), but keeps
 *                            the linux PTEs intact.
 *
 * @mm          : mm_struct of the target address space (generally init_mm)
 * @start       : starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping,
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
        unsigned long flags;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);

        BUG_ON(!mm->pgd);

        /* Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_linux_pte(mm->pgd, start);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & _PAGE_HASHPTE))
                        continue;
                hpte_need_flush(mm, start, ptep, pte, 0);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */