linux/arch/powerpc/mm/book3s64/hash_tlb.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>


#include <trace/events/thp.h>

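/*
 * Per-CPU batch of pending hash invalidations: hpte_need_flush()
 * queues entries here while a batch is active on the CPU, and
 * __flush_tlb_pending() pushes them out to the hash table.
 */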
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        unsigned long vpn;
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid;
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
        int i, offset;

        i = batch->index;

        /*
         * Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in 4K environment like
         * for SPEs, we obtain the page size from the slice, which thus
         * must still exist (and thus the VMA not reused) at the time
         * of this call
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = get_slice_psize(mm, addr);
                /* Mask the address for the correct page size */
                addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
                if (unlikely(psize == MMU_PAGE_16G))
                        offset = PTRS_PER_PUD;
                else
                        offset = PTRS_PER_PMD;
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
        } else {
                psize = pte_pagesize_index(mm, addr, pte);
                /*
                 * Mask the address for the standard page size.  If we
                 * have a 64k page kernel, but the hardware does not
                 * support 64k pages, this might be different from the
                 * hardware page size encoded in the slice table.
                 */
                addr &= PAGE_MASK;
                offset = PTRS_PER_PTE;
        }


        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_user_vsid(&mm->context, addr, ssize);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
        WARN_ON(vsid == 0);
        vpn = hpt_vpn(addr, vsid, ssize);
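        /*
         * Note: on 64K-page configurations __real_pte() is expected to
         * also pick up the hash slot index array, which lives @offset
         * PTE entries past the Linux PTE (hence the offset computed
         * above).
         */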
        rpte = __real_pte(__pte(pte), ptep, offset);

        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return.
         */
        if (!batch->active) {
                flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
                put_cpu_var(ppc64_tlb_batch);
                return;
        }

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte). If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                       batch->ssize != ssize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
                batch->ssize = ssize;
        }
        batch->pte[i] = rpte;
        batch->vpn[i] = vpn;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
        put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        int i, local;

        i = batch->index;
        local = mm_is_thread_local(batch->mm);
        if (i == 1)
                flush_hash_page(batch->vpn[0], batch->pte[0],
                                batch->psize, batch->ssize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}

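/*
 * Flush any hash invalidations batched on this CPU before the pages
 * gathered in @tlb are freed.
 */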
void hash__tlb_flush(struct mmu_gather *tlb)
{
        struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

        /*
         * If there's a TLB batch pending, then we must flush it because the
         * pages are going to be freed and we really don't want to have a CPU
         * access a freed page because it has a stale TLB
         */
        if (tlbbatch->index)
                __flush_tlb_pending(tlbbatch);

        put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @start       : starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(unsigned long start, unsigned long end)
{
        int hugepage_shift;
        unsigned long flags;

        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = ALIGN(end, PAGE_SIZE);


        /*
         * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & H_PAGE_HASHPTE))
                        continue;
                hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}

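/*
 * Flush the hash table entries for every Linux PTE in the page table
 * page mapped by @pmd, leaving the PTEs themselves intact. A caller
 * would typically use this when, for instance, a range of normal pages
 * is about to be collapsed into a huge page and no stale HPTEs for the
 * range may survive.
 */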
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        pte_t *start_pte;
        unsigned long flags;

        addr = ALIGN_DOWN(addr, PMD_SIZE);
        /*
         * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        start_pte = pte_offset_map(pmd, addr);
        for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
                unsigned long pteval = pte_val(*pte);
                if (pteval & H_PAGE_HASHPTE)
                        hpte_need_flush(mm, addr, pte, pteval, 0);
                addr += PAGE_SIZE;
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}