linux/arch/powerpc/mm/tlb_hash32.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        unsigned long ptephys;

        if (Hash != 0) {
                /* A hash table exists, so the stale HPTE must go too */
                ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}
EXPORT_SYMBOL(flush_hash_entry);
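
/*
 * Illustrative sketch, not part of the original file: the typical
 * caller pattern when a Linux PTE is torn down.  On hash-table CPUs
 * the pte_update()/pte_clear() paths normally invoke
 * flush_hash_entry() so the stale HPTE cannot be refetched.
 * example_clear_pte() is a hypothetical helper, shown only to make
 * the ordering explicit.
 */
#if 0   /* example only */
static void example_clear_pte(struct mm_struct *mm, pte_t *ptep,
                              unsigned long addr)
{
        pte_clear(mm, addr, ptep);        /* drop the Linux PTE first */
        flush_hash_entry(mm, ptep, addr); /* then evict any stale HPTE */
}
#endif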

/*
 * Called by ptep_set_access_flags; must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault.
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
        if (Hash != 0)
                return;
        _tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
        if (Hash == 0) {
                /*
                 * 603 needs to flush the whole TLB here since
                 * it doesn't use a hash table.
                 */
                _tlbia();
        }

        /* Push out batch of freed page tables */
        pte_free_finish();
}
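
/*
 * Illustrative sketch, not part of the original file: tlb_flush() is
 * not called directly; it runs from tlb_finish_mmu() at the end of an
 * unmap sequence.  A minimal sketch of that generic pattern, assuming
 * the pointer-returning mmu_gather API of this kernel generation:
 */
#if 0   /* example only */
static void example_unmap(struct mm_struct *mm,
                          unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        /* ... clear the PTEs for [start, end) under the gather ... */
        tlb_finish_mmu(tlb, start, end);  /* ends up in tlb_flush() */
}
#endif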

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux page tables are concerned, flush it too.
 *    -- Cort
 */
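
/*
 * Illustrative sketch, not part of the original file: how generic mm
 * code drives the entry points above.  example_change_protection() is
 * a hypothetical caller, shown only to illustrate the calling
 * convention.
 */
#if 0   /* example only */
static void example_change_protection(struct vm_area_struct *vma,
                                      unsigned long start,
                                      unsigned long end)
{
        /* ... rewrite the Linux PTEs for [start, end) ... */
        flush_tlb_range(vma, start, end); /* TLB + hash table both flushed */
}
#endif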

/*
 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
 * the cache operations on the bus.  Hence we need to use an IPI
 * to get the other CPU(s) to invalidate their TLBs.
 */
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH    smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH    do { } while (0)
#endif

/*
 * Flush the TLB (and any hash-table entries) for a range of pages,
 * walking the range one PMD at a time.
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        pmd_t *pmd;
        unsigned long pmd_end;
        int count;
        unsigned int ctx = mm->context.id;

        if (Hash == 0) {
                /* No hash table: a full TLB invalidate is all we can do */
                _tlbia();
                return;
        }
        start &= PAGE_MASK;
        if (start >= end)
                return;
        end = (end - 1) | ~PAGE_MASK;   /* inclusive end, page aligned */
        pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
        for (;;) {
                /* Clamp this chunk to the end of the current page table */
                pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
                if (pmd_end > end)
                        pmd_end = end;
                if (!pmd_none(*pmd)) {
                        count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
                        flush_hash_pages(ctx, start, pmd_val(*pmd), count);
                }
                if (pmd_end == end)
                        break;
                start = pmd_end + 1;
                ++pmd;
        }
}

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_range(&init_mm, start, end);
        FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
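
/*
 * Illustrative sketch, not part of the original file: kernel mappings
 * (e.g. vmalloc space) live in init_mm, so after tearing such a
 * mapping down the whole range is flushed in one call.
 * example_kernel_unmap() is hypothetical; the unmap step is elided.
 */
#if 0   /* example only */
static void example_kernel_unmap(unsigned long addr, unsigned long size)
{
        /* ... clear the kernel PTEs for [addr, addr + size) ... */
        flush_tlb_kernel_range(addr, addr + size);
}
#endif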

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *mp;

        if (Hash == 0) {
                _tlbia();
                return;
        }

        /*
         * It is safe to go down the mm's list of vmas when called
         * from dup_mmap, holding mmap_sem.  It would also be safe from
         * unmap_region or exit_mmap, but not from vmtruncate on SMP;
         * in practice dup_mmap seems to be the only SMP case which
         * gets here.
         */
        for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
        FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct mm_struct *mm;
        pmd_t *pmd;

        if (Hash == 0) {
                _tlbie(vmaddr);
                return;
        }
        /* Kernel addresses are always translated via init_mm */
        mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
        if (!pmd_none(*pmd))
                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
        FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_page);

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        flush_range(vma->vm_mm, start, end);
        FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_range);