/* include/asm-generic/tlb.h
 *
 *      Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #ifdef ARCH_FREE_PTE_NR
    #define FREE_PTE_NR   ARCH_FREE_PTE_NR
  #else
    #define FREE_PTE_NR   506
  #endif
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR   1
  #define tlb_fast_mode(tlb) 1
#endif

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;     /* set to ~0U means fast mode */
        unsigned int            need_flush;/* Really unmapped some ptes? */
        unsigned int            fullmm; /* non-zero means full mm flush */
        struct page *           pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

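/* A minimal sketch of the matching definition an architecture using this
 * generic code provides; the file location below is an illustrative
 * assumption, only the DEFINE_PER_CPU line itself is required:
 *
 *      // arch/<arch>/mm/init.c (illustrative location)
 *      DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */
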
/* tlb_gather_mmu
 *      Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;

        /* Use fast mode if only one CPU is online */
        tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

        tlb->fullmm = full_mm_flush;

        return tlb;
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (!tlb->need_flush)
                return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
        if (!tlb_fast_mode(tlb)) {
                free_pages_and_swap_cache(tlb->pages, tlb->nr);
                tlb->nr = 0;
        }
}

/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

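/* A minimal sketch of how a caller drives the gather/finish pair, modeled
 * on the unmap path in mm/memory.c; the loop body and range handling are
 * simplified assumptions, not the exact mm code:
 *
 *      struct mmu_gather *tlb;
 *
 *      tlb = tlb_gather_mmu(mm, 0);            // disables preemption via get_cpu_var()
 *      ... walk the page tables for [start, end), calling
 *          tlb_remove_tlb_entry() and tlb_remove_page() as ptes are cleared ...
 *      tlb_finish_mmu(tlb, start, end);        // flush, free batched pages, put_cpu_var()
 */
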
/* tlb_remove_page
 *      Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
 *      handling the additional races in SMP caused by other CPUs caching valid
 *      mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
                return;
        }
        tlb->pages[tlb->nr++] = page;
        if (tlb->nr >= FREE_PTE_NR)
                tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.   This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

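/* A minimal sketch of how the two remove hooks combine in a pte-zapping
 * loop; pte/page lookup and locking are elided and the names below are
 * illustrative assumptions, not the real zap_pte_range() code:
 *
 *      pte_t ptent = ptep_get_and_clear(mm, address, ptep);    // clear the pte
 *      struct page *page = pfn_to_page(pte_pfn(ptent));        // assumes a present, mapped pte
 *
 *      tlb_remove_tlb_entry(tlb, ptep, address);       // mark the stale TLB entry
 *      tlb_remove_page(tlb, page);                     // free now (UP) or batch (SMP)
 */
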
#define pte_free_tlb(tlb, ptep, address)                        \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)                        \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)

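/* A minimal sketch of how the page-table freeing hooks above are used when
 * tearing down a range of page tables (modeled loosely on free_pgtables();
 * the sequence below is a simplified assumption, not the real walk): the pte
 * page is freed first, then the pmd and pud levels that pointed at it, all
 * under the same mmu_gather so the TLB is invalidated before the pages are
 * reused:
 *
 *      pte_free_tlb(tlb, ptep, address);       // pte page no longer referenced
 *      pmd_free_tlb(tlb, pmdp, address);       // then the pmd page
 *      pud_free_tlb(tlb, pudp, address);       // then the pud page (if not folded)
 */
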
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */