linux/arch/sparc/mm/tlb.c
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

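/* Drain this CPU's pending TLB flush batch.  The TSB entries are
 * flushed first; the TLB demap then uses a single-page flush when
 * only one address is queued, otherwise a cross-call (SMP) or a
 * direct flush of the whole list (UP).
 */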
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

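/* Lazy MMU mode brackets a run of PTE updates.  While tb->active is
 * set, tlb_batch_add_one() queues addresses instead of flushing them
 * immediately; leaving the mode drains whatever is still pending.
 */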
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

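/* Queue one virtual address for deferred flushing.  The low bit of
 * the address records whether the mapping was executable (so the
 * demap can hit the I-TLB as well).  The batch is drained early when
 * the mm or the page-size class changes, flushed synchronously when
 * batching is inactive, and flushed once TLB_BATCH_NR entries pile up.
 */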
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, bool huge)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, huge);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->huge = huge;
	}

	if (tb->huge != huge) {
		flush_tlb_pending();
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

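/* Per-PTE teardown hook.  On non-hypervisor (pre-sun4v) chips, a dirty
 * page whose kernel and user mappings fall into different D-cache
 * alias colors is flushed from the D-cache before its TLB entry is
 * queued for removal.
 */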
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	bool huge = is_hugetlb_pte(orig);

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
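		/* Bit 13 is the lowest virtual-index bit above the 8K
		 * base page offset; if the kernel and user addresses
		 * differ there, the two mappings alias in the
		 * virtually-indexed D-cache.
		 */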
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
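/* Queue a flush for every valid base-page PTE in the HPAGE_SIZE
 * region covered by a non-huge pmd that is being replaced.
 */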
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, false);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

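/* Install a pmd and keep the THP/huge-zero-page counters that size the
 * huge TSB up to date.  When a huge pmd is replaced, both
 * REAL_HPAGE_SIZE halves of the region are queued for flushing, since
 * one 8M HPAGE is backed by two real 4M hugepage TLB entries.
 */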
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, true);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  true);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

/*
 * This routine is only called when splitting a THP
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;
}

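/* Stash the preallocated pte page of a huge pmd so it can be reused
 * if the THP is later split.  The pte page itself doubles as the
 * list_head linking the deposited pages.
 */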
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

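/* Take a deposited pte page back off the list.  The first two pte
 * slots held the list_head linkage, so they are zeroed before the
 * page is handed back.
 */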
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */