/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

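/* Invalidated addresses are accumulated per CPU in a struct tlb_batch
 * (the target mm, a count, and up to TLB_BATCH_NR pending virtual
 * addresses) and then flushed in one operation, amortizing the cost
 * of cross-call TLB flushes on SMP.
 */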
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

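/* Drain this CPU's pending batch: purge the matching TSB entries
 * first, then, if the mm still holds a valid hardware context, demap
 * the addresses from the TLB (via cross-call on SMP).
 */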
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}

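/* Queue one page-aligned virtual address for a deferred flush.  Bit 0
 * of the stored address records whether the mapping was executable,
 * so that the eventual flush can cover the I-TLB as well.  The batch
 * is drained early when the mm changes, and whenever it reaches
 * TLB_BATCH_NR entries.
 */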
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}

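/* Called when an established user mapping is replaced or cleared.
 * Before queueing the address, deal with D-cache aliasing on dirty
 * file-backed pages: if the user mapping and the kernel linear
 * mapping land in different cache colors (bit 13 differs), flush the
 * page out of the D-cache.  sun4v chips are not affected by this
 * aliasing, hence the tlb_type check.  On a full-mm teardown the
 * context is dropped wholesale, so no per-page flush is queued.
 */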
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
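/* The old PMD mapped a regular page table rather than a huge page:
 * walk the PTEs covering the HPAGE_SIZE region and queue a flush for
 * every valid entry.
 */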
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

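/* sparc64 provides its own set_pmd_at() so that transparent huge page
 * mappings are accounted and flushed properly.  The huge_pte_count
 * bookkeeping lets the first huge mapping trigger hugetlb_setup(),
 * which sets up the mm's huge-page TSB.  Any previous translation is
 * queued for flushing: a single entry for a huge PMD, or a
 * PTE-by-PTE scan when the old PMD pointed at a page table.
 */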
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;
		if (mm->context.huge_pte_count == 1)
			hugetlb_setup(mm);
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}

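/* Deposit a preallocated page table for later use by a THP split.
 * The spare tables are chained through a list_head overlaid on the
 * unused page-table memory itself, with mm->pmd_huge_pte pointing at
 * the most recent deposit.  Caller holds mm->page_table_lock.
 */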
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

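/* Withdraw a previously deposited page table and zero the two PTE
 * slots that served as the list linkage.  Caller holds
 * mm->page_table_lock.
 */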
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */