/* linux/arch/powerpc/mm/hugetlbpage-book3e.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PPC Huge TLB Page Support for Book3E MMU
   4 *
   5 * Copyright (C) 2009 David Gibson, IBM Corporation.
   6 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
   7 *
   8 */
   9#include <linux/mm.h>
  10#include <linux/hugetlb.h>
  11
  12#include <asm/mmu.h>
  13
  14#ifdef CONFIG_PPC_FSL_BOOK3E
  15#ifdef CONFIG_PPC64
  16static inline int tlb1_next(void)
  17{
  18        struct paca_struct *paca = get_paca();
  19        struct tlb_core_data *tcd;
  20        int this, next;
  21
  22        tcd = paca->tcd_ptr;
  23        this = tcd->esel_next;
  24
  25        next = this + 1;
  26        if (next >= tcd->esel_max)
  27                next = tcd->esel_first;
  28
  29        tcd->esel_next = next;
  30        return this;
  31}
  32#else
  33static inline int tlb1_next(void)
  34{
  35        int index, ncams;
  36
  37        ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
  38
  39        index = this_cpu_read(next_tlbcam_idx);
  40
  41        /* Just round-robin the entries and wrap when we hit the end */
  42        if (unlikely(index == ncams - 1))
  43                __this_cpu_write(next_tlbcam_idx, tlbcam_index);
  44        else
  45                __this_cpu_inc(next_tlbcam_idx);
  46
  47        return index;
  48}
  49#endif /* !PPC64 */
  50#endif /* FSL */
  51
  52static inline int mmu_get_tsize(int psize)
  53{
  54        return mmu_psize_defs[psize].enc;
  55}
  56
  57#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
  58#include <asm/paca.h>
  59
/*
 * Take the per-core TLB lock byte in the tlb_core_data so only one
 * thread of the core manipulates the TLB at a time.
 *
 * The lock value stored is this CPU's id + 1 (0 means unlocked).
 */
static inline void book3e_tlb_lock(void)
{
        struct paca_struct *paca = get_paca();
        unsigned long tmp;
        int token = smp_processor_id() + 1;

        /*
         * Besides being unnecessary in the absence of SMT, this
         * check prevents trying to do lbarx/stbcx. on e5500 which
         * doesn't implement either feature.
         */
        if (!cpu_has_feature(CPU_FTR_SMT))
                return;

        /*
         * 1: reserve the lock byte with lbarx; if it is free, try to
         * store our token with stbcx. (retrying on lost reservation).
         * If it is held, spin at 2: with plain lbzx reads until it
         * drops to zero, then go back and re-reserve.
         */
        asm volatile("1: lbarx %0, 0, %1;"
                     "cmpwi %0, 0;"
                     "bne 2f;"
                     "stbcx. %2, 0, %1;"
                     "bne 1b;"
                     "b 3f;"
                     "2: lbzx %0, 0, %1;"
                     "cmpwi %0, 0;"
                     "bne 2b;"
                     "b 1b;"
                     "3:"
                     : "=&r" (tmp)
                     : "r" (&paca->tcd_ptr->lock), "r" (token)
                     : "memory");
}
  89
  90static inline void book3e_tlb_unlock(void)
  91{
  92        struct paca_struct *paca = get_paca();
  93
  94        if (!cpu_has_feature(CPU_FTR_SMT))
  95                return;
  96
  97        isync();
  98        paca->tcd_ptr->lock = 0;
  99}
 100#else
/* Non-FSL/32-bit builds need no per-core TLB lock, so these are no-ops. */
static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
 108#endif
 109
/*
 * Probe the TLB for a translation of @ea in address space @pid.
 * Returns non-zero if a valid entry already exists.
 *
 * NOTE(review): tlbsx updates the MAS registers as a side effect, so
 * callers must program their own MAS values afterwards, as
 * book3e_hugetlb_preload() does.
 */
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
        int found = 0;

        /* Search in @pid's address space (SPID field of MAS6). */
        mtspr(SPRN_MAS6, pid << 16);
        if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
                /* tlbsx. sets CR0 to report hit/miss directly. */
                asm volatile(
                        "li     %0,0\n"
                        "tlbsx. 0,%1\n"
                        "bne    1f\n"
                        "li     %0,1\n"
                        "1:\n"
                        : "=&r"(found) : "r"(ea));
        } else {
                /*
                 * Plain tlbsx: read back MAS1 (SPR 0x271) and shift its
                 * MSB — the VALID bit — down to yield 0 or 1.
                 */
                asm volatile(
                        "tlbsx  0,%1\n"
                        "mfspr  %0,0x271\n"
                        "srwi   %0,%0,31\n"
                        : "=&r"(found) : "r"(ea));
        }

        return found;
}
 133
/*
 * Preload a TLB entry for a freshly established hugepage PTE so the
 * access that triggered the fault does not have to take a TLB miss.
 */
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte)
{
        unsigned long mas1, mas2;
        u64 mas7_3;
        unsigned long psize, tsize, shift;
        unsigned long flags;
        struct mm_struct *mm;

#ifdef CONFIG_PPC_FSL_BOOK3E
        int index;
#endif

        /* Only user mappings are preloaded here. */
        if (unlikely(is_kernel_addr(ea)))
                return;

        mm = vma->vm_mm;

        /* Hardware TSIZE encoding is log2(page size) - 10. */
        psize = vma_mmu_pagesize(vma);
        shift = __ilog2(psize);
        tsize = shift - 10;
        /*
         * We can't be interrupted while we're setting up the MAS
         * registers or after we've confirmed that no tlb exists.
         */
        local_irq_save(flags);

        book3e_tlb_lock();

        /* Another thread may have installed the entry first; bail out. */
        if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
                book3e_tlb_unlock();
                local_irq_restore(flags);
                return;
        }

#ifdef CONFIG_PPC_FSL_BOOK3E
        /* We have to use the CAM(TLB1) on FSL parts for hugepages */
        index = tlb1_next();
        mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
#endif

        /* MAS1: valid entry tagged with the context id and page size. */
        mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
        /* MAS2: effective page number plus WIMGE attributes from the PTE. */
        mas2 = ea & ~((1UL << shift) - 1);
        mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
        /* MAS7_3: real page number plus permission (BAP) bits. */
        mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
        mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
        /* Keep clean pages write-protected so the first store faults. */
        if (!pte_dirty(pte))
                mas7_3 &= ~(MAS3_SW|MAS3_UW);

        mtspr(SPRN_MAS1, mas1);
        mtspr(SPRN_MAS2, mas2);

        if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
                mtspr(SPRN_MAS7_MAS3, mas7_3);
        } else {
                if (mmu_has_feature(MMU_FTR_BIG_PHYS))
                        mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
                mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
        }

        /* Commit the programmed MAS registers into the TLB. */
        asm volatile ("tlbwe");

        book3e_tlb_unlock();
        local_irq_restore(flags);
}
 199
 200void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 201{
 202        struct hstate *hstate = hstate_file(vma->vm_file);
 203        unsigned long tsize = huge_page_shift(hstate) - 10;
 204
 205        __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
 206}
 207