linux/arch/parisc/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


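/*
 * Arch hook used by the generic hugetlb code to pick an unmapped region
 * for a huge-page mapping.  After validating the size and (for MAP_FIXED)
 * the fixed address, it rounds any hint address up to a huge page
 * boundary and falls back to arch_get_unmapped_area(), which already
 * knows about the parisc cache colouring constraints.  Reached e.g. via
 * mmap(NULL, len, PROT_READ|PROT_WRITE,
 *      MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0) from userspace.
 */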
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;

        if (addr)
                addr = ALIGN(addr, huge_page_size(h));

        /* we need to make sure the colouring is OK */
        return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


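/*
 * Allocate the page table levels down to the PTE that will anchor a
 * huge mapping.  parisc has no huge pmd/pud entries (see pmd_huge()
 * and pud_huge() at the bottom of this file); a huge page is built
 * from a run of ordinary contiguous PTEs.  So this walks
 * pgd -> p4d -> pud -> pmd, allocates a normal PTE page, and returns
 * the first sub-pte of the range.
 */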
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_offset(pgd, addr);
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
}

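/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables
 * without allocating anything and return the first sub-pte of the
 * huge page range, or NULL if any intermediate level is empty.
 */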
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd))
                                        pte = pte_offset_map(pmd, addr);
                        }
                }
        }
        return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
        int i;

        /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
         * Linux standard huge pages (e.g. 2 MB) */
        BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

        addr &= HPAGE_MASK;
        addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

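        /*
         * One purge per physical huge page backing this Linux huge
         * page.  With the sizes from the comment above (a 2 MB huge
         * page built from 1 MB physical pages, i.e. HPAGE_SHIFT == 21
         * and REAL_HPAGE_SHIFT == 20), the loop below runs
         * 1 << (21 - 20) == 2 times, stepping the address by 1 MB per
         * iteration.
         */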
        for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
                purge_tlb_entries(mm, addr);
                addr += (1UL << REAL_HPAGE_SHIFT);
        }
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        unsigned long addr_start;
        int i;

        addr &= HPAGE_MASK;
        addr_start = addr;

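        /*
         * A huge page is represented by (1 << HUGETLB_PAGE_ORDER)
         * consecutive ordinary PTEs.  Each successive PTE maps the
         * next PAGE_SIZE chunk of the page, so the entry is advanced
         * by PAGE_SIZE on every iteration.
         */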
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(ptep, entry);
                ptep++;

                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }

        purge_tlb_entries_huge(mm, addr_start);
}

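/*
 * Public arch hook called by the generic hugetlb code to install a
 * huge mapping; it forwards to __set_huge_pte_at(), which writes all
 * of the sub-ptes and purges any stale TLB entries for the range.
 * Roughly, the generic fault path does something like:
 *
 *      ptep = huge_pte_alloc(mm, vma, addr, sz);
 *      set_huge_pte_at(mm, addr, ptep, make_huge_pte(vma, page, writable));
 *
 * (make_huge_pte() here names the mm/hugetlb.c helper; sketch only.)
 */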
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        __set_huge_pte_at(mm, addr, ptep, entry);
}


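/*
 * Read the current entry, then zero every sub-pte of the huge page.
 * The generic hugetlb code uses the returned pte e.g. when tearing
 * down a mapping, to decide what to do with the underlying page.
 */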
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;

        entry = *ptep;
        __set_huge_pte_at(mm, addr, ptep, __pte(0));

        return entry;
}


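/*
 * Drop write permission on the whole huge page, e.g. when its pages
 * are made copy-on-write at fork time.  Rewriting the full range via
 * __set_huge_pte_at() keeps all sub-ptes consistent.
 */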
void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                unsigned long addr, pte_t *ptep)
{
        pte_t old_pte;

        old_pte = *ptep;
        __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

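/*
 * If the new entry differs from the current one, rewrite the whole
 * huge page range and report back whether anything changed, so the
 * generic code knows whether the TLB needs attention; here the purge
 * already happens inside __set_huge_pte_at().  The 'dirty' hint is
 * not needed on parisc.
 */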
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep,
                                pte_t pte, int dirty)
{
        int changed;
        struct mm_struct *mm = vma->vm_mm;

        changed = !pte_same(*ptep, pte);
        if (changed) {
                __set_huge_pte_at(mm, addr, ptep, pte);
        }
        return changed;
}


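/*
 * parisc never maps a huge page with a single pmd or pud entry (huge
 * pages are built from contiguous PTEs, see above), so these report
 * that no pmd/pud-level huge entries exist.
 */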
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}