linux/arch/parisc/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

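/*
 * Validate and place a huge-page mapping: the length must be a
 * multiple of the huge page size, any address hint is rounded up to
 * a huge-page boundary, and the actual search is delegated to
 * arch_get_unmapped_area() so the usual parisc cache-colouring
 * rules apply.
 */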
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;

        if (addr)
                addr = ALIGN(addr, huge_page_size(h));

        /* we need to make sure the colouring is OK */
        return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}

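/*
 * Walk the page tables for @addr, allocating intermediate levels as
 * needed, and return a pointer to the first sub-pte of the huge page
 * (or NULL if an allocation failed).
 */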
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
}

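/*
 * Non-allocating counterpart of huge_pte_alloc(): walk the existing
 * page tables and return the first sub-pte of the huge page, or NULL
 * if any intermediate level is still empty.
 */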
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
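/*
 * Example of the loop arithmetic below: when REAL_HPAGE_SHIFT equals
 * HPAGE_SHIFT the loop runs exactly once; each bit of difference
 * doubles the number of hardware TLB entries that back one Linux
 * huge page and must therefore be purged.
 */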
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
        int i;

        /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
         * Linux standard huge pages (e.g. 2 MB) */
        BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

        addr &= HPAGE_MASK;
        addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

        for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
                purge_tlb_entries(mm, addr);
                addr += (1UL << REAL_HPAGE_SHIFT);
        }
}

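/*
 * A huge mapping is materialized as (1 << HUGETLB_PAGE_ORDER)
 * consecutive normal ptes whose physical addresses step by PAGE_SIZE;
 * this is why callers must pass the first sub-pte of the range.
 */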
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        unsigned long addr_start;
        int i;

        addr &= HPAGE_MASK;
        addr_start = addr;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(ptep, entry);
                ptep++;

                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }

        purge_tlb_entries_huge(mm, addr_start);
}

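/* Locked wrapper: takes the pgd spinlock around __set_huge_pte_at(). */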
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        unsigned long flags;

        spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
        __set_huge_pte_at(mm, addr, ptep, entry);
        spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}

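/*
 * Read the old pte and replace the whole huge range with empty ptes,
 * all under the pgd spinlock, returning the previous value.
 */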
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned long flags;
        pte_t entry;

        spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
        entry = *ptep;
        __set_huge_pte_at(mm, addr, ptep, __pte(0));
        spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);

        return entry;
}

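/*
 * Rewrite the huge range with the write bit cleared, under the pgd
 * spinlock, so that all sub-ptes stay consistent with each other.
 */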
void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                unsigned long addr, pte_t *ptep)
{
        unsigned long flags;
        pte_t old_pte;

        spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
        old_pte = *ptep;
        __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
        spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}

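/*
 * Update access/dirty bits of a huge pte.  Returns nonzero if the pte
 * actually changed, in which case the whole sub-pte range is rewritten
 * (and the TLB purged) under the pgd spinlock.
 */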
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep,
                                pte_t pte, int dirty)
{
        unsigned long flags;
        int changed;
        struct mm_struct *mm = vma->vm_mm;

        spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
        changed = !pte_same(*ptep, pte);
        if (changed) {
                __set_huge_pte_at(mm, addr, ptep, pte);
        }
        spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
        return changed;
}

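/*
 * parisc never maps huge pages with a single leaf pmd or pud entry;
 * they are emulated with runs of ordinary ptes (see __set_huge_pte_at()
 * above), so these always report false.
 */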
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}