linux/arch/parisc/include/asm/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory).
 */
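/*
 * Rough sketch of the allocation made below on a 3-level kernel; the
 * exact sizes come from PGD_ALLOC_ORDER in <asm/pgtable.h>:
 *
 *   base (pgd)  +---------------------+  first, "attached" pmd; its
 *               | pmd                 |  entry 0 carries
 *               |                     |  PxD_FLAG_ATTACHED so it is
 *   actual_pgd  +---------------------+  never cleared or freed
 *               | pgd proper          |  entry 0 points back at the
 *               +---------------------+  attached pmd
 *
 * actual_pgd = pgd + PTRS_PER_PGD is what the caller gets back, and
 * pgd_free() undoes the offset by subtracting PTRS_PER_PGD again.
 */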
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
                                               PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
                 * pmd entry may not be cleared. */
                set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
                                        PxD_FLAG_VALID |
                                        PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
                /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
                 * a signal that this pmd may not be freed. */
                set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
                /* Only initialise the per-pgd lock once the allocation has
                 * succeeded; with a NULL pgd, pgd_spinlock() would hand
                 * back a bogus address. */
                spin_lock_init(pgd_spinlock(actual_pgd));
        }
        return actual_pgd;
}

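/*
 * Free a pgd from pgd_alloc() above.  On 3-level kernels the pointer
 * handed out was offset PTRS_PER_PGD past the real start of the
 * allocation (the attached pmd sits there), so step back first.
 */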
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        pgd -= PTRS_PER_PGD;
#endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

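/*
 * Note on the encoding used by set_pud()/set_pmd() below: a PxD entry
 * stores the physical address of the next-level table shifted right by
 * PxD_VALUE_SHIFT, with the PxD_FLAG_* bits occupying the freed-up low
 * bits (see <asm/pgtable.h> for the definitions).
 */
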
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}

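/*
 * GFP_PGTABLE_KERNEL is GFP_KERNEL | __GFP_ZERO (asm-generic/pgalloc.h),
 * so the new pmd comes back already zeroed.
 */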
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd;
                 * cannot free it.
                 * Increment the counter to compensate for the decrement
                 * done by generic mm code.
                 */
                mm_inc_nr_pmds(mm);
                return;
        }
        free_pages((unsigned long)pmd, PMD_ORDER);
}

#endif

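/*
 * On two-level (CONFIG_PGTABLE_LEVELS == 2) kernels the pmd level is
 * folded into the pgd (see pgtable-nopmd.h), so the pmd helpers above
 * are not needed at all.
 */
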
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
        /* preserve the attached (PxD_FLAG_ATTACHED) marker if this is
         * the beginning of the permanent pmd */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
                                PxD_FLAG_VALID |
                                PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
        else
#endif
                set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}

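/*
 * pmd_populate() is handed the pte table as a struct page (the user
 * page-table case); page_address() turns that into the kernel virtual
 * address the _kernel variant expects.
 */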
#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif