#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the address space (i.e. for all ILP32 processes, and
 * for the whole kernel on machines with under 4GB of memory). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
                                               PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate first pmd with allocated memory.  We mark it
                 * with PxD_FLAG_ATTACHED as a signal to the system that this
                 * pmd entry may not be cleared. */
                __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
                                            PxD_FLAG_VALID |
                                            PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
                /* The first pmd entry is also marked with PxD_FLAG_ATTACHED,
                 * as a signal that this pmd may not be freed. */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
        }
        return actual_pgd;
}
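
/*
 * Illustrative sketch of the layout pgd_alloc() sets up for
 * CONFIG_PGTABLE_LEVELS == 3 (derived from the code above; with the
 * sizes quoted in the comment, 8TB / 4GB = 2048 top-level entries):
 *
 *   pgd (raw allocation)        actual_pgd = pgd + PTRS_PER_PGD
 *   |                           |
 *   v                           v
 *   +---------------------------+---------------------------+
 *   | first pmd (ATTACHED)      | pgd proper                |
 *   +---------------------------+---------------------------+
 *
 * actual_pgd[0] is made to point back at the attached pmd, so the pmd
 * covering the first 4GB always sits at a constant negative offset
 * from the pgd that generic mm code sees, and pgd_free() below undoes
 * the same offset before releasing the pages.
 */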

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        pgd -= PTRS_PER_PGD;
#endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
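
/* Note that entries written here, unlike the one set up in pgd_alloc(),
 * do not carry PxD_FLAG_ATTACHED, so the pmds they reference remain
 * eligible for release in pmd_free() below. */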

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
        return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd;
                 * cannot free it.
                 * Increment the counter to compensate for the decrement
                 * done by generic mm code.
                 */
                mm_inc_nr_pmds(mm);
                return;
        }
        free_pages((unsigned long)pmd, PMD_ORDER);
}
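
/* The compensating mm_inc_nr_pmds() above exists because the generic
 * teardown path (free_pmd_range() in mm/memory.c, at least in kernels
 * of this vintage) decrements the mm's pmd count unconditionally after
 * the pmd is torn down, while the attached pmd lives and dies with the
 * pgd itself. */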

#else

/* Two Level Page Table Support for pmd's */

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)                 do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
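
/* With two levels the pmd is folded into the pgd, so generic mm code
 * should never reach these: BUG() catches any such call, and the
 * ((pmd_t *)2) value only exists to give the statement expression a
 * pmd_t * type; it is never actually used. */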

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
        /* preserve the PxD_FLAG_ATTACHED marker if this is the
         * beginning of the permanent pmd */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
                                     PxD_FLAG_VALID |
                                     PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
        else
#endif
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
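
/* pmd_populate_kernel() takes a kernel virtual address; pmd_populate()
 * handles user pte tables, which are managed as struct pages, by
 * resolving the page to its mapped address with page_address() and
 * reusing the kernel variant. */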

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);

        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return page;
}
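
/* pgtable_page_ctor() sets the page up for use as a pte table (page
 * table accounting plus, when split page-table locks are configured,
 * the per-table lock); if that setup fails the page has to be freed
 * here rather than handed back to the caller. */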

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        pgtable_page_dtor(pte);
        pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache()       do { } while (0)
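
/* parisc keeps no architecture-level cache of free page tables, so the
 * check_pgt_cache() hook invoked periodically by generic code is a
 * no-op here. */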

#endif