   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * linux/arch/unicore32/include/asm/pgalloc.h
   4 *
   5 * Code specific to PKUnity SoC and UniCore ISA
   6 *
   7 * Copyright (C) 2001-2010 GUAN Xue-tao
   8 */
   9#ifndef __UNICORE_PGALLOC_H__
  10#define __UNICORE_PGALLOC_H__
  11
  12#include <asm/pgtable-hwdef.h>
  13#include <asm/processor.h>
  14#include <asm/cacheflush.h>
  15#include <asm/tlbflush.h>
  16
  17#define check_pgt_cache()               do { } while (0)
  18
  19#define _PAGE_USER_TABLE        (PMD_TYPE_TABLE | PMD_PRESENT)
  20#define _PAGE_KERNEL_TABLE      (PMD_TYPE_TABLE | PMD_PRESENT)
  21
  22extern pgd_t *get_pgd_slow(struct mm_struct *mm);
  23extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
  24
  25#define pgd_alloc(mm)                   get_pgd_slow(mm)
  26#define pgd_free(mm, pgd)               free_pgd_slow(mm, pgd)
  27
  28#define PGALLOC_GFP     (GFP_KERNEL | __GFP_ZERO)
  29
  30/*
  31 * Allocate one PTE table.
  32 */
  33static inline pte_t *
  34pte_alloc_one_kernel(struct mm_struct *mm)
  35{
  36        pte_t *pte;
  37
  38        pte = (pte_t *)__get_free_page(PGALLOC_GFP);
  39        if (pte)
  40                clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
  41
  42        return pte;
  43}
  44
  45static inline pgtable_t
  46pte_alloc_one(struct mm_struct *mm)
  47{
  48        struct page *pte;
  49
  50        pte = alloc_pages(PGALLOC_GFP, 0);
  51        if (!pte)
  52                return NULL;
  53        if (!PageHighMem(pte)) {
  54                void *page = page_address(pte);
  55                clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
  56        }
  57        if (!pgtable_page_ctor(pte)) {
  58                __free_page(pte);
  59        }
  60
  61        return pte;
  62}
  63
  64/*
  65 * Free one PTE table.
  66 */
  67static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  68{
  69        if (pte)
  70                free_page((unsigned long)pte);
  71}
  72
/*
 * Free one user PTE table page.  The pgtable_page_ctor() state set up
 * in pte_alloc_one() must be torn down (dtor) before the page itself
 * is released — order matters.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
  78
/*
 * Install @pmdval into *@pmdp and then flush that PMD entry so the
 * hardware page-table walker observes the update.  The write must
 * precede the flush.
 */
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
	set_pmd(pmdp, __pmd(pmdval));
	flush_pmd_entry(pmdp);
}
  84
  85/*
  86 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
  87 * of the mm address space.
  88 */
  89static inline void
  90pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
  91{
  92        unsigned long pte_ptr = (unsigned long)ptep;
  93
  94        /*
  95         * The pmd must be loaded with the physical
  96         * address of the PTE table
  97         */
  98        __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
  99}
 100
/*
 * Populate the pmdp entry for a user-space PTE table.  @ptep is a
 * page-table page (pgtable_t / struct page *), so it is converted to
 * the table's physical address (pfn << PAGE_SHIFT) before being
 * installed with the user-table type bits.
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp,
			page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
 107#define pmd_pgtable(pmd) pmd_page(pmd)
 108
 109#endif
 110