linux/arch/unicore32/include/asm/pgalloc.h
/*
 * linux/arch/unicore32/include/asm/pgalloc.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_PGALLOC_H__
#define __UNICORE_PGALLOC_H__

#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

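/*
 * This port keeps no cache of preallocated page tables, so the
 * check_pgt_cache() hook has nothing to do.
 */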
#define check_pgt_cache()               do { } while (0)

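/*
 * Type/permission bits OR'ed into a PMD entry when it is pointed at a
 * PTE table.  User and kernel page tables use the same bits here.
 */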
#define _PAGE_USER_TABLE        (PMD_TYPE_TABLE | PMD_PRESENT)
#define _PAGE_KERNEL_TABLE      (PMD_TYPE_TABLE | PMD_PRESENT)

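/*
 * PGD allocation and freeing are implemented out of line; the standard
 * pgd_alloc()/pgd_free() hooks simply forward to the slow-path helpers.
 */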
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)                   get_pgd_slow(mm)
#define pgd_free(mm, pgd)               free_pgd_slow(mm, pgd)

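/*
 * GFP flags for page-table pages: __GFP_ZERO means a freshly allocated
 * table comes back already cleared.
 */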
#define PGALLOC_GFP     (GFP_KERNEL | __GFP_ZERO)

/*
 * Allocate one PTE table.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(PGALLOC_GFP);
        /* clean the zeroed table out of the D-cache so it is visible
         * in RAM to the hardware page-table walker */
        if (pte)
                clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));

        return pte;
}

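/*
 * Allocate one PTE table for a user address space.  The table is handed
 * back as a struct page (pgtable_t); pgtable_page_ctor() initialises it
 * for use as a page table (e.g. for split page-table locking).
 */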
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        struct page *pte;

        pte = alloc_pages(PGALLOC_GFP, 0);
        if (!pte)
                return NULL;
        if (!PageHighMem(pte)) {
                void *page = page_address(pte);
                clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
        }
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }

        return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        if (pte)
                free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
        pgtable_page_dtor(pte);
        __free_page(pte);
}

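/*
 * Install @pmdval into a PMD slot and clean the entry out to memory so
 * the hardware table walker picks up the new mapping.
 */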
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
        set_pmd(pmdp, __pmd(pmdval));
        flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
        unsigned long pte_ptr = (unsigned long)ptep;

        /*
         * The pmd must be loaded with the physical
         * address of the PTE table
         */
        __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
}

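/*
 * Same as above, but for a user PTE table referenced by its struct page:
 * convert it to a physical address and tag it with the user table bits.
 */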
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
        __pmd_populate(pmdp,
                        page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

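/*
 * Rough usage sketch (illustration only, not part of this interface; the
 * real callers live in the generic mm code).  When a PMD slot has no PTE
 * table yet, a table is allocated and then hooked in:
 *
 *      pgtable_t new = pte_alloc_one(mm, addr);
 *      if (new)
 *              pmd_populate(mm, pmd, new);     // PMD now maps the table
 *
 * pte_free() (or pte_free_kernel() for the kernel-side pair) releases a
 * table that ends up not being installed.
 */
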
#endif