linux/arch/tile/include/asm/pgalloc.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
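
/*
 * For example, assuming the common tilegx configuration of 64KB base
 * pages and 16MB huge pages, a kernel L2 page table is smaller than a
 * page, so user L2 tables (rounded up to whole pages above) come out
 * as order-0 allocations.  The actual sizes depend on the hypervisor's
 * _HV_LOG2_L2_SIZE() definition and the configured page sizes.
 */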
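/*
 * Install a pmd entry.  On 64-bit (tilegx) a pmd_t has the same layout
 * as a pte_t, so it can be handed to set_pte() directly; on 32-bit
 * (tilepro) the pmd level is folded into the pgd, so we reach through
 * the pud/pgd wrappers to get at the underlying pte.
 */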
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
        set_pte(pmdp, pmd);
#else
        set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}

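/*
 * Point a pmd entry at an L2 (pte-level) page table.  The _kernel
 * variant takes the table's kernel virtual address, while pmd_populate()
 * takes the struct page of a user page table; both convert the table's
 * physical address to a hypervisor PTFN before building the entry.
 */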
static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *ptep)
{
        set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
                              __pgprot(_PAGE_PRESENT)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t page)
{
        set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
                              __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
                                   int order);
extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
{
        return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}
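
/*
 * Roughly how the generic mm code is expected to use the hooks above
 * (a simplified sketch, not the exact mm/memory.c code): allocate a
 * pte page, then install it under the page table lock, freeing it
 * again if another thread raced us and populated the pmd first.
 *
 *      pgtable_t new = pte_alloc_one(mm, address);
 *      if (!new)
 *              return -ENOMEM;
 *      spin_lock(&mm->page_table_lock);
 *      if (likely(pmd_none(*pmd))) {
 *              pmd_populate(mm, pmd, new);
 *              new = NULL;
 *      }
 *      spin_unlock(&mm->page_table_lock);
 *      if (new)
 *              pte_free(mm, new);
 */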

#define pmd_pgtable(pmd) pmd_page(pmd)

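/*
 * Kernel page tables are handled by virtual address rather than by
 * struct page: pte_alloc_one_kernel() returns the lowmem address of a
 * freshly allocated table, and pte_free_kernel() converts back to the
 * struct page (after checking the pointer really is page-aligned).
 */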
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
        pte_free(mm, virt_to_page(pte));
}

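/*
 * Page tables torn down during munmap/exit are not freed immediately;
 * they are queued on the mmu_gather so the pages are only returned to
 * the allocator after the relevant TLB entries have been flushed.
 */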
extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
                               unsigned long address, int order);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
                                  unsigned long address)
{
        __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
}

#define check_pgt_cache()       do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

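/*
 * "Shattering" a huge page means replacing its single huge-page
 * mapping with an L2 page table of small-page PTEs covering the same
 * range, so that individual small pages can then be remapped or have
 * their protections changed.
 */
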
/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);

#ifdef __tilegx__

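/*
 * On tilegx the pud level is folded into the pgd, and pud entries have
 * the same format as pmd entries, so populating a pud with an L1 (pmd)
 * table simply reuses pmd_populate_kernel() with casts.
 */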
#define pud_populate(mm, pud, pmd) \
  pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))

/* Bits for the size of the L1 (intermediate) page table. */
#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)

/* How big is a kernel L1 page table? */
#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)

/* We currently allocate L1 page tables by page. */
#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for an L1 page table? */
#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)

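/*
 * The pmd (L1 table) allocator mirrors the pte-level helpers above,
 * but deals in kernel virtual addresses and uses L1_USER_PGTABLE_ORDER
 * for the allocation size.
 */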
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
        return (pmd_t *)page_to_virt(p);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long address)
{
        __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
                           L1_USER_PGTABLE_ORDER);
}

#endif /* __tilegx__ */

#endif /* _ASM_TILE_PGALLOC_H */