linux/arch/frv/include/asm/pgtable.h
/* pgtable.h: FR-V page table mangling
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from:
 *      include/asm-m68knommu/pgtable.h
 *      include/asm-i386/pgtable.h
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm-generic/5level-fixup.h>
#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
struct vm_area_struct;
#endif

#ifndef __ASSEMBLY__
#if defined(CONFIG_HIGHPTE)
typedef unsigned long pte_addr_t;
#else
typedef pte_t *pte_addr_t;
#endif
#endif

/*****************************************************************************/
/*
 * MMU-less operation case first
 */
#ifndef CONFIG_MMU

#define pgd_present(pgd)        (1)             /* pages are always present on NO_MM */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)   (1)
#define pmd_offset(a, b)        ((void *) 0)

#define PAGE_NONE               __pgprot(0)     /* these mean nothing to NO_MM */
#define PAGE_SHARED             __pgprot(0)     /* these mean nothing to NO_MM */
#define PAGE_COPY               __pgprot(0)     /* these mean nothing to NO_MM */
#define PAGE_READONLY           __pgprot(0)     /* these mean nothing to NO_MM */
#define PAGE_KERNEL             __pgprot(0)     /* these mean nothing to NO_MM */

#define __swp_type(x)           (0)
#define __swp_offset(x)         (0)
#define __swp_entry(typ,off)    ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#define ZERO_PAGE(vaddr)        ({ BUG(); NULL; })

#define swapper_pg_dir          ((pgd_t *) NULL)

#define pgtable_cache_init()            do {} while (0)

#include <asm-generic/pgtable.h>

#else /* !CONFIG_MMU */
/*****************************************************************************/
/*
 * then MMU operation
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#ifndef __ASSEMBLY__
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)        virt_to_page(empty_zero_page)
#endif

/*
 * we use 2-level page tables, folding the PMD (mid-level table) into the PGE (top-level entry)
 * [see Documentation/frv/mmu-layout.txt]
 *
 * Page Directory:
 *  - Size: 16KB
 *  - 64 PGEs per PGD
 *  - Each PGE holds 1 PUD and covers 64MB
 *
 * Page Upper Directory:
 *  - Size: 256B
 *  - 1 PUE per PUD
 *  - Each PUE holds 1 PMD and covers 64MB
 *
 * Page Mid-Level Directory
 *  - Size: 256B
 *  - 1 PME per PMD
 *  - Each PME holds 64 STEs, all of which point to separate chunks of the same Page Table
 *  - All STEs are instantiated at the same time
 *
 * Page Table
 *  - Size: 16KB
 *  - 4096 PTEs per PT
 *  - Each Linux PT is subdivided into 64 FR451 PTs, each of which holds 64 entries
 *
 * Pages
 *  - Size: 16KB (consistent with 4096 PTEs covering each 64MB PGE slice,
 *    and with 262144 PTEs covering the whole 4GB space)
 *
 * total PTEs
 *      = 1 PML4E * 64 PGEs * 1 PUE * 1 PME * 4096 PTEs
 *      = 1 PML4E * 64 PGEs * 64 STEs * 64 PTEs/FR451-PT
 *      = 262144 (or 256 * 1024)
 *
 * (a worked index-decomposition example follows the definitions below)
 */
#define PGDIR_SHIFT             26
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD            64

#define __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT               26
#define PTRS_PER_PUD            1
#define PUD_SIZE                (1UL << PUD_SHIFT)
#define PUD_MASK                (~(PUD_SIZE - 1))
#define PUE_SIZE                256

#define __PAGETABLE_PMD_FOLDED
#define PMD_SHIFT               26
#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE - 1))
#define PTRS_PER_PMD            1
#define PME_SIZE                256

#define __frv_PT_SIZE           256

#define PTRS_PER_PTE            4096

#define USER_PGDS_IN_LAST_PML4  (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0UL

#define USER_PGD_PTRS           (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT    26
#define BOOT_USER_PGD_PTRS      (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS    (PTRS_PER_PGD - BOOT_USER_PGD_PTRS)
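
/*
 * Worked example (an editorial illustration; the address is arbitrary):
 * with PGDIR_SHIFT == 26, pgd_index(0xc0101234) == 0xc0101234 >> 26 == 48,
 * selecting the 49th of the 64 PGEs. Each PGE covers a 64MB slice, so
 * 64 x 64MB spans the whole 4GB space, and within a slice the 4096 PTEs
 * at 16KB per page likewise cover 4096 x 16KB == 64MB.
 */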

#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte)
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e)))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e))))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)                         \
do {                                                    \
        *(pteptr) = (pteval);                           \
        asm volatile("dcf %M0" :: "U"(*pteptr));        \
} while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
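
/*
 * Editorial note (a hedged sketch of the rationale, not extra API): the
 * "dcf" (data cache flush) instruction writes the just-stored PTE's
 * cache line back to memory; the TLB miss handlers reach the page
 * tables through their own DAMPR mappings (see update_mmu_cache()
 * below), so the update must be visible in memory, not merely in the
 * data cache.
 */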

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)           { return 0; }
static inline int pgd_bad(pgd_t pgd)            { return 0; }
static inline int pgd_present(pgd_t pgd)        { return 1; }
static inline void pgd_clear(pgd_t *pgd)        { }

#define pgd_populate(mm, pgd, pud)              do { } while (0)
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pgd(pgdptr, pgdval)                         \
do {                                                    \
        memcpy((pgdptr), &(pgdval), sizeof(pgd_t));     \
        asm volatile("dcf %M0" :: "U"(*(pgdptr)));      \
} while(0)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *) pgd;
}

#define pgd_page(pgd)                           (pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd)                     (pud_page_vaddr((pud_t){ pgd }))

/*
 * allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address)              NULL
#define pud_free(mm, x)                         do { } while (0)
#define __pud_free_tlb(tlb, x, address)         do { } while (0)

/*
 * The "pud_xxx()" functions here are trivial for a folded two-level
 * setup: the pmd is never bad, and a pmd always exists (as it's folded
 * into the pud entry)
 */
static inline int pud_none(pud_t pud)           { return 0; }
static inline int pud_bad(pud_t pud)            { return 0; }
static inline int pud_present(pud_t pud)        { return 1; }
static inline void pud_clear(pud_t *pud)        { }

#define pud_populate(mm, pmd, pte)              do { } while (0)

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval)                 set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

#define pud_page(pud)                           (pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud)                     (pmd_page_vaddr((pmd_t){ pud }))

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd);

#define set_pmd(pmdptr, pmdval)                 \
do {                                            \
        __set_pmd((pmdptr), (pmdval).ste[0]);   \
} while(0)

#define __pmd_index(address)                    0

static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
        return (pmd_t *) dir + __pmd_index(address);
}

#define pte_same(a, b)          ((a).pte == (b).pte)
#define pte_page(x)             (mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
#define pte_none(x)             (!(x).pte)
#define pte_pfn(x)              ((unsigned long)(((x).pte >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)      __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define VMALLOC_VMADDR(x)       ((unsigned long) (x))

#endif /* !__ASSEMBLY__ */

/*
 * control flags in AMPR registers and TLB entries
 */
#define _PAGE_BIT_PRESENT       xAMPRx_V_BIT
#define _PAGE_BIT_WP            DAMPRx_WP_BIT
#define _PAGE_BIT_NOCACHE       xAMPRx_C_BIT
#define _PAGE_BIT_SUPER         xAMPRx_S_BIT
#define _PAGE_BIT_ACCESSED      xAMPRx_RESERVED8_BIT
#define _PAGE_BIT_DIRTY         xAMPRx_M_BIT
#define _PAGE_BIT_NOTGLOBAL     xAMPRx_NG_BIT

#define _PAGE_PRESENT           xAMPRx_V
#define _PAGE_WP                DAMPRx_WP
#define _PAGE_NOCACHE           xAMPRx_C
#define _PAGE_SUPER             xAMPRx_S
#define _PAGE_ACCESSED          xAMPRx_RESERVED8        /* accessed if set */
#define _PAGE_DIRTY             xAMPRx_M
#define _PAGE_NOTGLOBAL         xAMPRx_NG

#define _PAGE_RESERVED_MASK     (xAMPRx_RESERVED8 | xAMPRx_RESERVED13)

#define _PAGE_PROTNONE          0x000   /* If not present */

#define _PAGE_CHG_MASK          (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PGPROT_BASE \
        (_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED)

#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(__PGPROT_BASE)
#define PAGE_COPY       __pgprot(__PGPROT_BASE | _PAGE_WP)
#define PAGE_READONLY   __pgprot(__PGPROT_BASE | _PAGE_WP)

#define __PAGE_KERNEL           (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY)
#define __PAGE_KERNEL_NOCACHE   (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE)
#define __PAGE_KERNEL_RO        (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP)

#define MAKE_GLOBAL(x) __pgprot((x) & ~_PAGE_NOTGLOBAL)

#define PAGE_KERNEL             MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO          MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE     MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#define _PAGE_TABLE             (_PAGE_PRESENT | xAMPRx_SS_16Kb)

#ifndef __ASSEMBLY__

/*
 * The FR451 can do execute protection by virtue of having separate TLB miss handlers for
 * instruction access and for data access. However, we don't have enough reserved bits to say
 * "execute only", so we don't bother. If you can read it, you can execute it and vice versa.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
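
/*
 * Editorial key to the tables above: the three digits are the vma's
 * VM_EXEC, VM_WRITE and VM_READ permission bits, in that order; e.g.
 * __P011 (private, read+write) maps to PAGE_COPY so that a write faults
 * and triggers copy-on-write, whereas the shared __S011 maps straight
 * to PAGE_SHARED.
 */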

/*
 * Define this to warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#define pte_present(x)  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)     (!pmd_val(x))
#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)      (pmd_val(x) & xAMPRx_SS)
#define pmd_clear(xp)   do { __set_pmd(xp, 0); } while(0)

#define pmd_page_vaddr(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)   (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#endif

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)          { return (pte).pte & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return (pte).pte & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)          { return !((pte).pte & _PAGE_WP); }
static inline int pte_special(pte_t pte)        { return 0; }

static inline pte_t pte_mkclean(pte_t pte)      { (pte).pte &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { (pte).pte |= _PAGE_WP; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { (pte).pte |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { (pte).pte |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { (pte).pte &= ~_PAGE_WP; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)    { return pte; }
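
/*
 * Illustrative sketch (an editorial example; "example_mkwrite_dirty" is a
 * hypothetical helper, not part of this header): the modifiers above take
 * and return a pte_t by value, so they compose freely without touching
 * the page tables until the result is installed with set_pte_at().
 */
static inline pte_t example_mkwrite_dirty(pte_t pte)
{
        return pte_mkdirty(pte_mkwrite(pte));   /* clear WP, set M */
}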

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
        asm volatile("dcf %M0" :: "U"(*ptep));
        return i;
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long x = xchg(&ptep->pte, 0);
        asm volatile("dcf %M0" :: "U"(*ptep));
        return __pte(x);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        set_bit(_PAGE_BIT_WP, ptep);
        asm volatile("dcf %M0" :: "U"(*ptep));
}

/*
 * Macro to mark a page protection value as "uncacheable"
 */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))
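
/*
 * Illustrative use (an editorial sketch; the helper name is hypothetical):
 * building an uncached protection, e.g. for mapping device registers:
 */
static inline pgprot_t example_nocache_prot(void)
{
        return pgprot_noncached(PAGE_KERNEL);   /* PAGE_KERNEL plus xAMPRx_C */
}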

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry)      ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot)   pfn_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte &= _PAGE_CHG_MASK;
        pte.pte |= pgprot_val(newprot);
        return pte;
}
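
/*
 * Worked sketch (editorial; "example_make_readonly" is hypothetical):
 * pte_modify() keeps only the page frame number and the accessed/dirty
 * bits (_PAGE_CHG_MASK) and substitutes the new protection, so switching
 * an entry to read-only looks like:
 */
static inline pte_t example_make_readonly(pte_t pte)
{
        return pte_modify(pte, PAGE_READONLY);
}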

/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_k(addr) pgd_index(addr)

/* Find an entry in the bottom-level page table.. */
#define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
                (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
        ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)
#endif
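
/*
 * Illustrative walk (an editorial sketch; "example_lookup_kernel_pte" is
 * a hypothetical helper, not part of this header): chaining the offset
 * accessors above to find the kernel PTE for a virtual address.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
        pgd_t *pge = pgd_offset_k(address);             /* top level */
        pud_t *pue = pud_offset(pge, address);          /* folded into pgd */
        pmd_t *pme = pmd_offset(pue, address);          /* folded likewise */

        if (pmd_none(*pme) || pmd_bad(*pme))
                return NULL;
        return pte_offset_kernel(pme, address);
}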

/*
 * Handle swap and file entries
 * - the PTE is encoded in the following format:
 *      bit 0:          Must be 0 (!_PAGE_PRESENT)
 *      bits 1-5:       Swap type (the 0x1f mask uses five bits; bit 6 is spare)
 *      bits 7-31:      Swap offset
 */
#define __swp_type(x)                   (((x).val >> 1) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 7)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
#define __pte_to_swp_entry(_pte)        ((swp_entry_t) { (_pte).pte })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
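
/*
 * Worked example (an editorial illustration): __swp_entry(3, 0x1000)
 * yields (3 << 1) | (0x1000 << 7) == 0x80006; __swp_type() recovers
 * (0x80006 >> 1) & 0x1f == 3, and __swp_offset() recovers
 * 0x80006 >> 7 == 0x1000.
 */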

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (0)
#define kern_addr_valid(addr)   (1)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

/*
 * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
 */
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct mm_struct *mm;
        unsigned long ampr;

        mm = current->mm;
        if (mm) {
                pgd_t *pge = pgd_offset(mm, address);
                pud_t *pue = pud_offset(pge, address);
                pmd_t *pme = pmd_offset(pue, address);

                ampr = pme->ste[0] & 0xffffff00;
                ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
                        xAMPRx_V;
        } else {
                address = ULONG_MAX;
                ampr = 0;
        }

        asm volatile("movgs %0,scr0\n"
                     "movgs %0,scr1\n"
                     "movgs %1,dampr4\n"
                     "movgs %1,dampr5\n"
                     :
                     : "r"(address), "r"(ampr)
                     );
}

#ifdef CONFIG_PROC_FS
extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer);
#endif

extern void __init pgtable_cache_init(void);

#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_MMU */

#ifndef __ASSEMBLY__
extern void __init paging_init(void);
#endif /* !__ASSEMBLY__ */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ASM_PGTABLE_H */