linux/arch/x86/include/asm/pgtable_32.h
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
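/*
 * Illustrative sketch (assumes a valid mm and a mapped user address;
 * locking and error checking omitted): because the mid level is folded,
 * a generic walk still passes through every accessor, but the pud step
 * is always a no-op on 32-bit and the pmd step is a no-op as well unless
 * PAE is enabled.
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	...
 *	pte_unmap(pte);
 */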
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);


/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

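/*
 * With CONFIG_HIGHPTE, user PTE pages may live in highmem, so a PTE page
 * has to be temporarily mapped with kmap_atomic_pte() before its entries
 * can be dereferenced; __KM_PTE picks a kmap slot that is safe for the
 * current context (NMI, hardirq or process).  Every pte_offset_map() must
 * therefore be paired with pte_unmap(), and the _nested variants with each
 * other.  Without HIGHPTE the PTE page is always in lowmem and the map and
 * unmap steps reduce to plain address arithmetic.
 */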
#if defined(CONFIG_HIGHPTE)
#define __KM_PTE			\
	(in_nmi() ? KM_NMI_PTE :	\
	 in_irq() ? KM_IRQ_PTE :	\
	 KM_PTE0)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +		\
	 pte_index((address)))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
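/*
 * Illustrative use (a sketch of a kunmap-style teardown path; kmap_pte,
 * idx and vaddr are names from the highmem code, shown only for context):
 * clear the kernel PTE backing an atomic kmap slot and invalidate its TLB
 * entry on the local CPU.
 *
 *	kpte_clear_flush(kmap_pte - idx, vaddr);
 */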

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

#endif /* _ASM_X86_PGTABLE_32_H */