linux/arch/x86/include/asm/page.h
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif  /* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}
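
/*
 * Illustrative note, not part of the original header: these are the
 * architecture hooks called by the generic highmem helpers. On x86 they
 * reduce to plain clear_page()/copy_page() because user pages need no
 * extra cache maintenance; the vaddr and struct page arguments exist for
 * architectures with virtually indexed caches. Roughly how the generic
 * <linux/highmem.h> code of this era uses the clear hook:
 *
 *      static inline void clear_user_highpage(struct page *page,
 *                                             unsigned long vaddr)
 *      {
 *              void *addr = kmap_atomic(page);
 *              clear_user_page(addr, vaddr, page);
 *              kunmap_atomic(addr);
 *      }
 */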

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
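
/*
 * Illustrative note, not part of the original header: defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE makes <linux/highmem.h> use the
 * arch macro above instead of its generic fallback. The generic wrapper
 * of this era then boils down to roughly
 *
 *      static inline struct page *
 *      alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 *                                         unsigned long vaddr)
 *      {
 *              return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
 *      }
 *
 * which the anonymous page fault path uses to hand out zeroed user pages.
 */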

#define __pa(x)         __phys_addr((unsigned long)(x))
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/*
 * __pa_symbol should be used for C-visible symbols. This seems to be the
 * official gcc-blessed way to do such arithmetic.
 *
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during the __pa() calculation and optimize it unexpectedly.
 * Newer versions of gcc provide the -fno-strict-overflow switch to handle
 * this case properly. Once all supported versions of gcc understand it, we
 * can remove this voodoo magic (i.e. once gcc 3.x is deprecated).
 */
#define __pa_symbol(x) \
        __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))

#define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))

#define __boot_va(x)            __va(x)
#define __boot_pa(x)            __pa(x)
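
/*
 * Illustrative sketch, not part of the original header: __pa()/__va()
 * translate between direct-mapped kernel virtual addresses and physical
 * addresses, while __pa_symbol() is the variant for linker-visible kernel
 * symbols such as _text. A hypothetical round trip:
 *
 *      void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *      phys_addr_t phys = __pa(buf);             physical address of buf
 *      void *same = __va(phys);                  back to the same pointer
 *      phys_addr_t kstart = __pa_symbol(_text);  use __pa_symbol for symbols
 *
 * Only direct-mapped addresses may be fed to __pa(); vmalloc() addresses,
 * for example, must go through vmalloc_to_page() instead.
 */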

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)  __virt_addr_valid((unsigned long) (kaddr))
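
/*
 * Illustrative sketch, not part of the original header: virt_addr_valid()
 * is the guard that makes virt_to_page() safe to call, e.g. in a
 * hypothetical helper:
 *
 *      static struct page *kaddr_to_page_checked(const void *kaddr)
 *      {
 *              if (!virt_addr_valid(kaddr))
 *                      return NULL;
 *              return virt_to_page(kaddr);
 *      }
 *
 * pfn_to_kaddr() goes the other way, from a page frame number back to its
 * address in the kernel direct mapping.
 */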

#endif  /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define __HAVE_ARCH_GATE_AREA 1

#endif  /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */