/*
 * linux/arch/x86/include/asm/page.h
 * (lines below retain the code browser's original line numbers)
 */
   1#ifndef _ASM_X86_PAGE_H
   2#define _ASM_X86_PAGE_H
   3
   4#include <linux/types.h>
   5
   6#ifdef __KERNEL__
   7
   8#include <asm/page_types.h>
   9
  10#ifdef CONFIG_X86_64
  11#include <asm/page_64.h>
  12#else
  13#include <asm/page_32.h>
  14#endif  /* CONFIG_X86_64 */
  15
  16#ifndef __ASSEMBLY__
  17
  18struct page;
  19
  20/*
  21 * Clear a page that is mapped into user space at virtual address
  22 * @vaddr.  On x86 this is a plain clear_page(); the @vaddr and @pg
  23 * arguments are unused here and exist only to satisfy the common
  24 * cross-architecture interface.
  25 */
  20static inline void clear_user_page(void *page, unsigned long vaddr,
  21                                   struct page *pg)
  22{
  23        clear_page(page);
  24}
  25
  26/*
  27 * Copy a user page from @from to @to.  On x86 this is a plain
  28 * copy_page(); the @vaddr and @topage arguments are unused here and
  29 * exist only to satisfy the common cross-architecture interface.
  30 */
  26static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  27                                  struct page *topage)
  28{
  29        copy_page(to, from);
  30}
  31
  32/*
  33 * Allocate a zeroed highmem-capable page for mapping at @vaddr in
  34 * @vma; @movableflags is OR-ed into the GFP mask by the caller.
  35 * Defining __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE signals that this
  36 * architecture supplies its own implementation in place of the
  37 * generic one.
  38 */
  32#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
  33        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
  34#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
  35
  36/* Translate a kernel virtual address to a physical address. */
  37#define __pa(x)         __phys_addr((unsigned long)(x))
  38/* As __pa(), but via the _nodebug translation variant (presumably
  39   skipping debug-time address checking — confirm in page_*.h). */
  40#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
  41
  42/*
  43 * __pa_symbol() should be used for C-visible symbols; this is the
  44 * official gcc-blessed way to do such arithmetic.
  45 *
  46 * We need __phys_reloc_hide() here because gcc may assume that there is no
  47 * overflow during __pa() calculation and can optimize it unexpectedly.
  48 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
  49 * case properly. Once all supported versions of gcc understand it, we can
  50 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
  51 */
  47#define __pa_symbol(x)  __pa(__phys_reloc_hide((unsigned long)(x)))
  48
  49/* Translate a physical address to its direct-mapped kernel virtual
  50   address by offsetting with PAGE_OFFSET. */
  49#define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))
  50
  51/* Boot-time variants: on x86 these are identical to __va()/__pa(). */
  51#define __boot_va(x)            __va(x)
  52#define __boot_pa(x)            __pa(x)
  53
  54/*
  55 * virt_to_page(kaddr) returns a valid pointer if and only if
  56 * virt_addr_valid(kaddr) returns true.  For any other kaddr the
  57 * resulting struct page pointer must not be dereferenced.
  58 */
  58#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
  59/*
  60 * Translate a page frame number to its direct-mapped kernel virtual
  61 * address.  Cast the pfn to unsigned long *before* shifting: if a
  62 * caller passes a 32-bit pfn type, the shift would otherwise be done
  63 * in 32 bits and overflow on 64-bit kernels.
  64 */
  59#define pfn_to_kaddr(pfn)      __va((unsigned long)(pfn) << PAGE_SHIFT)
  60extern bool __virt_addr_valid(unsigned long kaddr);
  61/* True iff @kaddr may safely be handed to virt_to_page() (see the
  62   comment above virt_to_page()). */
  61#define virt_addr_valid(kaddr)  __virt_addr_valid((unsigned long) (kaddr))
  62
  63#endif  /* __ASSEMBLY__ */
  64
  65#include <asm-generic/memory_model.h>
  66#include <asm-generic/getorder.h>
  67
  68/* NOTE(review): x86 supplies its own gate-area helpers (presumably
  69   for the vsyscall page — confirm against the mm gate-area users). */
  68#define __HAVE_ARCH_GATE_AREA 1
  69
  70#endif  /* __KERNEL__ */
  71#endif /* _ASM_X86_PAGE_H */
  72