linux/include/linux/highmem.h
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
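
/*
 * Usage sketch (illustrative, not part of this header): per
 * Documentation/cachetlb.txt, a driver doing I/O through a vmap()
 * alias of pages is expected to flush the alias before the device
 * reads that memory and invalidate it before the CPU reads data the
 * device wrote, roughly:
 *
 *	void *vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	flush_kernel_vmap_range(vaddr, len);      [ CPU wrote, device will read ]
 *	... submit I/O ...
 *	invalidate_kernel_vmap_range(vaddr, len); [ device wrote, CPU will read ]
 *
 * "pages", "nr_pages" and "len" are assumed caller-provided names.
 */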

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
        preempt_disable();
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
        preempt_enable();
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()     do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
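
/*
 * Usage sketch (illustrative): kmap() may sleep, so it is only valid
 * in process context, and every kmap() must be paired with a kunmap()
 * of the same page:
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, data, len);	[ "data"/"len" assumed from caller ]
 *	kunmap(page);
 *
 * For short, non-sleeping critical sections, prefer the kmap_atomic()
 * pair defined above.
 */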

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);

        BUG_ON(idx < 0);
#else
        __this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
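
/*
 * Sketch of how an architecture's kmap_atomic() consumes the per-CPU
 * index stack above (illustrative; the real implementations live under
 * arch/, e.g. arch/x86/mm/highmem_32.c, and the fixmap arithmetic
 * varies per architecture):
 *
 *	int type = kmap_atomic_idx_push();
 *	int idx = type + KM_TYPE_NR * smp_processor_id();
 *	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *
 * The matching __kunmap_atomic() tears the PTE down and then calls
 * kmap_atomic_idx_pop(), which is why the slots behave as a stack.
 */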

/*
 * Prevent people from calling kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() must be passed the return value of kmap_atomic(),
 * not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic(addr);                                  \
} while (0)
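
/*
 * Correct pairing (illustrative): the BUILD_BUG_ON above rejects the
 * common mistake of passing the struct page instead of the mapping:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);		[ OK: the returned address ]
 *	kunmap_atomic(page);		[ build error: a struct page * ]
 *
 * No sleeping is allowed between kmap_atomic() and kunmap_atomic().
 */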


/* When CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
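
/*
 * Typical call site (illustrative): anonymous-fault handlers such as
 * do_anonymous_page() in mm/memory.c allocate their zeroed page this
 * way, roughly:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 *
 * where "address" and the "oom" label are assumed from the caller.
 */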

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}
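
/*
 * Usage sketch (illustrative): filesystems use these helpers to zero
 * the parts of a page outside the valid data, e.g. zeroing everything
 * past EOF within the last page of a file:
 *
 *	unsigned offset = i_size & (PAGE_SIZE - 1);	[ bytes used in page ]
 *	zero_user_segment(page, offset, PAGE_SIZE);
 *
 * "i_size" stands in for the file size obtained from the inode.
 */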

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}
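
/*
 * Note on the unmap order above: kmap_atomic() slots form a per-CPU
 * stack (see kmap_atomic_idx_push/pop), so mappings must be released
 * in reverse order of acquisition: vto was mapped last and is
 * unmapped first. Page-migration code such as migrate_page_copy() in
 * mm/migrate.c relies on copy_highpage() to move page contents.
 */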

#endif /* _LINUX_HIGHMEM_H */