/* linux/include/linux/vmalloc.h */
   1#ifndef _LINUX_VMALLOC_H
   2#define _LINUX_VMALLOC_H
   3
   4#include <linux/spinlock.h>
   5#include <linux/init.h>
   6#include <linux/list.h>
   7#include <linux/llist.h>
   8#include <asm/page.h>           /* pgprot_t */
   9#include <linux/rbtree.h>
  10
  11struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
  12struct notifier_block;          /* in notifier.h */
  13
  14/* bits in flags of vmalloc's vm_struct below */
  15#define VM_IOREMAP              0x00000001      /* ioremap() and friends */
  16#define VM_ALLOC                0x00000002      /* vmalloc() */
  17#define VM_MAP                  0x00000004      /* vmap()ed pages */
  18#define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
  19#define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
  20#define VM_NO_GUARD             0x00000040      /* don't add guard page */
  21#define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
  22/* bits [20..32] reserved for arch specific ioremap internals */
  23
  24/*
  25 * Maximum alignment for ioremap() regions.
  26 * Can be overriden by arch-specific value.
  27 */
  28#ifndef IOREMAP_MAX_ORDER
  29#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
  30#endif
  31
  32struct vm_struct {
  33        struct vm_struct        *next;
  34        void                    *addr;
  35        unsigned long           size;
  36        unsigned long           flags;
  37        struct page             **pages;
  38        unsigned int            nr_pages;
  39        phys_addr_t             phys_addr;
  40        const void              *caller;
  41};
  42
  43struct vmap_area {
  44        unsigned long va_start;
  45        unsigned long va_end;
  46        unsigned long flags;
  47        struct rb_node rb_node;         /* address sorted rbtree */
  48        struct list_head list;          /* address sorted list */
  49        struct llist_node purge_list;    /* "lazy purge" list */
  50        struct vm_struct *vm;
  51        struct rcu_head rcu_head;
  52};
  53
  54/*
  55 *      Highlevel APIs for driver use
  56 */
  57extern void vm_unmap_ram(const void *mem, unsigned int count);
  58extern void *vm_map_ram(struct page **pages, unsigned int count,
  59                                int node, pgprot_t prot);
  60extern void vm_unmap_aliases(void);
  61
  62#ifdef CONFIG_MMU
  63extern void __init vmalloc_init(void);
  64#else
  65static inline void vmalloc_init(void)
  66{
  67}
  68#endif
  69
  70extern void *vmalloc(unsigned long size);
  71extern void *vzalloc(unsigned long size);
  72extern void *vmalloc_user(unsigned long size);
  73extern void *vmalloc_node(unsigned long size, int node);
  74extern void *vzalloc_node(unsigned long size, int node);
  75extern void *vmalloc_exec(unsigned long size);
  76extern void *vmalloc_32(unsigned long size);
  77extern void *vmalloc_32_user(unsigned long size);
  78extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
  79extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
  80                        unsigned long start, unsigned long end, gfp_t gfp_mask,
  81                        pgprot_t prot, unsigned long vm_flags, int node,
  82                        const void *caller);
  83
  84extern void vfree(const void *addr);
  85
  86extern void *vmap(struct page **pages, unsigned int count,
  87                        unsigned long flags, pgprot_t prot);
  88extern void vunmap(const void *addr);
  89
  90extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
  91                                       unsigned long uaddr, void *kaddr,
  92                                       unsigned long size);
  93
  94extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
  95                                                        unsigned long pgoff);
  96void vmalloc_sync_all(void);
  97 
  98/*
  99 *      Lowlevel-APIs (not for driver use!)
 100 */
 101
 102static inline size_t get_vm_area_size(const struct vm_struct *area)
 103{
 104        if (!(area->flags & VM_NO_GUARD))
 105                /* return actual size without guard page */
 106                return area->size - PAGE_SIZE;
 107        else
 108                return area->size;
 109
 110}
 111
 112extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 113extern struct vm_struct *get_vm_area_caller(unsigned long size,
 114                                        unsigned long flags, const void *caller);
 115extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 116                                        unsigned long start, unsigned long end);
 117extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 118                                        unsigned long flags,
 119                                        unsigned long start, unsigned long end,
 120                                        const void *caller);
 121extern struct vm_struct *remove_vm_area(const void *addr);
 122extern struct vm_struct *find_vm_area(const void *addr);
 123
 124extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 125                        struct page **pages);
 126#ifdef CONFIG_MMU
 127extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 128                                    pgprot_t prot, struct page **pages);
 129extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 130extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 131#else
 132static inline int
 133map_kernel_range_noflush(unsigned long start, unsigned long size,
 134                        pgprot_t prot, struct page **pages)
 135{
 136        return size >> PAGE_SHIFT;
 137}
 138static inline void
 139unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 140{
 141}
 142static inline void
 143unmap_kernel_range(unsigned long addr, unsigned long size)
 144{
 145}
 146#endif
 147
 148/* Allocate/destroy a 'vmalloc' VM area. */
 149extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
 150extern void free_vm_area(struct vm_struct *area);
 151
 152/* for /dev/kmem */
 153extern long vread(char *buf, char *addr, unsigned long count);
 154extern long vwrite(char *buf, char *addr, unsigned long count);
 155
 156/*
 157 *      Internals.  Dont't use..
 158 */
 159extern struct list_head vmap_area_list;
 160extern __init void vm_area_add_early(struct vm_struct *vm);
 161extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 162
 163#ifdef CONFIG_SMP
 164# ifdef CONFIG_MMU
 165struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 166                                     const size_t *sizes, int nr_vms,
 167                                     size_t align);
 168
 169void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 170# else
 171static inline struct vm_struct **
 172pcpu_get_vm_areas(const unsigned long *offsets,
 173                const size_t *sizes, int nr_vms,
 174                size_t align)
 175{
 176        return NULL;
 177}
 178
 179static inline void
 180pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 181{
 182}
 183# endif
 184#endif
 185
 186#ifdef CONFIG_MMU
 187#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
 188#else
 189#define VMALLOC_TOTAL 0UL
 190#endif
 191
 192int register_vmap_purge_notifier(struct notifier_block *nb);
 193int unregister_vmap_purge_notifier(struct notifier_block *nb);
 194
 195#endif /* _LINUX_VMALLOC_H */
 196