/* linux/include/linux/vmalloc.h */
   1#ifndef _LINUX_VMALLOC_H
   2#define _LINUX_VMALLOC_H
   3
   4#include <linux/spinlock.h>
   5#include <linux/init.h>
   6#include <linux/list.h>
   7#include <asm/page.h>           /* pgprot_t */
   8#include <linux/rbtree.h>
   9
  10struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
  11
  12/* bits in flags of vmalloc's vm_struct below */
  13#define VM_IOREMAP              0x00000001      /* ioremap() and friends */
  14#define VM_ALLOC                0x00000002      /* vmalloc() */
  15#define VM_MAP                  0x00000004      /* vmap()ed pages */
  16#define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
  17#define VM_VPAGES               0x00000010      /* buffer for pages was vmalloc'ed */
  18#define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
  19#define VM_NO_GUARD             0x00000040      /* don't add guard page */
  20#define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
  21/* bits [20..32] reserved for arch specific ioremap internals */
  22
  23/*
  24 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
  26 */
  27#ifndef IOREMAP_MAX_ORDER
  28#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
  29#endif
  30
struct vm_struct {
	struct vm_struct	*next;		/* early-boot list link (see vm_area_add_early()) */
	void			*addr;		/* base kernel virtual address of the area */
	unsigned long		size;		/* bytes; includes the trailing guard page unless
						 * VM_NO_GUARD is set (see get_vm_area_size()) */
	unsigned long		flags;		/* VM_* bits defined above */
	struct page		**pages;	/* backing page array (nr_pages entries) */
	unsigned int		nr_pages;	/* number of entries in @pages */
	phys_addr_t		phys_addr;	/* NOTE(review): presumably the physical address
						 * backing ioremap areas — confirm in mm/vmalloc.c */
	const void		*caller;	/* creation call site (see get_vm_area_caller()) */
};
  41
struct vmap_area {
	unsigned long va_start;		/* first virtual address covered */
	unsigned long va_end;		/* end of range — presumably exclusive; confirm */
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;		/* associated vm_struct, if any */
	struct rcu_head rcu_head;	/* for RCU-deferred freeing */
};
  52
  53/*
  54 *      Highlevel APIs for driver use
  55 */
  56extern void vm_unmap_ram(const void *mem, unsigned int count);
  57extern void *vm_map_ram(struct page **pages, unsigned int count,
  58                                int node, pgprot_t prot);
  59extern void vm_unmap_aliases(void);
  60
  61#ifdef CONFIG_MMU
  62extern void __init vmalloc_init(void);
  63#else
/* !CONFIG_MMU: no vmalloc bookkeeping to initialize */
static inline void vmalloc_init(void)
{
}
  67#endif
  68
  69extern void *vmalloc(unsigned long size);
  70extern void *vzalloc(unsigned long size);
  71extern void *vmalloc_user(unsigned long size);
  72extern void *vmalloc_node(unsigned long size, int node);
  73extern void *vzalloc_node(unsigned long size, int node);
  74extern void *vmalloc_exec(unsigned long size);
  75extern void *vmalloc_32(unsigned long size);
  76extern void *vmalloc_32_user(unsigned long size);
  77extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
  78extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
  79                        unsigned long start, unsigned long end, gfp_t gfp_mask,
  80                        pgprot_t prot, unsigned long vm_flags, int node,
  81                        const void *caller);
  82
  83extern void vfree(const void *addr);
  84
  85extern void *vmap(struct page **pages, unsigned int count,
  86                        unsigned long flags, pgprot_t prot);
  87extern void vunmap(const void *addr);
  88
  89extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
  90                                       unsigned long uaddr, void *kaddr,
  91                                       unsigned long size);
  92
  93extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
  94                                                        unsigned long pgoff);
  95void vmalloc_sync_all(void);
  96 
  97/*
  98 *      Lowlevel-APIs (not for driver use!)
  99 */
 100
 101static inline size_t get_vm_area_size(const struct vm_struct *area)
 102{
 103        if (!(area->flags & VM_NO_GUARD))
 104                /* return actual size without guard page */
 105                return area->size - PAGE_SIZE;
 106        else
 107                return area->size;
 108
 109}
 110
 111extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 112extern struct vm_struct *get_vm_area_caller(unsigned long size,
 113                                        unsigned long flags, const void *caller);
 114extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 115                                        unsigned long start, unsigned long end);
 116extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 117                                        unsigned long flags,
 118                                        unsigned long start, unsigned long end,
 119                                        const void *caller);
 120extern struct vm_struct *remove_vm_area(const void *addr);
 121extern struct vm_struct *find_vm_area(const void *addr);
 122
 123extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 124                        struct page **pages);
 125#ifdef CONFIG_MMU
 126extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 127                                    pgprot_t prot, struct page **pages);
 128extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 129extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 130#else
 131static inline int
 132map_kernel_range_noflush(unsigned long start, unsigned long size,
 133                        pgprot_t prot, struct page **pages)
 134{
 135        return size >> PAGE_SHIFT;
 136}
/* !CONFIG_MMU stub: no kernel page tables to tear down */
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
/* !CONFIG_MMU stub: nothing to unmap, no TLB flush needed */
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
 145#endif
 146
 147/* Allocate/destroy a 'vmalloc' VM area. */
 148extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
 149extern void free_vm_area(struct vm_struct *area);
 150
 151/* for /dev/kmem */
 152extern long vread(char *buf, char *addr, unsigned long count);
 153extern long vwrite(char *buf, char *addr, unsigned long count);
 154
 155/*
 *	Internals.  Don't use..
 157 */
 158extern struct list_head vmap_area_list;
 159extern __init void vm_area_add_early(struct vm_struct *vm);
 160extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 161
 162#ifdef CONFIG_SMP
 163# ifdef CONFIG_MMU
 164struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 165                                     const size_t *sizes, int nr_vms,
 166                                     size_t align);
 167
 168void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 169# else
 170static inline struct vm_struct **
 171pcpu_get_vm_areas(const unsigned long *offsets,
 172                const size_t *sizes, int nr_vms,
 173                size_t align)
 174{
 175        return NULL;
 176}
 177
/* !CONFIG_MMU stub: pcpu_get_vm_areas() returned NULL, nothing to free */
static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
 182# endif
 183#endif
 184
 185#ifdef CONFIG_MMU
 186#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
 187#else
 188#define VMALLOC_TOTAL 0UL
 189#endif
 190
 191#endif /* _LINUX_VMALLOC_H */
 192