linux/include/linux/vmalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>           /* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
struct notifier_block;          /* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP              0x00000001      /* ioremap() and friends */
#define VM_ALLOC                0x00000002      /* vmalloc() */
#define VM_MAP                  0x00000004      /* vmap()ed pages */
#define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT         0x00000010      /* dma_alloc_coherent */
#define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
#define VM_NO_GUARD             0x00000040      /* don't add guard page */
#define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS    0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES        0x00000200      /* put pages and free array in vfree */
#define VM_NO_HUGE_VMAP         0x00000400      /* force PAGE_SIZE pte mapping */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
#endif

struct vm_struct {
        struct vm_struct        *next;
        void                    *addr;
        unsigned long           size;
        unsigned long           flags;
        struct page             **pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        unsigned int            page_order;
#endif
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
        const void              *caller;
};

struct vmap_area {
        unsigned long va_start;
        unsigned long va_end;

        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */

        /*
         * The following two variables can be packed, because
         * a vmap_area object can be either:
         *    1) in "free" tree (root is free_vmap_area_root)
         *    2) or "busy" tree (root is vmap_area_root)
         */
        union {
                unsigned long subtree_max_size; /* in "free" tree */
                struct vm_struct *vm;           /* in "busy" tree */
        };
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
                                                         u64 pfn, unsigned int max_page_shift)
{
        return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
        return PAGE_SHIFT;
}
#endif

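/*
 * Illustrative sketch, not part of this header: an architecture that
 * selects HAVE_ARCH_HUGE_VMAP provides its overrides in <asm/vmalloc.h>
 * (included above) and defines the matching macro so the generic
 * fallbacks here are skipped. The feature check below is a placeholder,
 * not taken from any real architecture.
 *
 *      #define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *      static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *      {
 *              return cpu_supports_block_mappings;     // hypothetical flag
 *      }
 */
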
/*
 *      Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

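/*
 * Illustrative sketch, not part of this header: a caller that already
 * holds an array of page pointers can get a transient, virtually
 * contiguous view of them with vm_map_ram() and must tear it down with
 * vm_unmap_ram() using the same page count. "pages", "nr_pages" and
 * "src" are placeholders.
 *
 *      void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *      if (!va)
 *              return -ENOMEM;
 *      memcpy(va, src, nr_pages * PAGE_SIZE);  // use the mapping
 *      vm_unmap_ram(va, nr_pages);
 */
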
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
                        pgprot_t prot, unsigned long vm_flags, int node,
                        const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller);
void *vmalloc_no_huge(unsigned long size);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

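/*
 * Illustrative sketch, not part of this header: the usual pattern for the
 * high-level allocators is a NULL check followed by vfree() once the
 * buffer is no longer needed. "struct foo" and "nents" are placeholders;
 * array_size() comes from <linux/overflow.h>, which is included above.
 *
 *      struct foo *table = vzalloc(array_size(nents, sizeof(*table)));
 *
 *      if (!table)
 *              return -ENOMEM;
 *      ...
 *      vfree(table);
 */
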
extern void *vmap(struct page **pages, unsigned int count,
                        unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

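/*
 * Illustrative sketch, not part of this header: vmap() builds a
 * long-lived virtually contiguous mapping over caller-provided pages;
 * vunmap() only removes the mapping, the pages themselves still belong
 * to the caller (unless VM_MAP_PUT_PAGES was passed). "pages" and
 * "nr_pages" are placeholders.
 *
 *      void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *      if (!va)
 *              return -ENOMEM;
 *      ...
 *      vunmap(va);
 */
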
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                       unsigned long uaddr, void *kaddr,
                                       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);

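/*
 * Illustrative sketch, not part of this header: a driver ->mmap() handler
 * exposing a buffer that was allocated with vmalloc_user() (so the area
 * carries VM_USERMAP) to user space. "foo_mmap" and "buf" are
 * placeholders for a driver-private hook and pointer.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *      }
 */
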
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). Instead,
 * the compiler is relied upon to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

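/*
 * Illustrative sketch, not part of this header: an architecture that needs
 * PMD-level changes in the vmalloc/ioremap range propagated to other page
 * tables would define the mask and implement the hook roughly as below.
 * PGTBL_PMD_MODIFIED is defined in <linux/pgtable.h>; the helper called in
 * the body is hypothetical.
 *
 *      #define ARCH_PAGE_TABLE_SYNC_MASK       PGTBL_PMD_MODIFIED
 *
 *      void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *      {
 *              sync_top_level_entries(start, end);     // hypothetical helper
 *      }
 */
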
/*
 *      Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
        if (!(area->flags & VM_NO_GUARD))
                /* return actual size without guard page */
                return area->size - PAGE_SIZE;
        else
                return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
                                        unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
                                        unsigned long flags,
                                        unsigned long start, unsigned long end,
                                        const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
        /*
         * This may not tell with 100% certainty whether the area is mapped
         * with > PAGE_SIZE page table entries: an architecture may indicate
         * that larger sizes are available and still decide not to use them,
         * and nothing prevents that. This only indicates the size of the
         * physical pages allocated in the vmalloc layer.
         */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        return find_vm_area(addr)->page_order > 0;
#else
        return false;
#endif
}

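/*
 * Illustrative sketch, not part of this header: looking up the vm_struct
 * behind a vmalloc'ed address to report its usable size (guard page
 * excluded) and whether it is backed by huge pages. "ptr" is a
 * placeholder for an address returned by vmalloc().
 *
 *      struct vm_struct *area = find_vm_area(ptr);
 *
 *      if (area)
 *              pr_info("%zu usable bytes, huge mapping: %d\n",
 *                      get_vm_area_size(area), is_vm_area_hugepages(ptr));
 */
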
#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
        struct vm_struct *vm = find_vm_area(addr);

        if (vm)
                vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

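/*
 * Illustrative sketch, not part of this header: code that changes the
 * protections of a vmalloc'ed region is expected to mark it first, so that
 * vfree() later resets the direct map and flushes the TLB. set_memory_ro()
 * is declared in <asm/set_memory.h> and shown only for context; "nr_pages"
 * is a placeholder.
 *
 *      void *p = __vmalloc(nr_pages * PAGE_SIZE, GFP_KERNEL);
 *
 *      if (!p)
 *              return NULL;
 *      set_vm_flush_reset_perms(p);
 *      set_memory_ro((unsigned long)p, nr_pages);
 */
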
/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 *      Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                     const size_t *sizes, int nr_vms,
                                     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
                const size_t *sizes, int nr_vms,
                size_t align)
{
        return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

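/*
 * Illustrative sketch, not part of this header: a subsystem that caches
 * vmap mappings can register a purge notifier and drop its caches when
 * vmalloc address space runs low. "foo_drop_cached_mappings()" is a
 * hypothetical helper.
 *
 *      static int foo_vmap_notify(struct notifier_block *nb,
 *                                 unsigned long action, void *data)
 *      {
 *              foo_drop_cached_mappings();
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_vmap_nb = {
 *              .notifier_call  = foo_vmap_notify,
 *      };
 *      ...
 *      register_vmap_purge_notifier(&foo_vmap_nb);
 */
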
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */