linux/include/linux/gfp.h
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA       ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM   ((__force gfp_t)0x02u)
#define __GFP_DMA32     ((__force gfp_t)0x04u)
#define __GFP_MOVABLE   ((__force gfp_t)0x08u)  /* Page is movable */
#define GFP_ZONEMASK    (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
 * Action modifiers - these don't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT      ((__force gfp_t)0x10u)  /* Can wait and reschedule? */
#define __GFP_HIGH      ((__force gfp_t)0x20u)  /* Should access emergency pools? */
#define __GFP_IO        ((__force gfp_t)0x40u)  /* Can start physical IO? */
#define __GFP_FS        ((__force gfp_t)0x80u)  /* Can call down to low-level FS? */
#define __GFP_COLD      ((__force gfp_t)0x100u) /* Cache-cold page required */
#define __GFP_NOWARN    ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
#define __GFP_REPEAT    ((__force gfp_t)0x400u) /* See above */
#define __GFP_NOFAIL    ((__force gfp_t)0x800u) /* See above */
#define __GFP_NORETRY   ((__force gfp_t)0x1000u)/* See above */
#define __GFP_COMP      ((__force gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO      ((__force gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE  ((__force gfp_t)0x40000u)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */

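/*
 * Illustrative sketch (not part of the original header): a caller that can
 * tolerate failure under memory pressure might combine a base flag set with
 * the action modifiers above and fall back on failure, e.g.
 *
 *        struct page *page;
 *
 *        page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, 2);
 *        if (!page)
 *                return -ENOMEM;
 */
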
#ifdef CONFIG_KMEMCHECK
#define __GFP_NOTRACK   ((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
#else
#define __GFP_NOTRACK   ((__force gfp_t)0)
#endif

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 22     /* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT      (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_NOIO        (__GFP_WAIT)
#define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY   (__GFP_WAIT | __GFP_IO | __GFP_FS | \
                         __GFP_RECLAIMABLE)
#define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                         __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE    (__GFP_WAIT | __GFP_IO | __GFP_FS | \
                                 __GFP_HARDWALL | __GFP_HIGHMEM | \
                                 __GFP_MOVABLE)
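
/*
 * Illustrative sketch (not part of the original header): the composites
 * above encode the caller's context.  Code that may sleep uses GFP_KERNEL;
 * interrupt handlers and other atomic contexts must use GFP_ATOMIC, which
 * never waits and may dip into the emergency pools:
 *
 *        page = alloc_page(GFP_KERNEL);   process context, may sleep
 *        page = alloc_page(GFP_ATOMIC);   atomic context, never sleeps
 */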

#ifdef CONFIG_NUMA
#define GFP_THISNODE    (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE    ((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         __GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32       __GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

        if (unlikely(page_group_by_mobility_disabled))
                return MIGRATE_UNMOVABLE;

        /* Group based on mobility */
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
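
/*
 * Worked example (illustrative): assuming the MIGRATE_* ordering in
 * linux/mmzone.h (UNMOVABLE = 0, RECLAIMABLE = 1, MOVABLE = 2), the
 * expression above maps
 *
 *        GFP_KERNEL           -> 0 (MIGRATE_UNMOVABLE)
 *        GFP_TEMPORARY        -> 1 (MIGRATE_RECLAIMABLE)
 *        GFP_HIGHUSER_MOVABLE -> 2 (MIGRATE_MOVABLE)
 *
 * Setting both __GFP_MOVABLE and __GFP_RECLAIMABLE is invalid, which is
 * exactly what the WARN_ON above checks.
 */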

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only one bit of the lowest three bits (DMA, DMA32, HIGHMEM) can be set
 * to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
        (ZONE_NORMAL << 0 * ZONES_SHIFT)                                \
        | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)                     \
        | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)             \
        | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)                 \
        | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)                  \
        | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)   \
        | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
        | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One bit per
 * combination, starting with bit 0. A bit is set if the combination is
 * not allowed.
 */
#define GFP_ZONE_BAD ( \
        1 << (__GFP_DMA | __GFP_HIGHMEM)                                \
        | 1 << (__GFP_DMA | __GFP_DMA32)                                \
        | 1 << (__GFP_DMA32 | __GFP_HIGHMEM)                            \
        | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)                \
        | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)              \
        | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)                \
        | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)            \
        | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
        enum zone_type z;
        int bit = flags & GFP_ZONEMASK;

        z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
                                         ((1 << ZONES_SHIFT) - 1);

        if (__builtin_constant_p(bit))
                MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
        else {
#ifdef CONFIG_DEBUG_VM
                BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
        }
        return z;
}
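
/*
 * Worked example (illustrative, assuming a configuration where
 * ZONES_SHIFT == 2): GFP_HIGHUSER has bit == __GFP_HIGHMEM == 0x2, so the
 * table is shifted right by 2 * 2 == 4 bits and masked with 0x3, yielding
 * the OPT_ZONE_HIGHMEM entry.  GFP_HIGHUSER_MOVABLE has bit == 0xa,
 * selecting the ZONE_MOVABLE entry instead.
 */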

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return kernel virtual
 * addresses for the allocated page(s).
 */
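
/*
 * Illustrative sketch (not part of the original header): the struct page
 * variants may return highmem pages, which must be mapped (e.g. with
 * kmap()) before the kernel touches their contents, while the
 * address-returning variants must not be passed __GFP_HIGHMEM:
 *
 *        struct page *page = alloc_page(GFP_HIGHUSER);
 *        unsigned long addr = __get_free_page(GFP_KERNEL);
 */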

static inline int gfp_zonelist(gfp_t flags)
{
        if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
                return 1;

        return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
        return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
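
/*
 * Illustrative sketch (not part of the original header): on a NUMA build,
 * gfp_zonelist() selects the node-local zonelist for __GFP_THISNODE
 * allocations and the full fallback zonelist otherwise:
 *
 *        node_zonelist(nid, GFP_KERNEL)                   zonelist 0
 *        node_zonelist(nid, GFP_KERNEL | __GFP_THISNODE)  zonelist 1
 */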

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
{
        return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
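
/*
 * Illustrative sketch (not part of the original header): callers that have
 * already validated nid use the _exact_node variant; alloc_pages_node()
 * additionally treats a negative nid as "allocate on the current node":
 *
 *        page = alloc_pages_node(-1, GFP_KERNEL, 0);
 */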

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
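
/*
 * Illustrative sketch (not part of the original header): unlike the
 * power-of-two allocators, alloc_pages_exact() gives back the unused tail
 * of the rounded-up allocation, so an odd-sized request does not pin a
 * whole larger block:
 *
 *        void *buf = alloc_pages_exact(100 * 1024, GFP_KERNEL);
 *        if (buf)
 *                free_pages_exact(buf, 100 * 1024);
 */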

#define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
                __get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
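
/*
 * Illustrative sketch (not part of the original header): order-based
 * allocations must be freed with the same order they were allocated with:
 *
 *        unsigned long addr = __get_free_pages(GFP_KERNEL, 3);  8 pages
 *        if (addr)
 *                free_pages(addr, 3);
 */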

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

extern gfp_t gfp_allowed_mask;

static inline void set_gfp_allowed_mask(gfp_t mask)
{
        gfp_allowed_mask = mask;
}

#endif /* __LINUX_GFP_H */