/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Return the pgdat of the first (lowest-numbered) online node. */
struct pglist_data *first_online_pgdat(void)
{
        return NODE_DATA(first_online_node);
}

/*
 * Return the pgdat of the next online node after @pgdat's, or NULL
 * when @pgdat belongs to the last online node.
 */
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
        int nid = next_online_node(pgdat->node_id);

        if (nid == MAX_NUMNODES)
                return NULL;
        return NODE_DATA(nid);
}
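
/*
 * Usage sketch (illustrative only, not part of the original file): walking
 * every online pgdat with the two helpers above.  The kernel wraps this
 * exact loop in the for_each_online_pgdat() macro in <linux/mmzone.h>;
 * the example_* helper below is hypothetical.
 */
static inline void example_walk_online_pgdats(void)
{
        struct pglist_data *pgdat;

        for (pgdat = first_online_pgdat(); pgdat;
                        pgdat = next_online_pgdat(pgdat))
                pr_info("node %d: %lu spanned pages\n",
                        pgdat->node_id, pgdat->node_spanned_pages);
}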

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
        pg_data_t *pgdat = zone->zone_pgdat;

        if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                zone++;
        else {
                pgdat = next_online_pgdat(pgdat);
                if (pgdat)
                        zone = pgdat->node_zones;
                else
                        zone = NULL;
        }
        return zone;
}
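
/*
 * Usage sketch (illustrative only): next_zone() is the iterator step behind
 * the for_each_zone() macro in <linux/mmzone.h>, which visits every zone of
 * every online node.  Empty zones are included, so callers typically filter
 * with populated_zone().  The example_* helper below is hypothetical.
 */
static inline void example_count_populated_zones(void)
{
        struct zone *zone;
        int nr = 0;

        for_each_zone(zone)
                if (populated_zone(zone))
                        nr++;

        pr_info("%d populated zones\n", nr);
}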

/*
 * Returns non-zero if @zref's node is set in @nodes.  Without NUMA there
 * is only one node, so every zone trivially matches.
 */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
        return node_isset(zonelist_node_idx(zref), *nodes);
#else
        return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes)
{
        /*
         * Find the next suitable zone to use for the allocation.
         * Only filter based on nodemask if it's set.
         */
        if (likely(nodes == NULL))
                while (zonelist_zone_idx(z) > highest_zoneidx)
                        z++;
        else
                while (zonelist_zone_idx(z) > highest_zoneidx ||
                                (z->zone && !zref_in_nodemask(z, nodes)))
                        z++;

        return z;
}
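
/*
 * Usage sketch (illustrative only): next_zones_zonelist() is the building
 * block of the for_each_zone_zonelist*() macros in <linux/mmzone.h>.  A
 * caller scanning the zones a GFP_KERNEL allocation could fall back to
 * might do roughly the following; node_zonelist() and gfp_zone() come from
 * <linux/gfp.h>, and the example_* helper is hypothetical.
 */
static inline void example_scan_zonelist(int nid)
{
        struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
                pr_info("candidate: node %d zone %s\n",
                        zone_to_nid(zone), zone->name);
}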

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * Returns 1 if @page really is the struct page backing @pfn in @zone.
 * Architectures with holes in their memmap may have struct pages that
 * do not correspond to the pfn they appear to map, so check both ways.
 */
int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone)
{
        if (page_to_pfn(page) != pfn)
                return 0;

        if (page_zone(page) != zone)
                return 0;

        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
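
/*
 * Usage sketch (illustrative only): validating pages during a pfn walk.
 * On !CONFIG_ARCH_HAS_HOLES_MEMORYMODEL kernels, <linux/mmzone.h> provides
 * a stub that always returns 1, so callers (e.g. the pagetypeinfo walk in
 * mm/vmstat.c) can use memmap_valid_within() unconditionally.  The
 * example_* helper below is hypothetical.
 */
static inline void example_walk_zone_pages(struct zone *zone)
{
        unsigned long pfn;

        for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                /* Skip memmap holes whose struct page is not meaningful. */
                if (!memmap_valid_within(pfn, page, zone))
                        continue;
                /* ... safe to inspect page here ... */
        }
}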

/* Zero @lruvec and initialise each of its LRU list heads to empty. */
void lruvec_init(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
}
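
/*
 * Usage sketch (illustrative only): immediately after lruvec_init(), every
 * per-LRU list head is empty and ready to be linked into.  The example_*
 * helper below is hypothetical.
 */
static inline void example_check_lruvec_empty(struct lruvec *lruvec)
{
        enum lru_list lru;

        for_each_lru(lru)
                WARN_ON(!list_empty(&lruvec->lists[lru]));
}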

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically store @cpupid in the LAST_CPUPID field of page->flags and
 * return the value that was there before.  The cmpxchg() loop retries
 * whenever another writer changed page->flags between our read and the
 * exchange.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
        unsigned long old_flags, flags;
        int last_cpupid;

        do {
                old_flags = flags = page->flags;
                last_cpupid = page_cpupid_last(page);

                flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
                flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
        } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

        return last_cpupid;
}
#endif
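
/*
 * Usage sketch (illustrative only): the NUMA-balancing hinting-fault path
 * records which cpu+pid last touched a page, roughly:
 *
 *      int this_cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(),
 *                                          current->pid);
 *      int last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 *
 * cpu_pid_to_cpupid() and page_cpupid_last() live in <linux/mm.h>; see
 * should_numa_migrate_memory() in kernel/sched/fair.c for a real caller.
 */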