linux/include/linux/memremap.h
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);

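/*
 * Illustrative sketch (not part of this interface): a driver that
 * wants the memmap for a device range allocated out of that range
 * itself might describe the arrangement like so, where res and
 * memmap_size are hypothetical driver state:
 *
 *	struct vmem_altmap altmap = {
 *		.base_pfn = PHYS_PFN(res->start),
 *		.reserve = PHYS_PFN(SZ_8K),	// driver-private area
 *		.free = PHYS_PFN(memmap_size),	// pages set aside for memmap
 *	};
 *
 * vmem_altmap_offset() then reports how many pages at the base of the
 * mapping are set aside (@reserve plus @free) and thus unavailable as
 * regular memory, and vmem_altmap_free() hands set-aside pages back
 * when a populated memmap is torn down.
 */
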
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE)
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	return NULL;
}
#endif
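
/*
 * Illustrative sketch: an architecture's vmemmap_populate() can use
 * to_vmem_altmap() to discover whether device-backed storage has been
 * set aside for the memmap range it is populating. The two helpers
 * named below are placeholders, not declarations from this header:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *			int node)
 *	{
 *		struct vmem_altmap *altmap = to_vmem_altmap(start);
 *
 *		if (altmap)
 *			return populate_from_altmap(start, end, node, altmap);
 *		return populate_from_buddy(start, end, node);
 *	}
 */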

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
	struct vmem_altmap *altmap;
	const struct resource *res;
	struct percpu_ref *ref;
	struct device *dev;
};

#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref,
		struct vmem_altmap *altmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on the kernel config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	return NULL;
}
#endif
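
/*
 * Illustrative sketch: a caller chooses between devm_memremap_pages()
 * and plain devm_memremap() based on config, as the stub above
 * demands. pmem is a hypothetical driver structure that embeds the
 * percpu_ref (setup elided), and use_struct_pages is a hypothetical
 * driver policy flag:
 *
 *	if (IS_ENABLED(CONFIG_ZONE_DEVICE) && use_struct_pages)
 *		addr = devm_memremap_pages(dev, res, &pmem->ref, altmap);
 *	else
 *		addr = devm_memremap(dev, res->start,
 *				resource_size(res), MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */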

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	const struct resource *res = pgmap ? pgmap->res : NULL;
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference so
	 * we can simply do a blind increment
	 */
	if (res && phys >= res->start && phys <= res->end) {
		percpu_ref_get(pgmap->ref);
		return pgmap;
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = find_dev_pagemap(phys);
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
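
/*
 * Illustrative sketch: each successful get_dev_pagemap() takes one
 * reference that must be matched by a put_dev_pagemap(). While a
 * reference is held, the pgmap can be passed back in to skip the
 * rcu lookup for a neighboring pfn:
 *
 *	struct dev_pagemap *pgmap, *next;
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);	// slow-path lookup
 *	if (!pgmap)
 *		return -ENXIO;
 *	next = get_dev_pagemap(pfn + 1, pgmap);	// fast path if same mapping
 *	...
 *	put_dev_pagemap(next);			// NULL-safe
 *	put_dev_pagemap(pgmap);
 */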
#endif /* _LINUX_MEMREMAP_H_ */