linux/drivers/staging/android/ion/ion.h
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/staging/android/ion/ion.h
   4 *
   5 * Copyright (C) 2011 Google, Inc.
   6 */
   7
   8#ifndef _ION_H
   9#define _ION_H
  10
  11#include <linux/device.h>
  12#include <linux/dma-direction.h>
  13#include <linux/kref.h>
  14#include <linux/mm_types.h>
  15#include <linux/mutex.h>
  16#include <linux/rbtree.h>
  17#include <linux/sched.h>
  18#include <linux/shrinker.h>
  19#include <linux/types.h>
  20#include <linux/miscdevice.h>
  21
  22#include "../uapi/ion.h"
  23
  24/**
  25 * struct ion_platform_heap - defines a heap in the given platform
  26 * @type:       type of the heap from ion_heap_type enum
  27 * @id:         unique identifier for heap.  When allocating higher numb ers
  28 *              will be allocated from first.  At allocation these are passed
  29 *              as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
  30 * @name:       used for debug purposes
  31 * @base:       base address of heap in physical memory if applicable
  32 * @size:       size of the heap in bytes if applicable
  33 * @priv:       private info passed from the board file
  34 *
  35 * Provided by the board file.
  36 */
  37struct ion_platform_heap {
  38        enum ion_heap_type type;
  39        unsigned int id;
  40        const char *name;
  41        phys_addr_t base;
  42        size_t size;
  43        phys_addr_t align;
  44        void *priv;
  45};
  46
  47/**
  48 * struct ion_buffer - metadata for a particular buffer
  49 * @ref:                reference count
  50 * @node:               node in the ion_device buffers tree
  51 * @dev:                back pointer to the ion_device
  52 * @heap:               back pointer to the heap the buffer came from
  53 * @flags:              buffer specific flags
  54 * @private_flags:      internal buffer specific flags
  55 * @size:               size of the buffer
  56 * @priv_virt:          private data to the buffer representable as
  57 *                      a void *
  58 * @lock:               protects the buffers cnt fields
  59 * @kmap_cnt:           number of times the buffer is mapped to the kernel
  60 * @vaddr:              the kernel mapping if kmap_cnt is not zero
  61 * @sg_table:           the sg table for the buffer if dmap_cnt is not zero
  62 */
  63struct ion_buffer {
  64        union {
  65                struct rb_node node;
  66                struct list_head list;
  67        };
  68        struct ion_device *dev;
  69        struct ion_heap *heap;
  70        unsigned long flags;
  71        unsigned long private_flags;
  72        size_t size;
  73        void *priv_virt;
  74        struct mutex lock;
  75        int kmap_cnt;
  76        void *vaddr;
  77        struct sg_table *sg_table;
  78        struct list_head attachments;
  79};
  80
  81void ion_buffer_destroy(struct ion_buffer *buffer);
  82
  83/**
  84 * struct ion_device - the metadata of the ion device node
  85 * @dev:                the actual misc device
  86 * @buffers:            an rb tree of all the existing buffers
  87 * @buffer_lock:        lock protecting the tree of buffers
  88 * @lock:               rwsem protecting the tree of heaps and clients
  89 */
  90struct ion_device {
  91        struct miscdevice dev;
  92        struct rb_root buffers;
  93        struct mutex buffer_lock;
  94        struct rw_semaphore lock;
  95        struct plist_head heaps;
  96        struct dentry *debug_root;
  97        int heap_cnt;
  98};
  99
 100/**
 101 * struct ion_heap_ops - ops to operate on a given heap
 102 * @allocate:           allocate memory
 103 * @free:               free memory
 104 * @map_kernel          map memory to the kernel
 105 * @unmap_kernel        unmap memory to the kernel
 106 * @map_user            map memory to userspace
 107 *
 108 * allocate, phys, and map_user return 0 on success, -errno on error.
 109 * map_dma and map_kernel return pointer on success, ERR_PTR on
 110 * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 111 * the buffer's private_flags when called from a shrinker. In that
 112 * case, the pages being free'd must be truly free'd back to the
 113 * system, not put in a page pool or otherwise cached.
 114 */
 115struct ion_heap_ops {
 116        int (*allocate)(struct ion_heap *heap,
 117                        struct ion_buffer *buffer, unsigned long len,
 118                        unsigned long flags);
 119        void (*free)(struct ion_buffer *buffer);
 120        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 121        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 122        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
 123                        struct vm_area_struct *vma);
 124        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
 125};
 126
 127/**
 128 * heap flags - flags between the heaps and core ion code
 129 */
 130#define ION_HEAP_FLAG_DEFER_FREE BIT(0)
 131
 132/**
 133 * private flags - flags internal to ion
 134 */
 135/*
 136 * Buffer is being freed from a shrinker function. Skip any possible
 137 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 138 * any buffer storage that came from the system allocator will be
 139 * returned to the system allocator.
 140 */
 141#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
 142
 143/**
 144 * struct ion_heap - represents a heap in the system
 145 * @node:               rb node to put the heap on the device's tree of heaps
 146 * @dev:                back pointer to the ion_device
 147 * @type:               type of heap
 148 * @ops:                ops struct as above
 149 * @flags:              flags
 150 * @id:                 id of heap, also indicates priority of this heap when
 151 *                      allocating.  These are specified by platform data and
 152 *                      MUST be unique
 153 * @name:               used for debugging
 154 * @shrinker:           a shrinker for the heap
 155 * @free_list:          free list head if deferred free is used
 156 * @free_list_size      size of the deferred free list in bytes
 157 * @lock:               protects the free list
 158 * @waitqueue:          queue to wait on from deferred free thread
 159 * @task:               task struct of deferred free thread
 160 * @debug_show:         called when heap debug file is read to add any
 161 *                      heap specific debug info to output
 162 *
 163 * Represents a pool of memory from which buffers can be made.  In some
 164 * systems the only heap is regular system memory allocated via vmalloc.
 165 * On others, some blocks might require large physically contiguous buffers
 166 * that are allocated from a specially reserved heap.
 167 */
 168struct ion_heap {
 169        struct plist_node node;
 170        struct ion_device *dev;
 171        enum ion_heap_type type;
 172        struct ion_heap_ops *ops;
 173        unsigned long flags;
 174        unsigned int id;
 175        const char *name;
 176        struct shrinker shrinker;
 177        struct list_head free_list;
 178        size_t free_list_size;
 179        spinlock_t free_lock;
 180        wait_queue_head_t waitqueue;
 181        struct task_struct *task;
 182
 183        int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
 184                          void *unused);
 185};
 186
 187/**
 188 * ion_buffer_cached - this ion buffer is cached
 189 * @buffer:             buffer
 190 *
 191 * indicates whether this ion buffer is cached
 192 */
 193bool ion_buffer_cached(struct ion_buffer *buffer);
 194
 195/**
 196 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 197 * @buffer:             buffer
 198 *
 199 * indicates whether userspace mappings of this buffer will be faulted
 200 * in, this can affect how buffers are allocated from the heap.
 201 */
 202bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
 203
 204/**
 205 * ion_device_add_heap - adds a heap to the ion device
 206 * @heap:               the heap to add
 207 */
 208void ion_device_add_heap(struct ion_heap *heap);
 209
 210/**
 211 * some helpers for common operations on buffers using the sg_table
 212 * and vaddr fields
 213 */
 214void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
 215void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
 216int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 217                      struct vm_area_struct *vma);
 218int ion_heap_buffer_zero(struct ion_buffer *buffer);
 219int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 220
 221int ion_alloc(size_t len,
 222              unsigned int heap_id_mask,
 223              unsigned int flags);
 224
 225/**
 226 * ion_heap_init_shrinker
 227 * @heap:               the heap
 228 *
 229 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 230 * this function will be called to setup a shrinker to shrink the freelists
 231 * and call the heap's shrink op.
 232 */
 233int ion_heap_init_shrinker(struct ion_heap *heap);
 234
 235/**
 236 * ion_heap_init_deferred_free -- initialize deferred free functionality
 237 * @heap:               the heap
 238 *
 239 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 240 * be called to setup deferred frees. Calls to free the buffer will
 241 * return immediately and the actual free will occur some time later
 242 */
 243int ion_heap_init_deferred_free(struct ion_heap *heap);
 244
 245/**
 246 * ion_heap_freelist_add - add a buffer to the deferred free list
 247 * @heap:               the heap
 248 * @buffer:             the buffer
 249 *
 250 * Adds an item to the deferred freelist.
 251 */
 252void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
 253
 254/**
 255 * ion_heap_freelist_drain - drain the deferred free list
 256 * @heap:               the heap
 257 * @size:               amount of memory to drain in bytes
 258 *
 259 * Drains the indicated amount of memory from the deferred freelist immediately.
 260 * Returns the total amount freed.  The total freed may be higher depending
 261 * on the size of the items in the list, or lower if there is insufficient
 262 * total memory on the freelist.
 263 */
 264size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
 265
 266/**
 267 * ion_heap_freelist_shrink - drain the deferred free
 268 *                              list, skipping any heap-specific
 269 *                              pooling or caching mechanisms
 270 *
 271 * @heap:               the heap
 272 * @size:               amount of memory to drain in bytes
 273 *
 274 * Drains the indicated amount of memory from the deferred freelist immediately.
 275 * Returns the total amount freed.  The total freed may be higher depending
 276 * on the size of the items in the list, or lower if there is insufficient
 277 * total memory on the freelist.
 278 *
 279 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 280 * page pools or otherwise cache the pages. Everything must be
 281 * genuinely free'd back to the system. If you're free'ing from a
 282 * shrinker you probably want to use this. Note that this relies on
 283 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 284 * flag.
 285 */
 286size_t ion_heap_freelist_shrink(struct ion_heap *heap,
 287                                size_t size);
 288
 289/**
 290 * ion_heap_freelist_size - returns the size of the freelist in bytes
 291 * @heap:               the heap
 292 */
 293size_t ion_heap_freelist_size(struct ion_heap *heap);
 294
 295/**
 296 * functions for creating and destroying a heap pool -- allows you
 297 * to keep a pool of pre allocated memory to use from your heap.  Keeping
 298 * a pool of memory that is ready for dma, ie any cached mapping have been
 299 * invalidated from the cache, provides a significant performance benefit on
 300 * many systems
 301 */
 302
 303/**
 304 * struct ion_page_pool - pagepool struct
 305 * @high_count:         number of highmem items in the pool
 306 * @low_count:          number of lowmem items in the pool
 307 * @high_items:         list of highmem items
 308 * @low_items:          list of lowmem items
 309 * @mutex:              lock protecting this struct and especially the count
 310 *                      item list
 311 * @gfp_mask:           gfp_mask to use from alloc
 312 * @order:              order of pages in the pool
 313 * @list:               plist node for list of pools
 314 * @cached:             it's cached pool or not
 315 *
 316 * Allows you to keep a pool of pre allocated pages to use from your heap.
 317 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
 318 * been invalidated from the cache, provides a significant performance benefit
 319 * on many systems
 320 */
 321struct ion_page_pool {
 322        int high_count;
 323        int low_count;
 324        bool cached;
 325        struct list_head high_items;
 326        struct list_head low_items;
 327        struct mutex mutex;
 328        gfp_t gfp_mask;
 329        unsigned int order;
 330        struct plist_node list;
 331};
 332
 333struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 334                                           bool cached);
 335void ion_page_pool_destroy(struct ion_page_pool *pool);
 336struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
 337void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
 338
 339/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 340 * @pool:               the pool
 341 * @gfp_mask:           the memory type to reclaim
 342 * @nr_to_scan:         number of items to shrink in pages
 343 *
 344 * returns the number of items freed in pages
 345 */
 346int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 347                         int nr_to_scan);
 348
 349long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 350
 351int ion_query_heaps(struct ion_heap_query *query);
 352
 353#endif /* _ION_H */
 354