linux/drivers/staging/android/ion/ion_priv.h
/*
 * drivers/staging/android/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>

#include "ion.h"

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:                reference count
 * @node:               node in the ion_device buffers tree
 * @list:               node in a heap's deferred free list while the buffer
 *                      is waiting to be freed
 * @dev:                back pointer to the ion_device
 * @heap:               back pointer to the heap the buffer came from
 * @flags:              buffer specific flags
 * @private_flags:      internal buffer specific flags
 * @size:               size of the buffer
 * @priv_virt:          private data for the buffer representable as
 *                      a void *
 * @lock:               protects the buffer's cnt fields
 * @kmap_cnt:           number of times the buffer is mapped to the kernel
 * @vaddr:              the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:           number of times the buffer is mapped for dma
 * @sg_table:           the sg table for the buffer if dmap_cnt is not zero
 * @pages:              flat array of pages in the buffer -- used by fault
 *                      handler and only valid for buffers that are faulted in
 * @vmas:               list of vmas mapping this buffer
 * @handle_count:       count of handles referencing this buffer
 * @task_comm:          taskcomm of last client to reference this buffer in a
 *                      handle, used for debugging
 * @pid:                pid of last client to reference this buffer in a
 *                      handle, used for debugging
 */
struct ion_buffer {
        struct kref ref;
        union {
                struct rb_node node;
                struct list_head list;
        };
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        void *priv_virt;
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        int dmap_cnt;
        struct sg_table *sg_table;
        struct page **pages;
        struct list_head vmas;
        /* used to track orphaned buffers */
        int handle_count;
        char task_comm[TASK_COMM_LEN];
        pid_t pid;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:                the actual misc device
 * @buffers:            an rb tree of all the existing buffers
 * @buffer_lock:        lock protecting the tree of buffers
 * @lock:               rwsem protecting the tree of heaps and clients
 * @heaps:              list of all the heaps in the system
 * @custom_ioctl:       arch specific ioctl function if applicable
 * @clients:            an rb tree of all the existing clients
 * @debug_root:         root debugfs directory for the device
 * @heaps_debug_root:   debugfs directory holding per-heap debug files
 * @clients_debug_root: debugfs directory holding per-client debug files
 * @heap_cnt:           number of heaps added to the device
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                             unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
        struct dentry *heaps_debug_root;
        struct dentry *clients_debug_root;
        int heap_cnt;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:               node in the tree of all clients
 * @dev:                backpointer to ion device
 * @handles:            an rb tree of all the handles in this client
 * @idr:                an idr space for allocating handle ids
 * @lock:               lock protecting the tree of handles
 * @name:               used for debugging
 * @display_name:       used for debugging (unique version of @name)
 * @display_serial:     used for debugging (to make display_name unique)
 * @task:               used for debugging
 * @pid:                pid of the client task, used for debugging
 * @debug_root:         debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct idr idr;
        struct mutex lock;
        const char *name;
        char *display_name;
        int display_serial;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

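/*
 * Illustrative sketch (not part of this header): walking a client's handle
 * tree under client->lock, as the locking rule above requires.  The
 * dump_client_handles() name and the pr_info() output are hypothetical;
 * only the ion_client/ion_handle fields used here come from this file.
 *
 * static void dump_client_handles(struct ion_client *client)
 * {
 *         struct rb_node *n;
 *
 *         mutex_lock(&client->lock);
 *         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
 *                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
 *                                                      node);
 *
 *                 pr_info("handle %d -> buffer of size %zu\n",
 *                         handle->id, handle->buffer->size);
 *         }
 *         mutex_unlock(&client->lock);
 * }
 */
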
/**
 * ion_handle - a client local reference to a buffer
 * @ref:                reference count
 * @client:             back pointer to the client the buffer resides in
 * @buffer:             pointer to the buffer
 * @node:               node in the client's handle rbtree
 * @kmap_cnt:           count of times this client has mapped to kernel
 * @id:                 client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
        int id;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:           allocate memory
 * @free:               free memory
 * @map_kernel:         map memory into the kernel
 * @unmap_kernel:       unmap memory from the kernel
 * @map_user:           map memory to userspace
 * @shrink:             shrink the heap's caches (e.g. page pools), returning
 *                      pages to the system
 *
 * allocate and map_user return 0 on success, -errno on error.
 * map_kernel returns a pointer on success, ERR_PTR on error.  @free
 * will be called with ION_PRIV_FLAG_SHRINKER_FREE set in the buffer's
 * private_flags when called from a shrinker.  In that case, the pages
 * being freed must be truly freed back to the system, not put in a
 * page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long align, unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};

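/*
 * Illustrative sketch (not part of this header): a minimal allocate()/free()
 * pair for a hypothetical heap that backs each buffer with one physically
 * contiguous allocation.  my_contig_allocate()/my_contig_free() are made-up
 * names, and the convention of filling in buffer->sg_table from allocate()
 * is an assumption based on how the in-tree heaps behave in this version.
 *
 * static int my_contig_allocate(struct ion_heap *heap,
 *                               struct ion_buffer *buffer,
 *                               unsigned long len, unsigned long align,
 *                               unsigned long flags)
 * {
 *         struct sg_table *table;
 *         struct page *page;
 *         int ret;
 *
 *         page = alloc_pages(GFP_KERNEL, get_order(len));
 *         if (!page)
 *                 return -ENOMEM;
 *
 *         table = kmalloc(sizeof(*table), GFP_KERNEL);
 *         if (!table) {
 *                 ret = -ENOMEM;
 *                 goto free_pages;
 *         }
 *         ret = sg_alloc_table(table, 1, GFP_KERNEL);
 *         if (ret)
 *                 goto free_table;
 *
 *         sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);
 *         buffer->sg_table = table;
 *         return 0;
 *
 * free_table:
 *         kfree(table);
 * free_pages:
 *         __free_pages(page, get_order(len));
 *         return ret;
 * }
 *
 * static void my_contig_free(struct ion_buffer *buffer)
 * {
 *         struct sg_table *table = buffer->sg_table;
 *
 *         __free_pages(sg_page(table->sgl), get_order(buffer->size));
 *         sg_free_table(table);
 *         kfree(table);
 * }
 */
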
/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)

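/*
 * Illustrative sketch (not part of this header): a pooled heap's free() op
 * honouring ION_PRIV_FLAG_SHRINKER_FREE.  struct my_heap and its pool member
 * are hypothetical; the point is that pooled storage must bypass the pool
 * and go straight back to the system when the flag is set.
 *
 * static void my_pooled_free(struct ion_buffer *buffer)
 * {
 *         struct my_heap *heap = container_of(buffer->heap, struct my_heap,
 *                                             heap);
 *         struct page *page = sg_page(buffer->sg_table->sgl);
 *
 *         if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *                 __free_pages(page, heap->pool->order);
 *         else
 *                 ion_page_pool_free(heap->pool, page);
 *
 *         sg_free_table(buffer->sg_table);
 *         kfree(buffer->sg_table);
 * }
 */
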
/**
 * struct ion_heap - represents a heap in the system
 * @node:               plist node used to add the heap to the device's
 *                      list of heaps
 * @dev:                back pointer to the ion_device
 * @type:               type of heap
 * @ops:                ops struct as above
 * @flags:              flags
 * @id:                 id of heap, also indicates priority of this heap when
 *                      allocating.  These are specified by platform data and
 *                      MUST be unique
 * @name:               used for debugging
 * @shrinker:           a shrinker for the heap
 * @free_list:          free list head if deferred free is used
 * @free_list_size:     size of the deferred free list in bytes
 * @free_lock:          protects the free list
 * @waitqueue:          queue to wait on from deferred free thread
 * @task:               task struct of deferred free thread
 * @debug_show:         called when heap debug file is read to add any
 *                      heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        unsigned long flags;
        unsigned int id;
        const char *name;
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - test whether an ion buffer is cached
 * @buffer:             buffer
 *
 * Indicates whether this ion buffer is cached.
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:             buffer
 *
 * Indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:       arch specific ioctl function if applicable
 *
 * returns a valid device on success or an ERR_PTR-encoded errno on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:                the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:                the device
 * @heap:               the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

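/*
 * Illustrative sketch (not part of this header): typical bring-up order in a
 * platform ion driver.  my_ion_probe(), num_heaps and heap_data are
 * hypothetical, and error handling is simplified (heaps created before a
 * failure are not torn down).
 *
 * static struct ion_device *idev;
 *
 * static int my_ion_probe(struct ion_platform_heap *heap_data, int num_heaps)
 * {
 *         int i;
 *
 *         idev = ion_device_create(NULL);
 *         if (IS_ERR(idev))
 *                 return PTR_ERR(idev);
 *
 *         for (i = 0; i < num_heaps; i++) {
 *                 struct ion_heap *heap = ion_heap_create(&heap_data[i]);
 *
 *                 if (IS_ERR(heap)) {
 *                         ion_device_destroy(idev);
 *                         return PTR_ERR(heap);
 *                 }
 *                 ion_device_add_heap(idev, heap);
 *         }
 *         return 0;
 * }
 */
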
/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
                        struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

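/*
 * Illustrative sketch (not part of this header): a heap whose buffers carry
 * a valid buffer->sg_table can usually point its mapping ops straight at
 * these helpers.  my_contig_allocate/my_contig_free are the hypothetical ops
 * sketched earlier; only the helper names below are declared in this file.
 *
 * static struct ion_heap_ops my_contig_heap_ops = {
 *         .allocate       = my_contig_allocate,
 *         .free           = my_contig_free,
 *         .map_kernel     = ion_heap_map_kernel,
 *         .unmap_kernel   = ion_heap_unmap_kernel,
 *         .map_user       = ion_heap_map_user,
 * };
 */
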
/**
 * ion_heap_init_shrinker
 * @heap:               the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
void ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:               the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred freeing. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

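/*
 * Illustrative sketch (not part of this header): a heap constructor opting
 * in to deferred freeing.  my_heap_create() and my_heap_ops are
 * hypothetical.  As documented above, the ion core is expected to call
 * ion_heap_init_deferred_free() and ion_heap_init_shrinker() for a heap
 * that sets this flag (or provides a shrink op) when the heap is registered
 * with ion_device_add_heap().
 *
 * struct ion_heap *my_heap_create(struct ion_platform_heap *unused)
 * {
 *         struct ion_heap *heap;
 *
 *         heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 *         if (!heap)
 *                 return ERR_PTR(-ENOMEM);
 *
 *         heap->ops = &my_heap_ops;
 *         heap->type = ION_HEAP_TYPE_CUSTOM;
 *         heap->flags = ION_HEAP_FLAG_DEFER_FREE;
 *         return heap;
 * }
 */
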
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:               the heap
 * @buffer:             the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:               the heap
 * @size:               amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher than @size,
 * depending on the size of the items on the list, or lower if there is
 * insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *                              list, skipping any heap-specific
 *                              pooling or caching mechanisms
 *
 * @heap:               the heap
 * @size:               amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher than @size,
 * depending on the size of the items on the list, or lower if there is
 * insufficient total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
                                        size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:               the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);


/**
 * functions for creating and destroying the built-in ion heaps.
 * Architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre-allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems.
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:         number of highmem items in the pool
 * @low_count:          number of lowmem items in the pool
 * @high_items:         list of highmem items
 * @low_items:          list of lowmem items
 * @mutex:              lock protecting this struct, in particular the counts
 *                      and item lists
 * @gfp_mask:           gfp_mask to use for allocations
 * @order:              order of pages in the pool
 * @list:               plist node for list of pools
 * @cached:             whether the pool holds pages for cached buffers
 *
 * Allows you to keep a pool of pre-allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
        int high_count;
        int low_count;
        bool cached;
        struct list_head high_items;
        struct list_head low_items;
        struct mutex mutex;
        gfp_t gfp_mask;
        unsigned int order;
        struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
                                           bool cached);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);

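/*
 * Illustrative sketch (not part of this header): a heap keeping one pool per
 * page order.  orders[] and my_pools_init() are hypothetical; pages returned
 * with ion_page_pool_free() are kept in the pool for later
 * ion_page_pool_alloc() calls.
 *
 * static const unsigned int orders[] = { 8, 4, 0 };
 * static struct ion_page_pool *pools[ARRAY_SIZE(orders)];
 *
 * static int my_pools_init(void)
 * {
 *         int i;
 *
 *         for (i = 0; i < ARRAY_SIZE(orders); i++) {
 *                 pools[i] = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO,
 *                                                 orders[i], false);
 *                 if (!pools[i])
 *                         return -ENOMEM;
 *         }
 *         return 0;
 * }
 */
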
/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:               the pool
 * @gfp_mask:           the memory type to reclaim
 * @nr_to_scan:         number of pages to try to free
 *
 * returns the number of pages freed
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                          int nr_to_scan);

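/*
 * Illustrative sketch (not part of this header): a heap shrink op built on
 * ion_page_pool_shrink(), draining the hypothetical per-order pools[] from
 * the sketch above.  Treating nr_to_scan == 0 as "only report how much could
 * be freed" mirrors the in-tree system heap and is an assumption here.
 *
 * static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *                           int nr_to_scan)
 * {
 *         bool only_count = nr_to_scan == 0;
 *         int i, nr_freed, nr_total = 0;
 *
 *         for (i = 0; i < ARRAY_SIZE(orders); i++) {
 *                 nr_freed = ion_page_pool_shrink(pools[i], gfp_mask,
 *                                                 nr_to_scan);
 *                 nr_total += nr_freed;
 *
 *                 if (!only_count) {
 *                         nr_to_scan -= nr_freed;
 *                         if (nr_to_scan <= 0)
 *                                 break;
 *                 }
 *         }
 *         return nr_total;
 * }
 */
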
/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *                             device
 * @dev:                the device the pages will be used with
 * @page:               the first page to be flushed
 * @size:               size in bytes of region to be flushed
 * @dir:                direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
                size_t size, enum dma_data_direction dir);

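/*
 * Illustrative sketch (not part of this header): flushing freshly allocated
 * buffer pages before handing them out for DMA.  Passing NULL for @dev
 * mirrors how some in-tree heaps currently call this and is an assumption,
 * not a guarantee of the interface.
 *
 * page = alloc_pages(GFP_KERNEL, get_order(size));
 * if (!page)
 *         return -ENOMEM;
 * ion_pages_sync_for_device(NULL, page, PAGE_ALIGN(size), DMA_BIDIRECTIONAL);
 */
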
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

int ion_sync_for_device(struct ion_client *client, int fd);

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
                                                int id);

void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);

int ion_handle_put_nolock(struct ion_handle *handle);

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
                                                int id);

int ion_handle_put(struct ion_handle *handle);

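/*
 * Illustrative sketch (not part of this header): resolving a handle id to an
 * ion_handle and dropping the reference when done.  do_something_with() is
 * hypothetical; the ion_handle_get_by_id()/ion_handle_put() pairing is the
 * point.
 *
 * static int my_op(struct ion_client *client, int id)
 * {
 *         struct ion_handle *handle;
 *         int ret;
 *
 *         handle = ion_handle_get_by_id(client, id);
 *         if (IS_ERR(handle))
 *                 return PTR_ERR(handle);
 *
 *         ret = do_something_with(handle->buffer);
 *
 *         ion_handle_put(handle);
 *         return ret;
 * }
 */
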
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);

#endif /* _ION_PRIV_H */