linux/drivers/staging/android/ion/ion_priv.h
/*
 * drivers/staging/android/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>

#include "ion.h"

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:                reference count
 * @node:               node in the ion_device buffers tree
 * @dev:                back pointer to the ion_device
 * @heap:               back pointer to the heap the buffer came from
 * @flags:              buffer specific flags
 * @private_flags:      internal buffer specific flags
 * @size:               size of the buffer
 * @priv_virt:          private data to the buffer representable as
 *                      a void *
 * @priv_phys:          private data to the buffer representable as
 *                      an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:               protects the buffer's cnt fields
 * @kmap_cnt:           number of times the buffer is mapped to the kernel
 * @vaddr:              the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:           number of times the buffer is mapped for dma
 * @sg_table:           the sg table for the buffer if dmap_cnt is not zero
 * @pages:              flat array of pages in the buffer -- used by fault
 *                      handler and only valid for buffers that are faulted in
 * @vmas:               list of vma's mapping this buffer
 * @handle_count:       count of handles referencing this buffer
 * @task_comm:          task_comm of the last client to reference this buffer
 *                      in a handle, used for debugging
 * @pid:                pid of the last client to reference this buffer in a
 *                      handle, used for debugging
 */
struct ion_buffer {
        struct kref ref;
        union {
                struct rb_node node;
                struct list_head list;
        };
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        union {
                void *priv_virt;
                ion_phys_addr_t priv_phys;
        };
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        int dmap_cnt;
        struct sg_table *sg_table;
        struct page **pages;
        struct list_head vmas;
        /* used to track orphaned buffers */
        int handle_count;
        char task_comm[TASK_COMM_LEN];
        pid_t pid;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:           allocate memory
 * @free:               free memory
 * @phys:               get physical address of a buffer (only defined on
 *                      physically contiguous heaps)
 * @map_dma:            map the memory for dma to a scatterlist
 * @unmap_dma:          unmap the memory for dma
 * @map_kernel:         map memory to the kernel
 * @unmap_kernel:       unmap memory from the kernel
 * @map_user:           map memory to userspace
 *
 * allocate, phys, and map_user return 0 on success, -errno on error.
 * map_dma and map_kernel return a pointer on success, ERR_PTR on
 * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long align, unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
                    ion_phys_addr_t *addr, size_t *len);
        struct sg_table * (*map_dma)(struct ion_heap *heap,
                                     struct ion_buffer *buffer);
        void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)

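/*
 * Illustrative sketch, not part of the original header: one way a
 * hypothetical heap's free op could honor ION_PRIV_FLAG_SHRINKER_FREE as
 * described above.  The example_release_pages()/example_pool_put()
 * helpers are made up for the example; only the ion_buffer fields and
 * the flag come from this file.
 */
#if 0
static void example_heap_free(struct ion_buffer *buffer)
{
        if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
                example_release_pages(buffer);  /* really give pages back */
        else
                example_pool_put(buffer);       /* caching in a pool is ok */
}
#endif
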
/**
 * struct ion_heap - represents a heap in the system
 * @node:               plist node for the device's list of heaps
 * @dev:                back pointer to the ion_device
 * @type:               type of heap
 * @ops:                ops struct as above
 * @flags:              flags
 * @id:                 id of heap, also indicates priority of this heap when
 *                      allocating.  These are specified by platform data and
 *                      MUST be unique
 * @name:               used for debugging
 * @shrinker:           a shrinker for the heap
 * @free_list:          free list head if deferred free is used
 * @free_list_size:     size of the deferred free list in bytes
 * @free_lock:          protects the free list
 * @waitqueue:          queue to wait on from deferred free thread
 * @task:               task struct of deferred free thread
 * @debug_show:         called when heap debug file is read to add any
 *                      heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        unsigned long flags;
        unsigned int id;
        const char *name;
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - this ion buffer is cached
 * @buffer:             buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:             buffer
 *
 * indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

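/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * allocate op consulting the two predicates above to decide whether the
 * freshly allocated pages must be made dma-ready up front.  The
 * example_alloc_table() and example_sync_pages_for_dma() helpers are
 * assumptions for the example.
 */
#if 0
static int example_heap_allocate(struct ion_heap *heap,
                                 struct ion_buffer *buffer,
                                 unsigned long len, unsigned long align,
                                 unsigned long flags)
{
        struct sg_table *table = example_alloc_table(len);

        if (IS_ERR(table))
                return PTR_ERR(table);
        buffer->priv_virt = table;

        /*
         * Uncached buffers that are not faulted in lazily are handed to
         * devices directly, so flush them before first use.
         */
        if (!ion_buffer_cached(buffer) &&
            !ion_buffer_fault_user_mappings(buffer))
                example_sync_pages_for_dma(table);
        return 0;
}
#endif
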
/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:       arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR-encoded error
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg));

/**
 * ion_device_destroy - frees the device and its resources
 * @dev:                the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:                the device
 * @heap:               the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

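/*
 * Illustrative sketch, not part of the original header: typical bring-up
 * using the calls above, assuming a platform-provided ion_platform_heap
 * description.  The error handling shown is only an example.
 */
#if 0
static struct ion_device *example_ion_probe(struct ion_platform_heap *pdata)
{
        struct ion_device *idev;
        struct ion_heap *heap;

        idev = ion_device_create(NULL);         /* no arch specific ioctl */
        if (IS_ERR(idev))
                return idev;

        heap = ion_heap_create(pdata);
        if (IS_ERR(heap)) {
                ion_device_destroy(idev);
                return ERR_CAST(heap);
        }

        ion_device_add_heap(idev, heap);
        return idev;
}
#endif
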
/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
                        struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

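/*
 * Illustrative sketch, not part of the original header: a heap whose
 * buffers carry an sg_table can point its mapping ops directly at the
 * generic helpers above.  The example_* callbacks are placeholders.
 */
#if 0
static struct ion_heap_ops example_heap_ops = {
        .allocate       = example_allocate,
        .free           = example_free,
        .map_dma        = example_map_dma,
        .unmap_dma      = example_unmap_dma,
        .map_kernel     = ion_heap_map_kernel,
        .unmap_kernel   = ion_heap_unmap_kernel,
        .map_user       = ion_heap_map_user,
};
#endif
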
/**
 * ion_heap_init_shrinker
 * @heap:               the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
void ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:               the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred freeing. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

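/*
 * Illustrative sketch, not part of the original header: the kind of check
 * the heap-registration path is described as making before calling the
 * two init functions above.  example_register_heap() itself is fictional.
 */
#if 0
static int example_register_heap(struct ion_heap *heap)
{
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
                int ret = ion_heap_init_deferred_free(heap);

                if (ret)
                        return ret;
        }

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
                ion_heap_init_shrinker(heap);

        return 0;
}
#endif
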
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:               the heap
 * @buffer:             the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:               the heap
 * @size:               amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *                              list, skipping any heap-specific
 *                              pooling or caching mechanisms
 *
 * @heap:               the heap
 * @size:               amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages.  Everything must be
 * genuinely freed back to the system.  If you're freeing from a
 * shrinker you probably want to use this.  Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:               the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);


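/*
 * Illustrative sketch, not part of the original header: a shrink op built
 * on the freelist helpers above.  Converting between pages and bytes with
 * PAGE_SIZE is an assumption of this example.
 */
#if 0
static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                               int nr_to_scan)
{
        /* a query pass reports how much could be reclaimed, in pages */
        if (!nr_to_scan)
                return ion_heap_freelist_size(heap) / PAGE_SIZE;

        /* skip pools/caches so the memory really goes back to the system */
        return ion_heap_freelist_shrink(heap, nr_to_scan * PAGE_SIZE) /
                PAGE_SIZE;
}
#endif
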
/**
 * functions for creating and destroying the built-in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

/**
 * kernel API to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
                                      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size);
/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that an allocation failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

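/*
 * Illustrative sketch, not part of the original header: allocate/free ops
 * of a hypothetical carveout-backed heap using the calls above and
 * checking for ION_CARVEOUT_ALLOCATE_FAIL.
 */
#if 0
static int example_carveout_allocate(struct ion_heap *heap,
                                     struct ion_buffer *buffer,
                                     unsigned long len, unsigned long align,
                                     unsigned long flags)
{
        ion_phys_addr_t paddr = ion_carveout_allocate(heap, len, align);

        if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
                return -ENOMEM;

        buffer->priv_phys = paddr;
        return 0;
}

static void example_carveout_free(struct ion_buffer *buffer)
{
        ion_carveout_free(buffer->heap, buffer->priv_phys, buffer->size);
}
#endif
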
/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre-allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:         number of highmem items in the pool
 * @low_count:          number of lowmem items in the pool
 * @high_items:         list of highmem items
 * @low_items:          list of lowmem items
 * @mutex:              lock protecting this struct, in particular the counts
 *                      and item lists
 * @gfp_mask:           gfp_mask to use for allocations
 * @order:              order of pages in the pool
 * @list:               plist node for list of pools
 *
 * Allows you to keep a pool of pre-allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
        int high_count;
        int low_count;
        struct list_head high_items;
        struct list_head low_items;
        struct mutex mutex;
        gfp_t gfp_mask;
        unsigned int order;
        struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:               the pool
 * @gfp_mask:           the memory type to reclaim
 * @nr_to_scan:         number of items to shrink, in pages
 *
 * returns the number of pages freed
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                          int nr_to_scan);

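/*
 * Illustrative sketch, not part of the original header: a heap keeping a
 * single order-0 pool of highmem-capable pages.  The gfp flags and order
 * are example choices only.
 */
#if 0
static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
        example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
        return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
        /* obtain a page through the pool */
        return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
        /* hand the page back to the pool for reuse instead of freeing it */
        ion_page_pool_free(example_pool, page);
}
#endif
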
/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *                             device
 * @dev:                the device the pages will be used with
 * @page:               the first page to be flushed
 * @size:               size in bytes of region to be flushed
 * @dir:                direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
                size_t size, enum dma_data_direction dir);

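/*
 * Illustrative sketch, not part of the original header: flushing freshly
 * allocated pages before a device uses them for dma.  Passing a NULL
 * device and using DMA_BIDIRECTIONAL here are assumptions of the example,
 * not requirements of the interface.
 */
#if 0
static struct page *example_alloc_for_dma(unsigned int order)
{
        struct page *page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO, order);

        if (!page)
                return NULL;

        /* make sure any dirty cache lines covering the pages reach memory */
        ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
                                  DMA_BIDIRECTIONAL);
        return page;
}
#endif
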
#endif /* _ION_PRIV_H */