/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ION Memory Allocator kernel interface header
 *
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_H
#define _ION_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>

#include "../uapi/ion.h"

/**
 * struct ion_buffer - metadata for a particular buffer
 * @list:		element in list of deferred freeable buffers
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		heap-private data associated with the buffer,
 *			stored as a void *
 * @lock:		protects the buffer's count fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @sg_table:		the sg table for the buffer
 * @attachments:	list of devices attached to this buffer
 */
struct ion_buffer {
        struct list_head list;
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        void *priv_virt;
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        struct sg_table *sg_table;
        struct list_head attachments;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @lock:		rwsem protecting the list of heaps
 * @heaps:		priority-ordered list of heaps on this device
 * @debug_root:		debugfs root directory for ion
 * @heap_cnt:		number of heaps registered with this device
 */
struct ion_device {
        struct miscdevice dev;
        struct rw_semaphore lock;
        struct plist_head heaps;
        struct dentry *debug_root;
        int heap_cnt;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @shrink:		release cached memory back to the system,
 *			returning the number of pages freed
 *
 * allocate and map_user return 0 on success, -errno on error.
 * map_kernel returns a pointer on success, ERR_PTR on error.
 * @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
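
/*
 * Example -- illustrative sketch only, not part of the ION API.  A trivial
 * heap might implement ion_heap_ops as below; the example_* names are
 * hypothetical, and a real driver would also include <linux/scatterlist.h>,
 * <linux/slab.h> and <linux/mm.h>.  A heap that keeps freed pages in a pool
 * must check buffer->private_flags for ION_PRIV_FLAG_SHRINKER_FREE in its
 * free callback and bypass the pool when it is set; this trivial heap has no
 * pool, so it always returns pages to the system.  map_kernel, unmap_kernel
 * and map_user can reuse the common helpers declared later in this header
 * once sg_table is filled in.
 *
 *	static int example_heap_allocate(struct ion_heap *heap,
 *					 struct ion_buffer *buffer,
 *					 unsigned long len, unsigned long flags)
 *	{
 *		struct sg_table *table;
 *		struct page *page;
 *
 *		table = kmalloc(sizeof(*table), GFP_KERNEL);
 *		if (!table)
 *			return -ENOMEM;
 *		if (sg_alloc_table(table, 1, GFP_KERNEL)) {
 *			kfree(table);
 *			return -ENOMEM;
 *		}
 *		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(len));
 *		if (!page) {
 *			sg_free_table(table);
 *			kfree(table);
 *			return -ENOMEM;
 *		}
 *		sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);
 *		buffer->sg_table = table;
 *		return 0;
 *	}
 *
 *	static void example_heap_free(struct ion_buffer *buffer)
 *	{
 *		struct sg_table *table = buffer->sg_table;
 *
 *		__free_pages(sg_page(table->sgl), get_order(buffer->size));
 *		sg_free_table(table);
 *		kfree(table);
 *	}
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate	= example_heap_allocate,
 *		.free		= example_heap_free,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */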

/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node for the device's priority-ordered list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates the priority of this heap
 *			when allocating. These are specified by platform data
 *			and MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @num_of_buffers:	the number of currently allocated buffers
 * @num_of_alloc_bytes:	the number of allocated bytes
 * @alloc_bytes_wm:	high watermark of allocated bytes
 * @stat_lock:		protects the heap statistics
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        unsigned long flags;
        unsigned int id;
        const char *name;

        /* deferred free support */
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        /* heap statistics */
        u64 num_of_buffers;
        u64 num_of_alloc_bytes;
        u64 alloc_bytes_wm;

        /* protect heap statistics */
        spinlock_t stat_lock;
};

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @heap:	the heap to add
 */
void ion_device_add_heap(struct ion_heap *heap);

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma);
int ion_heap_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_heap_init_shrinker
 * @heap:	the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:	the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);
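
/*
 * Example -- illustrative sketch only, not part of the ION API.  Registering
 * the hypothetical heap from the sketch above: the driver fills in a
 * struct ion_heap and hands it to ion_device_add_heap().  ION_HEAP_TYPE_CUSTOM
 * comes from the uapi header included above.  Per the comments on
 * ion_heap_init_shrinker() and ion_heap_init_deferred_free(), the
 * deferred-free thread and the shrinker are assumed to be set up on the
 * heap's behalf when ION_HEAP_FLAG_DEFER_FREE is set or a shrink op is
 * provided, so the driver only requests them via the flag.
 *
 *	static struct ion_heap example_heap = {
 *		.ops	= &example_heap_ops,
 *		.type	= ION_HEAP_TYPE_CUSTOM,
 *		.name	= "example",
 *		.flags	= ION_HEAP_FLAG_DEFER_FREE,
 *	};
 *
 *	static int __init example_heap_init(void)
 *	{
 *		ion_device_add_heap(&example_heap);
 *		return 0;
 *	}
 *	device_initcall(example_heap_init);
 */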

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:	the heap
 * @buffer:	the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:	the heap
 * @size:	amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher than requested, depending on the size of the items in the list,
 * or lower if there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free list, skipping any
 *			      heap-specific pooling or caching mechanisms
 * @heap:	the heap
 * @size:	amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher than requested, depending on the size of the items in the list,
 * or lower if there is insufficient total memory on the freelist.
 *
 * Unlike with ion_heap_freelist_drain(), don't put any pages back into
 * page pools or otherwise cache the pages.  Everything must be
 * genuinely freed back to the system.  If you're freeing from a
 * shrinker you probably want to use this.  Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:	the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre-allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for DMA, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct, in particular the counts
 *			and item lists
 * @gfp_mask:		gfp_mask to use for allocations
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of pre-allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for DMA, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
        int high_count;
        int low_count;
        struct list_head high_items;
        struct list_head low_items;
        struct mutex mutex;
        gfp_t gfp_mask;
        unsigned int order;
        struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:	the pool
 * @gfp_mask:	the memory type to reclaim
 * @nr_to_scan:	number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan);
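
/*
 * Example -- illustrative sketch only, not part of the ION API.  A heap that
 * caches pages might create a single order-0 pool at init time, satisfy
 * allocations from it, recycle pages on free, and wire its shrink op to
 * ion_page_pool_shrink() so the pool is emptied under memory pressure.  The
 * example_* names are hypothetical; ion_system_heap follows the same pattern
 * with an array of pools, one per page order.
 *
 *	static struct ion_page_pool *example_pool;
 *
 *	static int example_pool_init(void)
 *	{
 *		example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
 *		return example_pool ? 0 : -ENOMEM;
 *	}
 *
 *	static struct page *example_alloc_page(void)
 *	{
 *		return ion_page_pool_alloc(example_pool);
 *	}
 *
 *	static void example_free_page(struct ion_buffer *buffer,
 *				      struct page *page)
 *	{
 *		// When freeing on behalf of the shrinker, return the page to
 *		// the system instead of refilling the pool.
 *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *			__free_page(page);
 *		else
 *			ion_page_pool_free(example_pool, page);
 *	}
 *
 *	static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *				       int nr_to_scan)
 *	{
 *		return ion_page_pool_shrink(example_pool, gfp_mask, nr_to_scan);
 *	}
 */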

#endif /* _ION_H */