/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @map: [optional] maps a page from the buffer into kernel address space.
 * @unmap: [optional] unmaps a page from the buffer.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer.
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
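
	/*
	 * A minimal illustrative sketch (not part of the interface) of an
	 * @attach implementation, assuming a hypothetical exporter whose
	 * backing storage is only reachable through 32-bit DMA addresses.
	 * my_exporter_attach() is a made-up name; dma_get_mask() and
	 * DMA_BIT_MASK() are the real helpers from linux/dma-mapping.h.
	 *
	 *	static int my_exporter_attach(struct dma_buf *dmabuf,
	 *				      struct dma_buf_attachment *attach)
	 *	{
	 *		// Fail when the device cannot address the storage.
	 *		if (dma_get_mask(attach->dev) < DMA_BIT_MASK(32))
	 *			return -EBUSY;
	 *
	 *		attach->priv = NULL;	// no private housekeeping needed
	 *		return 0;
	 *	}
	 */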

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * any more.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_parms from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
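
	/*
	 * A minimal illustrative sketch (not part of the interface) of a
	 * @map_dma_buf implementation for a hypothetical page-backed
	 * exporter. my_map_dma_buf(), struct my_buffer and its pages and
	 * nr_pages fields are made-up names; the scatterlist and DMA mapping
	 * helpers are the real kernel APIs.
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		struct sg_table *sgt;
	 *		int ret;
	 *
	 *		sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	 *		if (!sgt)
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		// Build a scatterlist covering the backing pages.
	 *		ret = sg_alloc_table_from_pages(sgt, buf->pages,
	 *						buf->nr_pages, 0,
	 *						buf->nr_pages * PAGE_SIZE,
	 *						GFP_KERNEL);
	 *		if (ret) {
	 *			kfree(sgt);
	 *			return ERR_PTR(ret);
	 *		}
	 *
	 *		// Map it into the attached device's address space.
	 *		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
	 *			sg_free_table(sgt);
	 *			kfree(sgt);
	 *			return ERR_PTR(-ENOMEM);
	 *		}
	 *		return sgt;
	 *	}
	 */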

	/* TODO: Add a try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf. This
	 * callback is mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer with the CPU. The exporter can use this to
	 * flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	void *(*map)(struct dma_buf *, unsigned long);
	void (*unmap)(struct dma_buf *, unsigned long, void *);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to setup their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
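
/*
 * A minimal illustrative sketch of how a hypothetical exporter could wire
 * up the mandatory callbacks and export a buffer. my_map_dma_buf(),
 * my_unmap_dma_buf(), my_release() and struct my_buffer are made-up names;
 * DEFINE_DMA_BUF_EXPORT_INFO() and dma_buf_export() are declared further
 * down in this header.
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 *
 *	struct dma_buf *my_export(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dma_buf_ops;
 *		exp_info.size = buf->nr_pages * PAGE_SIZE;
 *		exp_info.flags = O_RDWR | O_CLOEXEC;
 *		exp_info.priv = buf;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 */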

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *	  refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *	  vmap/vunmap.
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *	   kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct reservation_object *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		unsigned long active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping needed to initiate a transfer is
 * created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};
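
/*
 * A minimal illustrative sketch of the importer side, assuming "fd" is a
 * dma-buf file descriptor received from userspace and "dev" is the
 * importing &struct device; the error check after the map step is elided
 * for brevity.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);	// takes a reference on the fd's buffer
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */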

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct reservation_object *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dma-buf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */
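
/*
 * A minimal illustrative sketch of bracketed CPU access from a kernel
 * importer, assuming "dmabuf" is a valid &struct dma_buf the caller holds
 * a reference on and that the exporter implements the kmap interface.
 *
 *	int err;
 *	void *vaddr;
 *
 *	err = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (err)
 *		return err;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);	// map page 0 of the buffer
 *	if (vaddr) {
 *		// ... read the first page through vaddr ...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *	}
 *
 *	err = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */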