linux/include/linux/dma-buf.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *        address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
        /**
         * @cache_sgt_mapping:
         *
         * If true the framework will cache the first mapping made for each
         * attachment. This avoids creating mappings for attachments multiple
         * times.
         */
        bool cache_sgt_mapping;

        /**
         * @attach:
         *
         * This is called from dma_buf_attach() to make sure that a given
         * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
         * which support buffer objects in special locations like VRAM or
         * device-specific carveout areas should check whether the buffer could
         * be moved to system memory (or directly accessed by the provided
         * device), and otherwise need to fail the attach operation.
         *
         * The exporter should also in general check whether the current
         * allocation fulfills the DMA constraints of the new device. If this
         * is not the case, and the allocation cannot be moved, it should also
         * fail the attach operation.
         *
         * Any exporter-private housekeeping data can be stored in the
         * &dma_buf_attachment.priv pointer.
         *
         * This callback is optional.
         *
         * Returns:
         *
         * 0 on success, negative error code on failure. It might return -EBUSY
         * to signal that backing storage is already allocated and incompatible
         * with the requirements of the requesting device.
         */
        int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

        /**
         * @detach:
         *
         * This is called by dma_buf_detach() to release a &dma_buf_attachment.
         * Provided so that exporters can clean up any housekeeping for an
         * &dma_buf_attachment.
         *
         * This callback is optional.
         */
        void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

        /**
         * @pin:
         *
         * This is called by dma_buf_pin() and lets the exporter know that the
         * DMA-buf can't be moved any more.
         *
         * This is called with the dmabuf->resv object locked and is mutually
         * exclusive with @cache_sgt_mapping.
         *
         * This callback is optional and should only be used in limited use
         * cases like scanout and not for temporary pin operations.
         *
         * Returns:
         *
         * 0 on success, negative error code on failure.
         */
        int (*pin)(struct dma_buf_attachment *attach);

        /**
         * @unpin:
         *
         * This is called by dma_buf_unpin() and lets the exporter know that
         * the DMA-buf can be moved again.
         *
         * This is called with the dmabuf->resv object locked and is mutually
         * exclusive with @cache_sgt_mapping.
         *
         * This callback is optional.
         */
        void (*unpin)(struct dma_buf_attachment *attach);

        /**
         * @map_dma_buf:
         *
         * This is called by dma_buf_map_attachment() and is used to map a
         * shared &dma_buf into device address space, and it is mandatory. It
         * can only be called if @attach has been called successfully.
         *
         * This call may sleep, e.g. when the backing storage first needs to be
         * allocated, or moved to a location suitable for all currently attached
         * devices.
         *
         * Note that any specific buffer attributes required for this function
         * should get added to device_dma_parameters accessible via
         * &device.dma_params from the &dma_buf_attachment. The @attach callback
         * should also check these constraints.
         *
         * If this is being called for the first time, the exporter can now
         * choose to scan through the list of attachments for this buffer,
         * collate the requirements of the attached devices, and choose an
         * appropriate backing storage for the buffer.
         *
         * Based on enum dma_data_direction, it might be possible to have
         * multiple users accessing at the same time (for reading, maybe), or
         * any other kind of sharing that the exporter might wish to make
         * available to buffer-users.
         *
         * This is always called with the dmabuf->resv object locked when
         * the dynamic_mapping flag is true.
         *
         * Returns:
         *
         * A &sg_table scatter list of the backing storage of the DMA buffer,
         * already mapped into the device address space of the &device attached
         * with the provided &dma_buf_attachment.
         *
         * On failure, returns a negative error value wrapped into a pointer.
         * May also return -EINTR when a signal was received while being
         * blocked.
         */
        struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
                                         enum dma_data_direction);
        /**
         * @unmap_dma_buf:
         *
         * This is called by dma_buf_unmap_attachment() and should unmap and
         * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
         * For static dma_buf handling this might also unpin the backing
         * storage if this is the last mapping of the DMA buffer.
         */
        void (*unmap_dma_buf)(struct dma_buf_attachment *,
                              struct sg_table *,
                              enum dma_data_direction);

        /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
         * if the call would block.
         */

        /**
         * @release:
         *
         * Called after the last dma_buf_put to release the &dma_buf, and
         * mandatory.
         */
        void (*release)(struct dma_buf *);

        /**
         * @begin_cpu_access:
         *
         * This is called from dma_buf_begin_cpu_access() and allows the
         * exporter to ensure that the memory is actually available for cpu
         * access - the exporter might need to allocate or swap-in and pin the
         * backing storage. The exporter also needs to ensure that cpu access is
         * coherent for the access direction. The direction can be used by the
         * exporter to optimize the cache flushing, i.e. access with a different
         * direction (read instead of write) might return stale or even bogus
         * data (e.g. when the exporter needs to copy the data to temporary
         * storage).
         *
         * This callback is optional.
         *
         * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
         * from userspace (where storage shouldn't be pinned to avoid handing
         * de-facto mlock rights to userspace) and for the kernel-internal
         * users of the various kmap interfaces, where the backing storage must
         * be pinned to guarantee that the atomic kmap calls can succeed. Since
         * there are no in-kernel users of the kmap interfaces yet this isn't a
         * real problem.
         *
         * Returns:
         *
         * 0 on success or a negative error code on failure. This can for
         * example fail when the backing storage can't be allocated. Can also
         * return -ERESTARTSYS or -EINTR when the call has been interrupted and
         * needs to be restarted.
         */
        int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

        /**
         * @end_cpu_access:
         *
         * This is called from dma_buf_end_cpu_access() when the importer is
         * done with CPU access. The exporter can use this to flush caches and
         * unpin any resources pinned in @begin_cpu_access.
         * The result of any dma_buf kmap calls after end_cpu_access is
         * undefined.
         *
         * This callback is optional.
         *
         * Returns:
         *
         * 0 on success or a negative error code on failure. Can return
         * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
         * to be restarted.
         */
        int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

        /**
         * @mmap:
         *
         * This callback is used by the dma_buf_mmap() function.
         *
         * Note that the mapping needs to be incoherent; userspace is expected
         * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
         *
         * Because dma-buf buffers have invariant size over their lifetime, the
         * dma-buf core checks whether a vma is too large and rejects such
         * mappings. The exporter hence does not need to duplicate this check.
         *
         * If an exporter needs to manually flush caches and hence needs to fake
         * coherency for mmap support, it needs to be able to zap all the ptes
         * pointing at the backing storage. Now linux mm needs a struct
         * address_space associated with the struct file stored in vma->vm_file
         * to do that with the function unmap_mapping_range. But the dma_buf
         * framework only backs every dma_buf fd with the anon_file struct file,
         * i.e. all dma_bufs share the same file.
         *
         * Hence exporters need to set up their own file (and address_space)
         * association by setting vma->vm_file and adjusting vma->vm_pgoff in
         * the dma_buf mmap callback. In the specific case of a gem driver the
         * exporter could use the shmem file already provided by gem (and set
         * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
         * corresponding range of the struct address_space associated with their
         * own file.
         *
         * This callback is optional.
         *
         * Returns:
         *
         * 0 on success or a negative error code on failure.
         */
        int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

        void *(*vmap)(struct dma_buf *);
        void (*vunmap)(struct dma_buf *, void *vaddr);
};
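
/*
 * Example: a minimal exporter wiring up the mandatory callbacks. This is an
 * illustrative sketch, not code from this file: my_buffer, my_unmap,
 * my_release and my_exporter_ops are hypothetical names, and a real exporter
 * builds the sg_table from whatever backing storage it actually uses.
 *
 *	static struct sg_table *my_map(struct dma_buf_attachment *attach,
 *				       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct sg_table *sgt;
 *		int ret;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->nr_pages,
 *						0, buf->size, GFP_KERNEL);
 *		if (ret) {
 *			kfree(sgt);
 *			return ERR_PTR(ret);
 *		}
 *
 *		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
 *			sg_free_table(sgt);
 *			kfree(sgt);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *		return sgt;
 *	}
 *
 *	static const struct dma_buf_ops my_exporter_ops = {
 *		.map_dma_buf	= my_map,
 *		.unmap_dma_buf	= my_unmap,
 *		.release	= my_release,
 *	};
 */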

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached,
 *               protected by dma_resv lock.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: userspace-provided name; useful for accounting and debugging,
 *        protected by @resv.
 * @name_lock: spinlock to protect name access
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created by
 * calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
        size_t size;
        struct file *file;
        struct list_head attachments;
        const struct dma_buf_ops *ops;
        struct mutex lock;
        unsigned vmapping_counter;
        void *vmap_ptr;
        const char *exp_name;
        const char *name;
        spinlock_t name_lock;
        struct module *owner;
        struct list_head list_node;
        void *priv;
        struct dma_resv *resv;

        /* poll support */
        wait_queue_head_t poll;

        struct dma_buf_poll_cb_t {
                struct dma_fence_cb cb;
                wait_queue_head_t *poll;

                __poll_t active;
        } cb_excl, cb_shared;
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
        /**
         * @allow_peer2peer:
         *
         * If this is set to true the importer must be able to handle peer
         * resources without struct pages.
         */
        bool allow_peer2peer;

        /**
         * @move_notify: [optional] notification that the DMA-buf is moving
         *
         * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exist.
         *
         * This callback is called with the lock of the reservation object
         * associated with the dma_buf held and the mapping function must be
         * called with this lock held as well. This makes sure that no mapping
         * is created concurrently with an ongoing move operation.
         *
         * Mappings stay valid and are not directly affected by this callback.
         * But the DMA-buf can now be in a different physical location, so all
         * mappings should be destroyed and re-created as soon as possible.
         *
         * New mappings can be created after this callback returns, and will
         * point to the new location of the DMA-buf.
         */
        void (*move_notify)(struct dma_buf_attachment *attach);
};
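
/*
 * Example: a dynamic importer providing @move_notify. Illustrative sketch
 * only: my_importer and my_invalidate_mappings are hypothetical. The callback
 * runs with the dma_resv lock held, so it should only mark cached mappings
 * stale and defer the actual re-creation to the next mapping operation.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		my_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify	 = my_move_notify,
 *	};
 */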

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 *                dma_buf_map/unmap_attachment() must be called with the
 *                dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
        struct dma_buf *dmabuf;
        struct device *dev;
        struct list_head node;
        struct sg_table *sgt;
        enum dma_data_direction dir;
        bool peer2peer;
        const struct dma_buf_attach_ops *importer_ops;
        void *importer_priv;
        void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:   name of the exporter - useful for debugging.
 * @owner:      pointer to exporter module - used for refcounting kernel module
 * @ops:        Attach allocator-defined dma buf ops to the new buffer
 * @size:       Size of the buffer
 * @flags:      mode flags for the file
 * @resv:       reservation-object, NULL to allocate default one
 * @priv:       Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
        const char *exp_name;
        struct module *owner;
        const struct dma_buf_ops *ops;
        size_t size;
        int flags;
        struct dma_resv *resv;
        void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)        \
        struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
                                         .owner = THIS_MODULE }
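
/*
 * Example: typical exporter usage, a sketch assuming hypothetical my_ops and
 * my_priv; the surrounding function and further error handling are omitted.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops   = &my_ops;
 *	exp_info.size  = size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv  = my_priv;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */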

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:     [in]    pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
        get_file(dmabuf->file);
}
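
/*
 * Example: an exporter that caches the dma_buf it exported takes an extra
 * reference before handing it out again. Sketch only; obj->dmabuf is a
 * hypothetical cached pointer.
 *
 *	if (obj->dmabuf) {
 *		get_dma_buf(obj->dmabuf);
 *		return obj->dmabuf;
 *	}
 */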

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
        return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions with
 * the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
        return !!attach->importer_ops;
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
                       const struct dma_buf_attach_ops *importer_ops,
                       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
                    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                                enum dma_data_direction);
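
/*
 * Example: typical importer sequence for a single transfer. A sketch assuming
 * a hypothetical device my_dev and an fd received from userspace; error
 * unwinding behind the two labels is omitted.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach))
 *		goto err_put;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		goto err_detach;
 *
 *	... program the device using the sgt entries ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */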
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                           enum dma_data_direction dir);
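
/*
 * Example: kernel-internal CPU access must be bracketed by these calls so the
 * exporter can manage coherency for the given direction. A sketch; how the
 * buffer is actually accessed in between (e.g. via dma_buf_vmap()) is up to
 * the caller.
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... CPU reads of the buffer contents ...
 *
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */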

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
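
/*
 * Example: refcounted kernel mapping of the whole buffer. Sketch only; this
 * can succeed only when the exporter implements @vmap, and dma_buf_vmap()
 * returns NULL otherwise.
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		... access the buffer through vaddr ...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 */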
#endif /* __DMA_BUF_H__ */