linux/include/linux/uio.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
};

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool data_source;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
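
/*
 * Typical (illustrative) use, paired with iov_iter_restore() declared
 * below: snapshot the iterator before an operation that may consume it,
 * and rewind if the operation has to be retried.  do_transfer() is a
 * hypothetical helper that returns -EAGAIN on a transient failure:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(&iter, &state);
 *	ret = do_transfer(&iter);
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(&iter, &state);
 */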

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}
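
/*
 * Only the union member that matches iter_type is valid, so check the
 * type before touching the payload; a minimal sketch ('base' is a
 * hypothetical local):
 *
 *	if (iter_is_iovec(i))
 *		base = i->iov->iov_base;
 *	else if (iov_iter_is_kvec(i))
 *		base = i->kvec->iov_base;
 */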

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
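
/*
 * Illustrative walk over an ITER_IOVEC iterator, one (possibly partial)
 * segment at a time; iov_iter_iovec() is only meaningful for user-backed
 * (ITER_IOVEC) iterators.  Handle v.iov_base/v.iov_len in the loop body:
 *
 *	while (iov_iter_count(&iter)) {
 *		struct iovec v = iov_iter_iovec(&iter);
 *
 *		... use v.iov_base and v.iov_len ...
 *		iov_iter_advance(&iter, v.iov_len);
 *	}
 */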

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
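
/*
 * Illustrative use: pull a fixed-size header out of a source iterator.
 * On a short copy the iterator is rewound, so the caller can fail
 * cleanly (struct foo_hdr is hypothetical):
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */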

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users such as pmem that depend on copy_from_iter_flushcache()
 * having stricter semantics than copy_from_iter_nocache() must check
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that
 * the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}
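
/*
 * Sketch of the check described above for pmem-style callers: only
 * trust the flush-on-return semantics when the architecture provides
 * them, since this call otherwise silently degrades to the nocache
 * copy:
 *
 *	if (IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		rc = copy_from_iter_flushcache(dst, len, i);
 *	else
 *		rc = copy_from_iter_nocache(dst, len, i);
 *
 * In the fallback branch the caller must flush the destination itself.
 */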

static __always_inline __must_check
size_t copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_mc_to_iter(addr, bytes, i);
}
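
/*
 * Illustrative use: on a CONFIG_ARCH_HAS_COPY_MC kernel a short return
 * may indicate poisoned (machine-check affected) source memory, not
 * just a fault on the destination:
 *
 *	copied = copy_mc_to_iter(src, len, i);
 *	if (copied != len)
 *		return -EIO;
 */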

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
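
/*
 * Constructing an iterator over kernel memory, a minimal sketch: a
 * WRITE iterator is a data source, so copy_from_iter() drains it, while
 * a READ iterator is a destination for copy_to_iter().  kbuf, dst and
 * len are hypothetical:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, &kv, 1, len);
 *	copied = copy_from_iter(dst, len, &iter);
 */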

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit on it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than
	 * all values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; the new count must not exceed
 * the count the iterator had before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
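
/*
 * A truncate/reexpand round trip, sketched: cap one transfer without
 * losing sight of the caller's full request.  'limit' and do_transfer()
 * are hypothetical; do_transfer() consumes at most 'limit' bytes:
 *
 *	size_t old_count = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, limit);
 *	do_transfer(iter);
 *	iov_iter_reexpand(iter, iov_iter_count(iter) +
 *			  (old_count - min_t(size_t, old_count, limit)));
 */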

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
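
/*
 * Illustrative checksumming copy, in the style of the datagram paths:
 * the running checksum of the copied bytes is folded into *csum while
 * the data is copied (kbuf, len and from are hypothetical):
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(kbuf, len, &csum, from))
 *		return -EFAULT;
 */
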
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
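
/*
 * The usual calling pattern for import_iovec(), sketched after the
 * readv/writev paths: a small on-stack array covers the common case,
 * and *iovp is set to NULL when that array was used, so kfree() is
 * always safe afterwards:
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret >= 0) {
 *		... consume &iter ...
 *		kfree(iov);
 *	}
 */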

#endif