linux/include/linux/uio.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *      Berkeley style UIO structures   -       Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};

enum iter_type {
        /* iter types */
        ITER_IOVEC,
        ITER_KVEC,
        ITER_BVEC,
        ITER_PIPE,
        ITER_XARRAY,
        ITER_DISCARD,
};

struct iov_iter {
        u8 iter_type;
        bool data_source;
        size_t iov_offset;
        size_t count;
        union {
                const struct iovec *iov;
                const struct kvec *kvec;
                const struct bio_vec *bvec;
                struct xarray *xarray;
                struct pipe_inode_info *pipe;
        };
        union {
                unsigned long nr_segs;
                struct {
                        unsigned int head;
                        unsigned int start_head;
                };
                loff_t xarray_start;
        };
};
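
/*
 * Which union member is valid follows iter_type: ITER_IOVEC uses iov,
 * ITER_KVEC uses kvec, and ITER_BVEC uses bvec (all three paired with
 * nr_segs); ITER_PIPE uses pipe together with head/start_head; and
 * ITER_XARRAY uses xarray together with xarray_start.  ITER_DISCARD
 * carries no payload - only count is meaningful.
 */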

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
        return i->iter_type;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
        return i->data_source ? WRITE : READ;
}
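
/*
 * Direction convention, illustrated (a sketch, not part of this header):
 * an iterator created with direction WRITE is a data *source*, so
 * iov_iter_rw() reports WRITE and data is pulled out of it; a READ
 * iterator is a destination.  "kbuf" below is a hypothetical kernel
 * buffer.
 *
 *      if (iov_iter_rw(iter) == WRITE)
 *              copied = copy_from_iter(kbuf, len, iter);
 *      else
 *              copied = copy_to_iter(kbuf, len, iter);
 */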

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}
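
/*
 * A minimal sketch of the validation the NOTE above calls for, assuming
 * an untrusted caller-supplied vector; validate_iov_lengths() is a
 * made-up helper, not a kernel API, and MAX_RW_COUNT comes from
 * <linux/fs.h>.  Each length is checked against the room left before
 * anything is summed, so the total can never wrap.
 *
 *      static int validate_iov_lengths(const struct iovec *iov,
 *                                      unsigned long nr_segs)
 *      {
 *              size_t total = 0;
 *              unsigned long seg;
 *
 *              for (seg = 0; seg < nr_segs; seg++) {
 *                      if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *                              return -EINVAL;
 *                      total += iov[seg].iov_len;
 *              }
 *              return 0;
 *      }
 */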

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
        return (struct iovec) {
                .iov_base = iter->iov->iov_base + iter->iov_offset,
                .iov_len = min(iter->count,
                               iter->iov->iov_len - iter->iov_offset),
        };
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
                                  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t copied = copy_from_iter(addr, bytes, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}
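
/*
 * Typical all-or-nothing use (an illustrative sketch; my_hdr is a
 * hypothetical on-wire header type): pull a fixed-size structure out of
 * the iterator and fail cleanly on a short copy.  Because
 * copy_from_iter_full() reverts on failure, the iterator is left
 * exactly where it started.
 *
 *      struct my_hdr hdr;
 *
 *      if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *              return -EFAULT;
 */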

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t copied = copy_from_iter_nocache(addr, bytes, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() relative to copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before
 * assuming that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
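
/*
 * Per the note above, a sketch of the guard a pmem-style user would add
 * before relying on flush-on-return semantics (illustrative; the error
 * choice is the caller's policy, not mandated here):
 *
 *      if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *              return -EOPNOTSUPP;
 *      copied = copy_from_iter_flushcache(addr, bytes, i);
 */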

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_mc_to_iter(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
                        size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
                     loff_t start, size_t count);
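
/*
 * Constructor usage, sketched (illustrative, assuming an open struct
 * file and vfs_iter_read() from <linux/fs.h>): wrap a kernel buffer in
 * a kvec, build a READ iterator over it, and let the VFS fill it.
 *
 *      struct kvec kv = { .iov_base = buf, .iov_len = len };
 *      struct iov_iter iter;
 *      loff_t pos = 0;
 *
 *      iov_iter_kvec(&iter, READ, &kv, 1, len);
 *      ret = vfs_iter_read(file, &iter, &pos, 0);
 */
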
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
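
/*
 * Page-pinning sketch (illustrative): iov_iter_get_pages() takes page
 * references covering up to maxsize bytes, returns the byte count, and
 * stores the offset into the first page in *start; the caller must
 * put_page() every page when done.
 *
 *      struct page *pages[16];
 *      size_t start;
 *      ssize_t n = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &start);
 *
 *      if (n > 0)
 *              npages = DIV_ROUND_UP(start + n, PAGE_SIZE);
 */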

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it is an upper limit on it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it will
 * just do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - the comparison extends
         * both operands to u64 here, and any value that would be
         * truncated by the conversion in the assignment is by definition
         * greater than all values of size_t, including the old i->count.
         */
        if (i->count > count)
                i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than
 * the size the iterator would have had without the earlier truncation.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}
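
/*
 * The usual truncate/reexpand pairing, sketched (illustrative; do_io()
 * is a made-up helper returning bytes consumed or a negative error):
 * save the count, truncate for a size-limited operation, then restore
 * whatever was cut off once the operation has taken its share.
 *
 *      size_t orig = iov_iter_count(iter);
 *
 *      iov_iter_truncate(iter, limit);
 *      ret = do_io(iter);
 *      iov_iter_reexpand(iter, orig - (ret > 0 ? ret : 0));
 */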

struct csum_state {
        __wsum csum;
        size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
                                  __wsum *csum, struct iov_iter *i)
{
        size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i);
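
/*
 * Checksumming copy, sketched (illustrative; how the checksum is seeded
 * and later folded into a protocol header is up to the caller):
 * csum_and_copy_to_iter() copies while updating the running checksum
 * kept in csstate, advancing csstate->off by the bytes copied.
 *
 *      struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *      copied = csum_and_copy_to_iter(addr, bytes, &csstate, i);
 */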

struct iovec *iovec_from_user(const struct iovec __user *uvector,
                unsigned long nr_segs, unsigned long fast_segs,
                struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
                 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
                 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
                 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i);
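
/*
 * readv()-style import, sketched (illustrative; UIO_FASTIOV comes from
 * <uapi/linux/uio.h>, included above).  On success *iovp is either a
 * kmalloc'ed array or NULL when the on-stack fast array sufficed, so
 * the unconditional kfree() is safe.
 *
 *      struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *      struct iov_iter iter;
 *      ssize_t ret;
 *
 *      ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      kfree(iov);
 */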

#endif