root/include/linux/uio.h

DEFINITIONS

This source file includes the following definitions.
  1. iov_iter_type
  2. iter_is_iovec
  3. iov_iter_is_kvec
  4. iov_iter_is_bvec
  5. iov_iter_is_pipe
  6. iov_iter_is_discard
  7. iov_iter_rw
  8. iov_length
  9. iov_iter_iovec
  10. copy_to_iter
  11. copy_from_iter
  12. copy_from_iter_full
  13. copy_from_iter_nocache
  14. copy_from_iter_full_nocache
  15. copy_from_iter_flushcache
  16. copy_to_iter_mcsafe
  17. iov_iter_count
  18. iov_iter_truncate
  19. iov_iter_reexpand

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *      Berkeley style UIO structures   -       Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};

enum iter_type {
        /* iter types */
        ITER_IOVEC = 4,
        ITER_KVEC = 8,
        ITER_BVEC = 16,
        ITER_PIPE = 32,
        ITER_DISCARD = 64,
};

struct iov_iter {
        /*
         * Bit 0 is the read/write bit, set if we're writing.
         * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
         * the caller isn't expecting to drop a page reference when done.
         */
        unsigned int type;
        size_t iov_offset;
        size_t count;
        union {
                const struct iovec *iov;
                const struct kvec *kvec;
                const struct bio_vec *bvec;
                struct pipe_inode_info *pipe;
        };
        union {
                unsigned long nr_segs;
                struct {
                        int idx;
                        int start_idx;
                };
        };
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
        return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
        return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
        return i->type & (READ | WRITE);
}
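
/*
 * Example (illustrative sketch, not part of this header): how the direction
 * and type accessors above are commonly combined.  iov_iter_rw() returns the
 * data direction (READ or WRITE) encoded in bit 0, while iter_is_iovec() and
 * friends test the backing type with the direction bits masked off.  The
 * helper below is hypothetical.
 */
static inline bool example_iter_is_user_data_source(const struct iov_iter *i)
{
        /* WRITE direction: data flows out of the iter (e.g. a write(2)) */
        if (iov_iter_rw(i) != WRITE)
                return false;
        /* ...and the segments live in userspace iovecs */
        return iter_is_iovec(i);
}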

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}
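
/*
 * Example (illustrative sketch, not part of this header): validating segment
 * lengths before trusting iov_length(), per the NOTE above.  This check is a
 * hypothetical stand-in for the validation normally performed when the iovec
 * is imported from userspace.
 */
static inline bool example_iov_length_is_safe(const struct iovec *iov,
                                              unsigned long nr_segs)
{
        size_t total = 0;
        unsigned long seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (iov[seg].iov_len > SIZE_MAX - total)
                        return false;   /* the sum would overflow a size_t */
                total += iov[seg].iov_len;
        }
        return true;
}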

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
        return (struct iovec) {
                .iov_base = iter->iov->iov_base + iter->iov_offset,
                .iov_len = min(iter->count,
                               iter->iov->iov_len - iter->iov_offset),
        };
}
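
/*
 * Example (illustrative sketch, not part of this header): iov_iter_iovec()
 * returns the current user segment, clamped to the remaining count and the
 * offset into that segment, and is typically paired with iov_iter_advance()
 * to walk an iovec-backed iterator segment by segment.  The per-segment
 * operation below is hypothetical; i->count is the remaining byte count
 * (see iov_iter_count() further down).
 */
static inline ssize_t example_walk_user_segments(struct iov_iter *i,
                ssize_t (*op)(void __user *buf, size_t len, void *ctx),
                void *ctx)
{
        ssize_t done = 0;

        while (i->count) {              /* only meaningful if iter_is_iovec(i) */
                struct iovec v = iov_iter_iovec(i);
                ssize_t n;

                if (!v.iov_len)
                        break;          /* defensive: degenerate empty segment */
                n = op(v.iov_base, v.iov_len, ctx);
                if (n <= 0)
                        return done ? done : n;
                done += n;
                iov_iter_advance(i, n);
                if (n != v.iov_len)
                        break;          /* short operation, stop early */
        }
        return done;
}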

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full_nocache(addr, bytes, i);
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_flushcache(addr, bytes, i);
}
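
/*
 * Example (illustrative sketch, not part of this header): a pmem-style caller
 * honouring the note above.  copy_from_iter_flushcache() only guarantees that
 * the destination has been flushed from the CPU cache when
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE is enabled; otherwise it falls back to
 * the nocache variant and the caller must flush explicitly.  The flush helper
 * named here is hypothetical.
 */
void example_flush_dcache_range(void *addr, size_t size);      /* hypothetical */

static inline size_t example_copy_to_persistent(void *dst, size_t bytes,
                                                struct iov_iter *i)
{
        size_t copied = copy_from_iter_flushcache(dst, bytes, i);

        if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
                example_flush_dcache_range(dst, copied);

        return copied;
}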

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
                        size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
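
/*
 * Example (illustrative sketch, not part of this header): wrapping a kernel
 * buffer in a single-segment kvec iterator.  As the struct kvec comment notes,
 * iov_base must be a kernel pointer.  Passing READ as the direction marks the
 * iter as the destination of a read, so it is filled with copy_to_iter();
 * the helper and its caller are hypothetical.
 */
static inline size_t example_fill_kernel_buffer(void *buf, size_t buf_len,
                                                const void *src, size_t src_len)
{
        struct kvec kv = { .iov_base = buf, .iov_len = buf_len };
        struct iov_iter to;

        iov_iter_kvec(&to, READ, &kv, 1, buf_len);
        return copy_to_iter(src, min(src_len, buf_len), &to);
}
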
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it is an upper limit for it.  Passing it a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - comparison extends both
         * operands to u64 here and any value that would be truncated by
         * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
        if (i->count > count)
                i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must not exceed the number
 * of bytes the iterator would still cover had it never been truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}
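
/*
 * Example (illustrative sketch, not part of this header): the usual
 * truncate/reexpand pairing.  The iterator is capped to a per-call limit,
 * partially consumed, and then re-expanded so the caller still sees the bytes
 * hidden by the truncation.  example_consume() is a hypothetical operation
 * that advances the iterator by the number of bytes it used.
 */
static inline size_t example_limited_consume(struct iov_iter *i, size_t limit,
                                size_t (*example_consume)(struct iov_iter *))
{
        size_t old_count = iov_iter_count(i);
        size_t used;

        iov_iter_truncate(i, limit);
        used = example_consume(i);
        /* give back the bytes that the truncation hid from the consumer */
        iov_iter_reexpand(i, old_count - used);
        return used;
}
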
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i);

ssize_t import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);
 273 
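/*
 * Example (illustrative sketch, not part of this header): the usual
 * import_iovec() calling convention.  iovstack provides the "fast" segments;
 * on success *iov is either NULL (the stack array was used) or points at an
 * allocated array, so an unconditional kfree() is safe.  The helper is
 * hypothetical and would additionally need <linux/slab.h> for kfree().
 */
static inline ssize_t example_import_and_measure(const struct iovec __user *uvec,
                                                 unsigned long nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;

        /* ... normally the validated iter would be consumed here ... */

        kfree(iov);
        return ret;             /* total number of bytes the iovec covers */
}
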
#ifdef CONFIG_COMPAT
struct compat_iovec;
ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
                            int (*f)(struct kvec *vec, void *context),
                            void *context);

#endif
