/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
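
/*
 * A minimal usage sketch (hypothetical caller and helper names): pair
 * iov_iter_save_state() with iov_iter_restore(), declared below, when an
 * operation may consume part of the iterator before failing and needs to
 * be retried from a known point:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_transfer(iter);		// hypothetical helper
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);	// rewind to the snapshot
 *		ret = do_transfer(iter);
 *	}
 */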

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
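
/*
 * A worked illustration of the hazard noted above (values assumed for the
 * example): on a 32-bit kernel, two unvalidated segments of 0x80000000
 * bytes each sum to 0x100000000, which wraps to 0 in a size_t, so
 * iov_length() would report 4GiB of requested data as an empty transfer.
 */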

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
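
/*
 * Sketch (hypothetical caller): iov_iter_iovec() yields the unconsumed tail
 * of the current segment without advancing the iterator, which only makes
 * sense for ITER_IOVEC iterators:
 *
 *	struct iovec v = iov_iter_iovec(iter);
 *
 *	// v.iov_base/v.iov_len now describe the next contiguous
 *	// userspace chunk this iterator would touch
 */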

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
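
/*
 * Sketch of the all-or-nothing pattern (struct my_hdr is hypothetical): when
 * a fixed-size header must arrive in full, the _full variant reverts any
 * partial copy so the iterator is untouched on failure:
 *
 *	struct my_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;		// iter position unchanged
 *	// on success, iter advanced by exactly sizeof(hdr)
 */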

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() compared to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
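
/*
 * Per the note above, a sketch of the guard a pmem-style caller would need
 * before relying on flush-on-return semantics (hypothetical caller):
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// only nocache semantics available
 *	copied = _copy_from_iter_flushcache(dst, bytes, iter);
 */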

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}
/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it is an upper limit on it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by the
	 * conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no greater than
 * the count the iterator held before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
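
/*
 * The two helpers above are typically paired: cap the iterator for a bounded
 * operation, then give back whatever was cut off. A sketch, assuming a
 * caller that must not write past some limit (do_write() is hypothetical);
 * iov_iter_npages_cap() below follows the same shape:
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(iter) > limit) {
 *		shorted = iov_iter_count(iter) - limit;
 *		iov_iter_truncate(iter, limit);
 *	}
 *	ret = do_write(iter);
 *	if (shorted)
 *		iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
 */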

/*
 * Like iov_iter_npages(), but with the iterator temporarily capped at
 * max_bytes so the page count reflects only the first max_bytes of data.
 */
static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
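
/*
 * Sketch (hypothetical caller; an initial csum of 0 is assumed):
 * csum_and_copy_to_iter() threads a running checksum through its csstate
 * argument, which points at a struct csum_state, while copying - e.g. for
 * checksummed network payloads:
 *
 *	struct csum_state css = { .csum = 0, .off = 0 };
 *	size_t n = csum_and_copy_to_iter(buf, len, &css, iter);
 *
 *	// css.csum now folds in the n bytes actually copied
 */
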
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
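
/*
 * The canonical import pattern for readv()/writev()-style syscalls (a
 * sketch; do_read() is hypothetical and error handling is abbreviated).
 * import_iovec() NULLs *iovp when the on-stack array was used, so the
 * unconditional kfree() is safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */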

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count
	};
}
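
/*
 * Sketch (kbuf and len are hypothetical): wrap a plain user buffer as a
 * single-segment iterator and reuse the generic copy helpers. READ here
 * marks the buffer as a destination, i.e. data flows out to userspace:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, READ, buf, len);
 *	if (copy_to_iter(kbuf, len, &iter) != len)
 *		return -EFAULT;
 */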

#endif