1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/net/sunrpc/xdr.c |
4 | * |
5 | * Generic XDR support. |
6 | * |
7 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> |
8 | */ |
9 | |
10 | #include <linux/module.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/types.h> |
13 | #include <linux/string.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/pagemap.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/sunrpc/xdr.h> |
18 | #include <linux/sunrpc/msg_prot.h> |
19 | #include <linux/bvec.h> |
20 | #include <trace/events/sunrpc.h> |
21 | |
22 | static void _copy_to_pages(struct page **, size_t, const char *, size_t); |
23 | |
24 | |
25 | /* |
26 | * XDR functions for basic NFS types |
27 | */ |
28 | __be32 * |
29 | xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj) |
30 | { |
31 | unsigned int quadlen = XDR_QUADLEN(obj->len); |
32 | |
33 | p[quadlen] = 0; /* zero trailing bytes */ |
34 | *p++ = cpu_to_be32(obj->len); |
35 | memcpy(p, obj->data, obj->len); |
36 | return p + XDR_QUADLEN(obj->len); |
37 | } |
38 | EXPORT_SYMBOL_GPL(xdr_encode_netobj); |
39 | |
40 | __be32 * |
41 | xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) |
42 | { |
43 | unsigned int len; |
44 | |
45 | if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ) |
46 | return NULL; |
47 | obj->len = len; |
48 | obj->data = (u8 *) p; |
49 | return p + XDR_QUADLEN(len); |
50 | } |
51 | EXPORT_SYMBOL_GPL(xdr_decode_netobj); |
52 | |
53 | /** |
54 | * xdr_encode_opaque_fixed - Encode fixed length opaque data |
55 | * @p: pointer to current position in XDR buffer. |
56 | * @ptr: pointer to data to encode (or NULL) |
57 | * @nbytes: size of data. |
58 | * |
59 | * Copy the array of data of length nbytes at ptr to the XDR buffer |
60 | * at position p, then align to the next 32-bit boundary by padding |
61 | * with zero bytes (see RFC1832). |
62 | * Note: if ptr is NULL, only the padding is performed. |
63 | * |
64 | * Returns the updated current XDR buffer position |
65 | * |
66 | */ |
67 | __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes) |
68 | { |
69 | if (likely(nbytes != 0)) { |
70 | unsigned int quadlen = XDR_QUADLEN(nbytes); |
71 | unsigned int padding = (quadlen << 2) - nbytes; |
72 | |
73 | if (ptr != NULL) |
74 | memcpy(p, ptr, nbytes); |
75 | if (padding != 0) |
76 | memset((char *)p + nbytes, 0, padding); |
77 | p += quadlen; |
78 | } |
79 | return p; |
80 | } |
81 | EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed); |
82 | |
83 | /** |
84 | * xdr_encode_opaque - Encode variable length opaque data |
85 | * @p: pointer to current position in XDR buffer. |
86 | * @ptr: pointer to data to encode (or NULL) |
87 | * @nbytes: size of data. |
88 | * |
89 | * Returns the updated current XDR buffer position |
90 | */ |
91 | __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes) |
92 | { |
93 | *p++ = cpu_to_be32(nbytes); |
94 | return xdr_encode_opaque_fixed(p, ptr, nbytes); |
95 | } |
96 | EXPORT_SYMBOL_GPL(xdr_encode_opaque); |
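
/*
 * Usage sketch (illustrative, not part of the original file): encoding a
 * counted opaque. "p", "data" and "datalen" are hypothetical; "p" must
 * point at enough reserved buffer space for the length word, the data and
 * its XDR padding.
 *
 *	p = xdr_encode_opaque(p, data, datalen);
 *
 * The helper writes the 32-bit length, copies the bytes, zeroes the pad up
 * to the next 32-bit boundary, and returns the position past the padding.
 */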
97 | |
98 | __be32 * |
99 | xdr_encode_string(__be32 *p, const char *string) |
100 | { |
101 | return xdr_encode_array(p, string, strlen(string)); |
102 | } |
103 | EXPORT_SYMBOL_GPL(xdr_encode_string); |
104 | |
105 | __be32 * |
106 | xdr_decode_string_inplace(__be32 *p, char **sp, |
107 | unsigned int *lenp, unsigned int maxlen) |
108 | { |
109 | u32 len; |
110 | |
111 | len = be32_to_cpu(*p++); |
112 | if (len > maxlen) |
113 | return NULL; |
114 | *lenp = len; |
115 | *sp = (char *) p; |
116 | return p + XDR_QUADLEN(len); |
117 | } |
118 | EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); |
119 | |
120 | /** |
121 | * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf |
122 | * @buf: XDR buffer where string resides |
123 | * @len: length of string, in bytes |
124 | * |
125 | */ |
126 | void xdr_terminate_string(const struct xdr_buf *buf, const u32 len) |
127 | { |
128 | char *kaddr; |
129 | |
130 | kaddr = kmap_atomic(buf->pages[0]); |
131 | kaddr[buf->page_base + len] = '\0'; |
132 | kunmap_atomic(kaddr); |
133 | } |
134 | EXPORT_SYMBOL_GPL(xdr_terminate_string); |
135 | |
136 | size_t xdr_buf_pagecount(const struct xdr_buf *buf) |
137 | { |
138 | if (!buf->page_len) |
139 | return 0; |
140 | return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
141 | } |
142 | |
143 | int |
144 | xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp) |
145 | { |
146 | size_t i, n = xdr_buf_pagecount(buf); |
147 | |
148 | if (n != 0 && buf->bvec == NULL) { |
149 | buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp); |
150 | if (!buf->bvec) |
151 | return -ENOMEM; |
152 | for (i = 0; i < n; i++) { |
153 | bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE, |
154 | 0); |
155 | } |
156 | } |
157 | return 0; |
158 | } |
159 | |
160 | void |
161 | xdr_free_bvec(struct xdr_buf *buf) |
162 | { |
163 | kfree(buf->bvec); |
164 | buf->bvec = NULL; |
165 | } |
166 | |
167 | /** |
168 | * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array |
169 | * @bvec: bio_vec array to populate |
170 | * @bvec_size: element count of @bvec |
171 | * @xdr: xdr_buf to be copied |
172 | * |
173 | * Returns the number of entries consumed in @bvec. |
174 | */ |
175 | unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size, |
176 | const struct xdr_buf *xdr) |
177 | { |
178 | const struct kvec *head = xdr->head; |
179 | const struct kvec *tail = xdr->tail; |
180 | unsigned int count = 0; |
181 | |
182 | if (head->iov_len) { |
183 | bvec_set_virt(bvec++, head->iov_base, head->iov_len); |
184 | ++count; |
185 | } |
186 | |
187 | if (xdr->page_len) { |
188 | unsigned int offset, len, remaining; |
189 | struct page **pages = xdr->pages; |
190 | |
191 | offset = offset_in_page(xdr->page_base); |
192 | remaining = xdr->page_len; |
193 | while (remaining > 0) { |
194 | len = min_t(unsigned int, remaining, |
195 | PAGE_SIZE - offset); |
196 | bvec_set_page(bvec++, *pages++, len, offset); |
197 | remaining -= len; |
198 | offset = 0; |
199 | if (unlikely(++count > bvec_size)) |
200 | goto bvec_overflow; |
201 | } |
202 | } |
203 | |
204 | if (tail->iov_len) { |
205 | bvec_set_virt(bvec, tail->iov_base, tail->iov_len); |
206 | if (unlikely(++count > bvec_size)) |
207 | goto bvec_overflow; |
208 | } |
209 | |
210 | return count; |
211 | |
212 | bvec_overflow: |
213 | pr_warn_once("%s: bio_vec array overflow\n", __func__); |
214 | return count - 1; |
215 | } |
216 | |
217 | /** |
218 | * xdr_inline_pages - Prepare receive buffer for a large reply |
219 | * @xdr: xdr_buf into which reply will be placed |
220 | * @offset: expected offset where data payload will start, in bytes |
221 | * @pages: vector of struct page pointers |
222 | * @base: offset in first page where receive should start, in bytes |
223 | * @len: expected size of the upper layer data payload, in bytes |
224 | * |
225 | */ |
226 | void |
227 | xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, |
228 | struct page **pages, unsigned int base, unsigned int len) |
229 | { |
230 | struct kvec *head = xdr->head; |
231 | struct kvec *tail = xdr->tail; |
232 | char *buf = (char *)head->iov_base; |
233 | unsigned int buflen = head->iov_len; |
234 | |
235 | head->iov_len = offset; |
236 | |
237 | xdr->pages = pages; |
238 | xdr->page_base = base; |
239 | xdr->page_len = len; |
240 | |
241 | tail->iov_base = buf + offset; |
242 | tail->iov_len = buflen - offset; |
243 | xdr->buflen += len; |
244 | } |
245 | EXPORT_SYMBOL_GPL(xdr_inline_pages); |
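
/*
 * Usage sketch (illustrative only): preparing a receive buffer so that a
 * bulk payload lands in page memory rather than the head kvec. "buf",
 * "hdrlen" (the size of the fixed reply header, in bytes), "pages" and
 * "count" are hypothetical.
 *
 *	xdr_inline_pages(buf, hdrlen, pages, 0, count);
 *
 * Everything up to @offset stays in head[0], the payload is received into
 * @pages, and any trailing words are directed into tail[0].
 */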
246 | |
247 | /* |
248 | * Helper routines for doing 'memmove' like operations on a struct xdr_buf |
249 | */ |
250 | |
251 | /** |
252 | * _shift_data_left_pages |
253 | * @pages: vector of pages containing both the source and dest memory area. |
254 | * @pgto_base: page vector address of destination |
255 | * @pgfrom_base: page vector address of source |
256 | * @len: number of bytes to copy |
257 | * |
258 | * Note: the addresses pgto_base and pgfrom_base are both calculated in |
259 | * the same way: |
260 | * if a memory area starts at byte 'base' in page 'pages[i]', |
261 | * then its address is given as (i << PAGE_SHIFT) + base |
262 | * Also note: pgto_base must be < pgfrom_base, but the memory areas |
263 | * they point to may overlap. |
264 | */ |
265 | static void |
266 | _shift_data_left_pages(struct page **pages, size_t pgto_base, |
267 | size_t pgfrom_base, size_t len) |
268 | { |
269 | struct page **pgfrom, **pgto; |
270 | char *vfrom, *vto; |
271 | size_t copy; |
272 | |
273 | BUG_ON(pgfrom_base <= pgto_base); |
274 | |
275 | if (!len) |
276 | return; |
277 | |
278 | pgto = pages + (pgto_base >> PAGE_SHIFT); |
279 | pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); |
280 | |
281 | pgto_base &= ~PAGE_MASK; |
282 | pgfrom_base &= ~PAGE_MASK; |
283 | |
284 | do { |
285 | if (pgto_base >= PAGE_SIZE) { |
286 | pgto_base = 0; |
287 | pgto++; |
288 | } |
289 | if (pgfrom_base >= PAGE_SIZE) { |
290 | pgfrom_base = 0; |
291 | pgfrom++; |
292 | } |
293 | |
294 | copy = len; |
295 | if (copy > (PAGE_SIZE - pgto_base)) |
296 | copy = PAGE_SIZE - pgto_base; |
297 | if (copy > (PAGE_SIZE - pgfrom_base)) |
298 | copy = PAGE_SIZE - pgfrom_base; |
299 | |
300 | vto = kmap_atomic(*pgto); |
301 | if (*pgto != *pgfrom) { |
302 | vfrom = kmap_atomic(*pgfrom); |
303 | memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); |
304 | kunmap_atomic(vfrom); |
305 | } else |
306 | memmove(vto + pgto_base, vto + pgfrom_base, copy); |
307 | flush_dcache_page(*pgto); |
308 | kunmap_atomic(vto); |
309 | |
310 | pgto_base += copy; |
311 | pgfrom_base += copy; |
312 | |
313 | } while ((len -= copy) != 0); |
314 | } |
315 | |
316 | /** |
317 | * _shift_data_right_pages |
318 | * @pages: vector of pages containing both the source and dest memory area. |
319 | * @pgto_base: page vector address of destination |
320 | * @pgfrom_base: page vector address of source |
321 | * @len: number of bytes to copy |
322 | * |
323 | * Note: the addresses pgto_base and pgfrom_base are both calculated in |
324 | * the same way: |
325 | * if a memory area starts at byte 'base' in page 'pages[i]', |
326 | * then its address is given as (i << PAGE_SHIFT) + base |
327 | * Also note: pgfrom_base must be < pgto_base, but the memory areas |
328 | * they point to may overlap. |
329 | */ |
330 | static void |
331 | _shift_data_right_pages(struct page **pages, size_t pgto_base, |
332 | size_t pgfrom_base, size_t len) |
333 | { |
334 | struct page **pgfrom, **pgto; |
335 | char *vfrom, *vto; |
336 | size_t copy; |
337 | |
338 | BUG_ON(pgto_base <= pgfrom_base); |
339 | |
340 | if (!len) |
341 | return; |
342 | |
343 | pgto_base += len; |
344 | pgfrom_base += len; |
345 | |
346 | pgto = pages + (pgto_base >> PAGE_SHIFT); |
347 | pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); |
348 | |
349 | pgto_base &= ~PAGE_MASK; |
350 | pgfrom_base &= ~PAGE_MASK; |
351 | |
352 | do { |
353 | /* Are any pointers crossing a page boundary? */ |
354 | if (pgto_base == 0) { |
355 | pgto_base = PAGE_SIZE; |
356 | pgto--; |
357 | } |
358 | if (pgfrom_base == 0) { |
359 | pgfrom_base = PAGE_SIZE; |
360 | pgfrom--; |
361 | } |
362 | |
363 | copy = len; |
364 | if (copy > pgto_base) |
365 | copy = pgto_base; |
366 | if (copy > pgfrom_base) |
367 | copy = pgfrom_base; |
368 | pgto_base -= copy; |
369 | pgfrom_base -= copy; |
370 | |
371 | vto = kmap_atomic(*pgto); |
372 | if (*pgto != *pgfrom) { |
373 | vfrom = kmap_atomic(*pgfrom); |
374 | memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); |
375 | kunmap_atomic(vfrom); |
376 | } else |
377 | memmove(vto + pgto_base, vto + pgfrom_base, copy); |
378 | flush_dcache_page(*pgto); |
379 | kunmap_atomic(vto); |
380 | |
381 | } while ((len -= copy) != 0); |
382 | } |
383 | |
384 | /** |
385 | * _copy_to_pages |
386 | * @pages: array of pages |
387 | * @pgbase: page vector address of destination |
388 | * @p: pointer to source data |
389 | * @len: length |
390 | * |
391 | * Copies data from an arbitrary memory location into an array of pages |
392 | * The copy is assumed to be non-overlapping. |
393 | */ |
394 | static void |
395 | _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) |
396 | { |
397 | struct page **pgto; |
398 | char *vto; |
399 | size_t copy; |
400 | |
401 | if (!len) |
402 | return; |
403 | |
404 | pgto = pages + (pgbase >> PAGE_SHIFT); |
405 | pgbase &= ~PAGE_MASK; |
406 | |
407 | for (;;) { |
408 | copy = PAGE_SIZE - pgbase; |
409 | if (copy > len) |
410 | copy = len; |
411 | |
412 | vto = kmap_atomic(*pgto); |
413 | memcpy(vto + pgbase, p, copy); |
414 | kunmap_atomic(vto); |
415 | |
416 | len -= copy; |
417 | if (len == 0) |
418 | break; |
419 | |
420 | pgbase += copy; |
421 | if (pgbase == PAGE_SIZE) { |
422 | flush_dcache_page(*pgto); |
423 | pgbase = 0; |
424 | pgto++; |
425 | } |
426 | p += copy; |
427 | } |
428 | flush_dcache_page(*pgto); |
429 | } |
430 | |
431 | /** |
432 | * _copy_from_pages |
433 | * @p: pointer to destination |
434 | * @pages: array of pages |
435 | * @pgbase: offset of source data |
436 | * @len: length |
437 | * |
438 | * Copies data into an arbitrary memory location from an array of pages |
439 | * The copy is assumed to be non-overlapping. |
440 | */ |
441 | void |
442 | _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) |
443 | { |
444 | struct page **pgfrom; |
445 | char *vfrom; |
446 | size_t copy; |
447 | |
448 | if (!len) |
449 | return; |
450 | |
451 | pgfrom = pages + (pgbase >> PAGE_SHIFT); |
452 | pgbase &= ~PAGE_MASK; |
453 | |
454 | do { |
455 | copy = PAGE_SIZE - pgbase; |
456 | if (copy > len) |
457 | copy = len; |
458 | |
459 | vfrom = kmap_atomic(*pgfrom); |
460 | memcpy(p, vfrom + pgbase, copy); |
461 | kunmap_atomic(vfrom); |
462 | |
463 | pgbase += copy; |
464 | if (pgbase == PAGE_SIZE) { |
465 | pgbase = 0; |
466 | pgfrom++; |
467 | } |
468 | p += copy; |
469 | |
470 | } while ((len -= copy) != 0); |
471 | } |
472 | EXPORT_SYMBOL_GPL(_copy_from_pages); |
473 | |
474 | static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base, |
475 | unsigned int len) |
476 | { |
477 | if (base >= iov->iov_len) |
478 | return; |
479 | if (len > iov->iov_len - base) |
480 | len = iov->iov_len - base; |
481 | memset(iov->iov_base + base, 0, len); |
482 | } |
483 | |
484 | /** |
485 | * xdr_buf_pages_zero |
486 | * @buf: xdr_buf |
487 | * @pgbase: beginning offset |
488 | * @len: length |
489 | */ |
490 | static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase, |
491 | unsigned int len) |
492 | { |
493 | struct page **pages = buf->pages; |
494 | struct page **page; |
495 | char *vpage; |
496 | unsigned int zero; |
497 | |
498 | if (!len) |
499 | return; |
500 | if (pgbase >= buf->page_len) { |
501 | xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len); |
502 | return; |
503 | } |
504 | if (pgbase + len > buf->page_len) { |
505 | xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len); |
506 | len = buf->page_len - pgbase; |
507 | } |
508 | |
509 | pgbase += buf->page_base; |
510 | |
511 | page = pages + (pgbase >> PAGE_SHIFT); |
512 | pgbase &= ~PAGE_MASK; |
513 | |
514 | do { |
515 | zero = PAGE_SIZE - pgbase; |
516 | if (zero > len) |
517 | zero = len; |
518 | |
519 | vpage = kmap_atomic(*page); |
520 | memset(vpage + pgbase, 0, zero); |
521 | kunmap_atomic(vpage); |
522 | |
523 | flush_dcache_page(*page); |
524 | pgbase = 0; |
525 | page++; |
526 | |
527 | } while ((len -= zero) != 0); |
528 | } |
529 | |
530 | static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf, |
531 | unsigned int buflen, gfp_t gfp) |
532 | { |
533 | unsigned int i, npages, pagelen; |
534 | |
535 | if (!(buf->flags & XDRBUF_SPARSE_PAGES)) |
536 | return buflen; |
537 | if (buflen <= buf->head->iov_len) |
538 | return buflen; |
539 | pagelen = buflen - buf->head->iov_len; |
540 | if (pagelen > buf->page_len) |
541 | pagelen = buf->page_len; |
542 | npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT; |
543 | for (i = 0; i < npages; i++) { |
544 | if (buf->pages[i]) |
545 | continue; |
546 | buf->pages[i] = alloc_page(gfp); |
547 | if (likely(buf->pages[i])) |
548 | continue; |
549 | buflen -= pagelen; |
550 | pagelen = i << PAGE_SHIFT; |
551 | if (pagelen > buf->page_base) |
552 | buflen += pagelen - buf->page_base; |
553 | break; |
554 | } |
555 | return buflen; |
556 | } |
557 | |
558 | static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len) |
559 | { |
560 | struct kvec *head = buf->head; |
561 | struct kvec *tail = buf->tail; |
562 | unsigned int sum = head->iov_len + buf->page_len + tail->iov_len; |
563 | unsigned int free_space, newlen; |
564 | |
565 | if (sum > buf->len) { |
566 | free_space = min_t(unsigned int, sum - buf->len, len); |
567 | newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space, |
568 | GFP_KERNEL); |
569 | free_space = newlen - buf->len; |
570 | buf->len = newlen; |
571 | len -= free_space; |
572 | if (!len) |
573 | return; |
574 | } |
575 | |
576 | if (buf->buflen > sum) { |
577 | /* Expand the tail buffer */ |
578 | free_space = min_t(unsigned int, buf->buflen - sum, len); |
579 | tail->iov_len += free_space; |
580 | buf->len += free_space; |
581 | } |
582 | } |
583 | |
584 | static void xdr_buf_tail_copy_right(const struct xdr_buf *buf, |
585 | unsigned int base, unsigned int len, |
586 | unsigned int shift) |
587 | { |
588 | const struct kvec *tail = buf->tail; |
589 | unsigned int to = base + shift; |
590 | |
591 | if (to >= tail->iov_len) |
592 | return; |
593 | if (len + to > tail->iov_len) |
594 | len = tail->iov_len - to; |
595 | memmove(tail->iov_base + to, tail->iov_base + base, len); |
596 | } |
597 | |
598 | static void xdr_buf_pages_copy_right(const struct xdr_buf *buf, |
599 | unsigned int base, unsigned int len, |
600 | unsigned int shift) |
601 | { |
602 | const struct kvec *tail = buf->tail; |
603 | unsigned int to = base + shift; |
604 | unsigned int pglen = 0; |
605 | unsigned int talen = 0, tato = 0; |
606 | |
607 | if (base >= buf->page_len) |
608 | return; |
609 | if (len > buf->page_len - base) |
610 | len = buf->page_len - base; |
611 | if (to >= buf->page_len) { |
612 | tato = to - buf->page_len; |
613 | if (tail->iov_len >= len + tato) |
614 | talen = len; |
615 | else if (tail->iov_len > tato) |
616 | talen = tail->iov_len - tato; |
617 | } else if (len + to >= buf->page_len) { |
618 | pglen = buf->page_len - to; |
619 | talen = len - pglen; |
620 | if (talen > tail->iov_len) |
621 | talen = tail->iov_len; |
622 | } else |
623 | pglen = len; |
624 | |
625 | _copy_from_pages(tail->iov_base + tato, buf->pages, |
626 | buf->page_base + base + pglen, talen); |
627 | _shift_data_right_pages(buf->pages, buf->page_base + to, |
628 | buf->page_base + base, pglen); |
629 | } |
630 | |
631 | static void xdr_buf_head_copy_right(const struct xdr_buf *buf, |
632 | unsigned int base, unsigned int len, |
633 | unsigned int shift) |
634 | { |
635 | const struct kvec *head = buf->head; |
636 | const struct kvec *tail = buf->tail; |
637 | unsigned int to = base + shift; |
638 | unsigned int pglen = 0, pgto = 0; |
639 | unsigned int talen = 0, tato = 0; |
640 | |
641 | if (base >= head->iov_len) |
642 | return; |
643 | if (len > head->iov_len - base) |
644 | len = head->iov_len - base; |
645 | if (to >= buf->page_len + head->iov_len) { |
646 | tato = to - buf->page_len - head->iov_len; |
647 | talen = len; |
648 | } else if (to >= head->iov_len) { |
649 | pgto = to - head->iov_len; |
650 | pglen = len; |
651 | if (pgto + pglen > buf->page_len) { |
652 | talen = pgto + pglen - buf->page_len; |
653 | pglen -= talen; |
654 | } |
655 | } else { |
656 | pglen = len - to; |
657 | if (pglen > buf->page_len) { |
658 | talen = pglen - buf->page_len; |
659 | pglen = buf->page_len; |
660 | } |
661 | } |
662 | |
663 | len -= talen; |
664 | base += len; |
665 | if (talen + tato > tail->iov_len) |
666 | talen = tail->iov_len > tato ? tail->iov_len - tato : 0; |
667 | memcpy(tail->iov_base + tato, head->iov_base + base, talen); |
668 | |
669 | len -= pglen; |
670 | base -= pglen; |
671 | _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base, |
672 | pglen); |
673 | |
674 | base -= len; |
675 | memmove(head->iov_base + to, head->iov_base + base, len); |
676 | } |
677 | |
678 | static void xdr_buf_tail_shift_right(const struct xdr_buf *buf, |
679 | unsigned int base, unsigned int len, |
680 | unsigned int shift) |
681 | { |
682 | const struct kvec *tail = buf->tail; |
683 | |
684 | if (base >= tail->iov_len || !shift || !len) |
685 | return; |
686 | xdr_buf_tail_copy_right(buf, base, len, shift); |
687 | } |
688 | |
689 | static void xdr_buf_pages_shift_right(const struct xdr_buf *buf, |
690 | unsigned int base, unsigned int len, |
691 | unsigned int shift) |
692 | { |
693 | if (!shift || !len) |
694 | return; |
695 | if (base >= buf->page_len) { |
696 | xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift); |
697 | return; |
698 | } |
699 | if (base + len > buf->page_len) |
700 | xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len, |
701 | shift); |
702 | xdr_buf_pages_copy_right(buf, base, len, shift); |
703 | } |
704 | |
705 | static void xdr_buf_head_shift_right(const struct xdr_buf *buf, |
706 | unsigned int base, unsigned int len, |
707 | unsigned int shift) |
708 | { |
709 | const struct kvec *head = buf->head; |
710 | |
711 | if (!shift) |
712 | return; |
713 | if (base >= head->iov_len) { |
714 | xdr_buf_pages_shift_right(buf, base - head->iov_len, len, |
715 | shift); |
716 | return; |
717 | } |
718 | if (base + len > head->iov_len) |
719 | xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len, |
720 | shift); |
721 | xdr_buf_head_copy_right(buf, base, len, shift); |
722 | } |
723 | |
724 | static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base, |
725 | unsigned int len, unsigned int shift) |
726 | { |
727 | const struct kvec *tail = buf->tail; |
728 | |
729 | if (base >= tail->iov_len) |
730 | return; |
731 | if (len > tail->iov_len - base) |
732 | len = tail->iov_len - base; |
733 | /* Shift data into head */ |
734 | if (shift > buf->page_len + base) { |
735 | const struct kvec *head = buf->head; |
736 | unsigned int hdto = |
737 | head->iov_len + buf->page_len + base - shift; |
738 | unsigned int hdlen = len; |
739 | |
740 | if (WARN_ONCE(shift > head->iov_len + buf->page_len + base, |
741 | "SUNRPC: Misaligned data.\n")) |
742 | return; |
743 | if (hdto + hdlen > head->iov_len) |
744 | hdlen = head->iov_len - hdto; |
745 | memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen); |
746 | base += hdlen; |
747 | len -= hdlen; |
748 | if (!len) |
749 | return; |
750 | } |
751 | /* Shift data into pages */ |
752 | if (shift > base) { |
753 | unsigned int pgto = buf->page_len + base - shift; |
754 | unsigned int pglen = len; |
755 | |
756 | if (pgto + pglen > buf->page_len) |
757 | pglen = buf->page_len - pgto; |
758 | _copy_to_pages(buf->pages, buf->page_base + pgto, |
759 | tail->iov_base + base, pglen); |
760 | base += pglen; |
761 | len -= pglen; |
762 | if (!len) |
763 | return; |
764 | } |
765 | memmove(tail->iov_base + base - shift, tail->iov_base + base, len); |
766 | } |
767 | |
768 | static void xdr_buf_pages_copy_left(const struct xdr_buf *buf, |
769 | unsigned int base, unsigned int len, |
770 | unsigned int shift) |
771 | { |
772 | unsigned int pgto; |
773 | |
774 | if (base >= buf->page_len) |
775 | return; |
776 | if (len > buf->page_len - base) |
777 | len = buf->page_len - base; |
778 | /* Shift data into head */ |
779 | if (shift > base) { |
780 | const struct kvec *head = buf->head; |
781 | unsigned int hdto = head->iov_len + base - shift; |
782 | unsigned int hdlen = len; |
783 | |
784 | if (WARN_ONCE(shift > head->iov_len + base, |
785 | "SUNRPC: Misaligned data.\n")) |
786 | return; |
787 | if (hdto + hdlen > head->iov_len) |
788 | hdlen = head->iov_len - hdto; |
789 | _copy_from_pages(head->iov_base + hdto, buf->pages, |
790 | buf->page_base + base, hdlen); |
791 | base += hdlen; |
792 | len -= hdlen; |
793 | if (!len) |
794 | return; |
795 | } |
796 | pgto = base - shift; |
797 | _shift_data_left_pages(buf->pages, buf->page_base + pgto, |
798 | buf->page_base + base, len); |
799 | } |
800 | |
801 | static void xdr_buf_tail_shift_left(const struct xdr_buf *buf, |
802 | unsigned int base, unsigned int len, |
803 | unsigned int shift) |
804 | { |
805 | if (!shift || !len) |
806 | return; |
807 | xdr_buf_tail_copy_left(buf, base, len, shift); |
808 | } |
809 | |
810 | static void xdr_buf_pages_shift_left(const struct xdr_buf *buf, |
811 | unsigned int base, unsigned int len, |
812 | unsigned int shift) |
813 | { |
814 | if (!shift || !len) |
815 | return; |
816 | if (base >= buf->page_len) { |
817 | xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift); |
818 | return; |
819 | } |
820 | xdr_buf_pages_copy_left(buf, base, len, shift); |
821 | len += base; |
822 | if (len <= buf->page_len) |
823 | return; |
824 | xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift); |
825 | } |
826 | |
827 | static void xdr_buf_head_shift_left(const struct xdr_buf *buf, |
828 | unsigned int base, unsigned int len, |
829 | unsigned int shift) |
830 | { |
831 | const struct kvec *head = buf->head; |
832 | unsigned int bytes; |
833 | |
834 | if (!shift || !len) |
835 | return; |
836 | |
837 | if (shift > base) { |
838 | bytes = (shift - base); |
839 | if (bytes >= len) |
840 | return; |
841 | base += bytes; |
842 | len -= bytes; |
843 | } |
844 | |
845 | if (base < head->iov_len) { |
846 | bytes = min_t(unsigned int, len, head->iov_len - base); |
847 | memmove(head->iov_base + (base - shift), |
848 | head->iov_base + base, bytes); |
849 | base += bytes; |
850 | len -= bytes; |
851 | } |
852 | xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift); |
853 | } |
854 | |
855 | /** |
856 | * xdr_shrink_bufhead |
857 | * @buf: xdr_buf |
858 | * @len: new length of buf->head[0] |
859 | * |
860 | * Shrinks XDR buffer's header kvec buf->head[0], setting it to |
861 | * 'len' bytes. The extra data is not lost, but is instead |
862 | * moved into the inlined pages and/or the tail. |
863 | */ |
864 | static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len) |
865 | { |
866 | struct kvec *head = buf->head; |
867 | unsigned int shift, buflen = max(buf->len, len); |
868 | |
869 | WARN_ON_ONCE(len > head->iov_len); |
870 | if (head->iov_len > buflen) { |
871 | buf->buflen -= head->iov_len - buflen; |
872 | head->iov_len = buflen; |
873 | } |
874 | if (len >= head->iov_len) |
875 | return 0; |
876 | shift = head->iov_len - len; |
877 | xdr_buf_try_expand(buf, shift); |
878 | xdr_buf_head_shift_right(buf, len, buflen - len, shift); |
879 | head->iov_len = len; |
880 | buf->buflen -= shift; |
881 | buf->len -= shift; |
882 | return shift; |
883 | } |
884 | |
885 | /** |
886 | * xdr_shrink_pagelen - shrinks buf->pages to @len bytes |
887 | * @buf: xdr_buf |
888 | * @len: new page buffer length |
889 | * |
890 | * The extra data is not lost, but is instead moved into buf->tail. |
891 | * Returns the actual number of bytes moved. |
892 | */ |
893 | static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len) |
894 | { |
895 | unsigned int shift, buflen = buf->len - buf->head->iov_len; |
896 | |
897 | WARN_ON_ONCE(len > buf->page_len); |
898 | if (buf->head->iov_len >= buf->len || len > buflen) |
899 | buflen = len; |
900 | if (buf->page_len > buflen) { |
901 | buf->buflen -= buf->page_len - buflen; |
902 | buf->page_len = buflen; |
903 | } |
904 | if (len >= buf->page_len) |
905 | return 0; |
906 | shift = buf->page_len - len; |
907 | xdr_buf_try_expand(buf, shift); |
908 | xdr_buf_pages_shift_right(buf, len, buflen - len, shift); |
909 | buf->page_len = len; |
910 | buf->len -= shift; |
911 | buf->buflen -= shift; |
912 | return shift; |
913 | } |
914 | |
915 | /** |
916 | * xdr_stream_pos - Return the current offset from the start of the xdr_stream |
917 | * @xdr: pointer to struct xdr_stream |
918 | */ |
919 | unsigned int xdr_stream_pos(const struct xdr_stream *xdr) |
920 | { |
921 | return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2; |
922 | } |
923 | EXPORT_SYMBOL_GPL(xdr_stream_pos); |
924 | |
925 | static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos) |
926 | { |
927 | unsigned int blen = xdr->buf->len; |
928 | |
929 | xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0; |
930 | } |
931 | |
932 | static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos) |
933 | { |
934 | xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len); |
935 | } |
936 | |
937 | /** |
938 | * xdr_page_pos - Return the current offset from the start of the xdr pages |
939 | * @xdr: pointer to struct xdr_stream |
940 | */ |
941 | unsigned int xdr_page_pos(const struct xdr_stream *xdr) |
942 | { |
943 | unsigned int pos = xdr_stream_pos(xdr); |
944 | |
945 | WARN_ON(pos < xdr->buf->head[0].iov_len); |
946 | return pos - xdr->buf->head[0].iov_len; |
947 | } |
948 | EXPORT_SYMBOL_GPL(xdr_page_pos); |
949 | |
950 | /** |
951 | * xdr_init_encode - Initialize a struct xdr_stream for sending data. |
952 | * @xdr: pointer to xdr_stream struct |
953 | * @buf: pointer to XDR buffer in which to encode data |
954 | * @p: current pointer inside XDR buffer |
955 | * @rqst: pointer to controlling rpc_rqst, for debugging |
956 | * |
957 | * Note: at the moment the RPC client only passes the length of our |
958 | * scratch buffer in the xdr_buf's header kvec. Previously this |
959 | * meant we needed to call xdr_adjust_iovec() after encoding the |
960 | * data. With the new scheme, the xdr_stream manages the details |
961 | * of the buffer length, and takes care of adjusting the kvec |
962 | * length for us. |
963 | */ |
964 | void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, |
965 | struct rpc_rqst *rqst) |
966 | { |
967 | struct kvec *iov = buf->head; |
968 | int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; |
969 | |
970 | xdr_reset_scratch_buffer(xdr); |
971 | BUG_ON(scratch_len < 0); |
972 | xdr->buf = buf; |
973 | xdr->iov = iov; |
974 | xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len); |
975 | xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len); |
976 | BUG_ON(iov->iov_len > scratch_len); |
977 | |
978 | if (p != xdr->p && p != NULL) { |
979 | size_t len; |
980 | |
981 | BUG_ON(p < xdr->p || p > xdr->end); |
982 | len = (char *)p - (char *)xdr->p; |
983 | xdr->p = p; |
984 | buf->len += len; |
985 | iov->iov_len += len; |
986 | } |
987 | xdr->rqst = rqst; |
988 | } |
989 | EXPORT_SYMBOL_GPL(xdr_init_encode); |
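
/*
 * Usage sketch (illustrative only) of a typical encode sequence; the
 * request layout and the encode helper below are hypothetical:
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, req->rq_buffer, req);
 *	encode_args(&xdr, args);
 *
 * encode_args() would then obtain space with xdr_reserve_space() and fill
 * it with the xdr_encode_* helpers above.
 */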
990 | |
991 | /** |
992 | * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages |
993 | * @xdr: pointer to xdr_stream struct |
994 | * @buf: pointer to XDR buffer into which to encode data |
995 | * @pages: list of pages to encode into |
996 | * @rqst: pointer to controlling rpc_rqst, for debugging |
997 | * |
998 | */ |
999 | void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, |
1000 | struct page **pages, struct rpc_rqst *rqst) |
1001 | { |
1002 | xdr_reset_scratch_buffer(xdr); |
1003 | |
1004 | xdr->buf = buf; |
1005 | xdr->page_ptr = pages; |
1006 | xdr->iov = NULL; |
1007 | xdr->p = page_address(*pages); |
1008 | xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE); |
1009 | xdr->rqst = rqst; |
1010 | } |
1011 | EXPORT_SYMBOL_GPL(xdr_init_encode_pages); |
1012 | |
1013 | /** |
1014 | * __xdr_commit_encode - Ensure all data is written to buffer |
1015 | * @xdr: pointer to xdr_stream |
1016 | * |
1017 | * We handle encoding across page boundaries by giving the caller a |
1018 | * temporary location to write to, then later copying the data into |
1019 | * place; xdr_commit_encode does that copying. |
1020 | * |
1021 | * Normally the caller doesn't need to call this directly, as the |
1022 | * following xdr_reserve_space will do it. But an explicit call may be |
1023 | * required at the end of encoding, or any other time when the xdr_buf |
1024 | * data might be read. |
1025 | */ |
1026 | void __xdr_commit_encode(struct xdr_stream *xdr) |
1027 | { |
1028 | size_t shift = xdr->scratch.iov_len; |
1029 | void *page; |
1030 | |
1031 | page = page_address(*xdr->page_ptr); |
1032 | memcpy(xdr->scratch.iov_base, page, shift); |
1033 | memmove(page, page + shift, (void *)xdr->p - page); |
1034 | xdr_reset_scratch_buffer(xdr); |
1035 | } |
1036 | EXPORT_SYMBOL_GPL(__xdr_commit_encode); |
1037 | |
1038 | /* |
1039 | * The buffer space to be reserved crosses the boundary between |
1040 | * xdr->buf->head and xdr->buf->pages, or between two pages |
1041 | * in xdr->buf->pages. |
1042 | */ |
1043 | static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, |
1044 | size_t nbytes) |
1045 | { |
1046 | int space_left; |
1047 | int frag1bytes, frag2bytes; |
1048 | void *p; |
1049 | |
1050 | if (nbytes > PAGE_SIZE) |
1051 | goto out_overflow; /* Bigger buffers require special handling */ |
1052 | if (xdr->buf->len + nbytes > xdr->buf->buflen) |
1053 | goto out_overflow; /* Sorry, we're totally out of space */ |
1054 | frag1bytes = (xdr->end - xdr->p) << 2; |
1055 | frag2bytes = nbytes - frag1bytes; |
1056 | if (xdr->iov) |
1057 | xdr->iov->iov_len += frag1bytes; |
1058 | else |
1059 | xdr->buf->page_len += frag1bytes; |
1060 | xdr->page_ptr++; |
1061 | xdr->iov = NULL; |
1062 | |
1063 | /* |
1064 | * If the last encode didn't end exactly on a page boundary, the |
1065 | * next one will straddle boundaries. Encode into the next |
1066 | * page, then copy it back later in xdr_commit_encode. We use |
1067 | * the "scratch" iov to track any temporarily unused fragment of |
1068 | * space at the end of the previous buffer: |
1069 | */ |
1070 | xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes); |
1071 | |
1072 | /* |
1073 | * xdr->p is where the next encode will start after |
1074 | * xdr_commit_encode() has shifted this one back: |
1075 | */ |
1076 | p = page_address(*xdr->page_ptr); |
1077 | xdr->p = p + frag2bytes; |
1078 | space_left = xdr->buf->buflen - xdr->buf->len; |
1079 | if (space_left - frag1bytes >= PAGE_SIZE) |
1080 | xdr->end = p + PAGE_SIZE; |
1081 | else |
1082 | xdr->end = p + space_left - frag1bytes; |
1083 | |
1084 | xdr->buf->page_len += frag2bytes; |
1085 | xdr->buf->len += nbytes; |
1086 | return p; |
1087 | out_overflow: |
1088 | trace_rpc_xdr_overflow(xdr, nbytes); |
1089 | return NULL; |
1090 | } |
1091 | |
1092 | /** |
1093 | * xdr_reserve_space - Reserve buffer space for sending |
1094 | * @xdr: pointer to xdr_stream |
1095 | * @nbytes: number of bytes to reserve |
1096 | * |
1097 | * Checks that we have enough buffer space to encode 'nbytes' more |
1098 | * bytes of data. If so, update the total xdr_buf length, and |
1099 | * adjust the length of the current kvec. |
1100 | */ |
1101 | __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) |
1102 | { |
1103 | __be32 *p = xdr->p; |
1104 | __be32 *q; |
1105 | |
1106 | xdr_commit_encode(xdr); |
1107 | /* align nbytes on the next 32-bit boundary */ |
1108 | nbytes += 3; |
1109 | nbytes &= ~3; |
1110 | q = p + (nbytes >> 2); |
1111 | if (unlikely(q > xdr->end || q < p)) |
1112 | return xdr_get_next_encode_buffer(xdr, nbytes); |
1113 | xdr->p = q; |
1114 | if (xdr->iov) |
1115 | xdr->iov->iov_len += nbytes; |
1116 | else |
1117 | xdr->buf->page_len += nbytes; |
1118 | xdr->buf->len += nbytes; |
1119 | return p; |
1120 | } |
1121 | EXPORT_SYMBOL_GPL(xdr_reserve_space); |
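
/*
 * Usage sketch (illustrative only): reserving room for a 32-bit word plus
 * an XDR-padded opaque, then filling it. Names are hypothetical.
 *
 *	p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(len));
 *	if (!p)
 *		return -EMSGSIZE;
 *	xdr_encode_opaque(p, data, len);
 *
 * The returned pointer should be treated as valid only until the next
 * xdr_reserve_space() call, since that may trigger xdr_commit_encode().
 */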
1122 | |
1123 | /** |
1124 | * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending |
1125 | * @xdr: pointer to xdr_stream |
1126 | * @nbytes: number of bytes to reserve |
1127 | * |
1128 | * The size argument passed to xdr_reserve_space() is determined based |
1129 | * on the number of bytes remaining in the current page to avoid |
1130 | * invalidating iov_base pointers when xdr_commit_encode() is called. |
1131 | * |
1132 | * Return values: |
1133 | * %0: success |
1134 | * %-EMSGSIZE: not enough space is available in @xdr |
1135 | */ |
1136 | int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes) |
1137 | { |
1138 | size_t thislen; |
1139 | __be32 *p; |
1140 | |
1141 | /* |
1142 | * svcrdma requires every READ payload to start somewhere |
1143 | * in xdr->pages. |
1144 | */ |
1145 | if (xdr->iov == xdr->buf->head) { |
1146 | xdr->iov = NULL; |
1147 | xdr->end = xdr->p; |
1148 | } |
1149 | |
1150 | /* XXX: Let's find a way to make this more efficient */ |
1151 | while (nbytes) { |
1152 | thislen = xdr->buf->page_len % PAGE_SIZE; |
1153 | thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen); |
1154 | |
1155 | p = xdr_reserve_space(xdr, thislen); |
1156 | if (!p) |
1157 | return -EMSGSIZE; |
1158 | |
1159 | nbytes -= thislen; |
1160 | } |
1161 | |
1162 | return 0; |
1163 | } |
1164 | EXPORT_SYMBOL_GPL(xdr_reserve_space_vec); |
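
/*
 * Usage sketch (illustrative only): reserving room for a READ payload that
 * must start in xdr->pages. "count" is hypothetical.
 *
 *	if (xdr_reserve_space_vec(xdr, count) < 0)
 *		return -EMSGSIZE;
 *
 * Because the space is reserved page by page, no single pointer covers the
 * whole range; callers fill the payload through the xdr_buf page vector.
 */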
1165 | |
1166 | /** |
1167 | * xdr_truncate_encode - truncate an encode buffer |
1168 | * @xdr: pointer to xdr_stream |
1169 | * @len: new length of buffer |
1170 | * |
1171 | * Truncates the xdr stream, so that xdr->buf->len == len, |
1172 | * and xdr->p points at offset len from the start of the buffer, and |
1173 | * head, tail, and page lengths are adjusted to correspond. |
1174 | * |
1175 | * If this means moving xdr->p to a different buffer, we assume that |
1176 | * the end pointer should be set to the end of the current page, |
1177 | * except in the case of the head buffer when we assume the head |
1178 | * buffer's current length represents the end of the available buffer. |
1179 | * |
1180 | * This is *not* safe to use on a buffer that already has inlined page |
1181 | * cache pages (as in a zero-copy server read reply), except for the |
1182 | * simple case of truncating from one position in the tail to another. |
1183 | * |
1184 | */ |
1185 | void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) |
1186 | { |
1187 | struct xdr_buf *buf = xdr->buf; |
1188 | struct kvec *head = buf->head; |
1189 | struct kvec *tail = buf->tail; |
1190 | int fraglen; |
1191 | int new; |
1192 | |
1193 | if (len > buf->len) { |
1194 | WARN_ON_ONCE(1); |
1195 | return; |
1196 | } |
1197 | xdr_commit_encode(xdr); |
1198 | |
1199 | fraglen = min_t(int, buf->len - len, tail->iov_len); |
1200 | tail->iov_len -= fraglen; |
1201 | buf->len -= fraglen; |
1202 | if (tail->iov_len) { |
1203 | xdr->p = tail->iov_base + tail->iov_len; |
1204 | WARN_ON_ONCE(!xdr->end); |
1205 | WARN_ON_ONCE(!xdr->iov); |
1206 | return; |
1207 | } |
1208 | WARN_ON_ONCE(fraglen); |
1209 | fraglen = min_t(int, buf->len - len, buf->page_len); |
1210 | buf->page_len -= fraglen; |
1211 | buf->len -= fraglen; |
1212 | |
1213 | new = buf->page_base + buf->page_len; |
1214 | |
1215 | xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); |
1216 | |
1217 | if (buf->page_len) { |
1218 | xdr->p = page_address(*xdr->page_ptr); |
1219 | xdr->end = (void *)xdr->p + PAGE_SIZE; |
1220 | xdr->p = (void *)xdr->p + (new % PAGE_SIZE); |
1221 | WARN_ON_ONCE(xdr->iov); |
1222 | return; |
1223 | } |
1224 | if (fraglen) |
1225 | xdr->end = head->iov_base + head->iov_len; |
1226 | /* (otherwise assume xdr->end is already set) */ |
1227 | xdr->page_ptr--; |
1228 | head->iov_len = len; |
1229 | buf->len = len; |
1230 | xdr->p = head->iov_base + head->iov_len; |
1231 | xdr->iov = buf->head; |
1232 | } |
1233 | EXPORT_SYMBOL(xdr_truncate_encode); |
1234 | |
1235 | /** |
1236 | * xdr_truncate_decode - Truncate a decoding stream |
1237 | * @xdr: pointer to struct xdr_stream |
1238 | * @len: Number of bytes to remove |
1239 | * |
1240 | */ |
1241 | void xdr_truncate_decode(struct xdr_stream *xdr, size_t len) |
1242 | { |
1243 | unsigned int nbytes = xdr_align_size(len); |
1244 | |
1245 | xdr->buf->len -= nbytes; |
1246 | xdr->nwords -= XDR_QUADLEN(nbytes); |
1247 | } |
1248 | EXPORT_SYMBOL_GPL(xdr_truncate_decode); |
1249 | |
1250 | /** |
1251 | * xdr_restrict_buflen - decrease available buffer space |
1252 | * @xdr: pointer to xdr_stream |
1253 | * @newbuflen: new maximum number of bytes available |
1254 | * |
1255 | * Adjust our idea of how much space is available in the buffer. |
1256 | * If we've already used too much space in the buffer, returns -1. |
1257 | * If the available space is already smaller than newbuflen, returns 0 |
1258 | * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen |
1259 | * and ensures xdr->end is set at most offset newbuflen from the start |
1260 | * of the buffer. |
1261 | */ |
1262 | int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen) |
1263 | { |
1264 | struct xdr_buf *buf = xdr->buf; |
1265 | int left_in_this_buf = (void *)xdr->end - (void *)xdr->p; |
1266 | int end_offset = buf->len + left_in_this_buf; |
1267 | |
1268 | if (newbuflen < 0 || newbuflen < buf->len) |
1269 | return -1; |
1270 | if (newbuflen > buf->buflen) |
1271 | return 0; |
1272 | if (newbuflen < end_offset) |
1273 | xdr->end = (void *)xdr->end + newbuflen - end_offset; |
1274 | buf->buflen = newbuflen; |
1275 | return 0; |
1276 | } |
1277 | EXPORT_SYMBOL(xdr_restrict_buflen); |
1278 | |
1279 | /** |
1280 | * xdr_write_pages - Insert a list of pages into an XDR buffer for sending |
1281 | * @xdr: pointer to xdr_stream |
1282 | * @pages: array of pages to insert |
1283 | * @base: starting offset of first data byte in @pages |
1284 | * @len: number of data bytes in @pages to insert |
1285 | * |
1286 | * After the @pages are added, the tail iovec is instantiated pointing to |
1287 | * end of the head buffer, and the stream is set up to encode subsequent |
1288 | * items into the tail. |
1289 | */ |
1290 | void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, |
1291 | unsigned int len) |
1292 | { |
1293 | struct xdr_buf *buf = xdr->buf; |
1294 | struct kvec *tail = buf->tail; |
1295 | |
1296 | buf->pages = pages; |
1297 | buf->page_base = base; |
1298 | buf->page_len = len; |
1299 | |
1300 | tail->iov_base = xdr->p; |
1301 | tail->iov_len = 0; |
1302 | xdr->iov = tail; |
1303 | |
1304 | if (len & 3) { |
1305 | unsigned int pad = 4 - (len & 3); |
1306 | |
1307 | BUG_ON(xdr->p >= xdr->end); |
1308 | tail->iov_base = (char *)xdr->p + (len & 3); |
1309 | tail->iov_len += pad; |
1310 | len += pad; |
1311 | *xdr->p++ = 0; |
1312 | } |
1313 | buf->buflen += len; |
1314 | buf->len += len; |
1315 | } |
1316 | EXPORT_SYMBOL_GPL(xdr_write_pages); |
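
/*
 * Usage sketch (illustrative only): appending a page-based payload while
 * encoding a call. The page vector, offset and length are hypothetical.
 *
 *	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
 *
 * Anything encoded after this call is placed in buf->tail, since xdr->iov
 * now points at the tail kvec.
 */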
1317 | |
1318 | static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, |
1319 | unsigned int base, unsigned int len) |
1320 | { |
1321 | if (len > iov->iov_len) |
1322 | len = iov->iov_len; |
1323 | if (unlikely(base > len)) |
1324 | base = len; |
1325 | xdr->p = (__be32*)(iov->iov_base + base); |
1326 | xdr->end = (__be32*)(iov->iov_base + len); |
1327 | xdr->iov = iov; |
1328 | xdr->page_ptr = NULL; |
1329 | return len - base; |
1330 | } |
1331 | |
1332 | static unsigned int xdr_set_tail_base(struct xdr_stream *xdr, |
1333 | unsigned int base, unsigned int len) |
1334 | { |
1335 | struct xdr_buf *buf = xdr->buf; |
1336 | |
1337 | xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len); |
1338 | return xdr_set_iov(xdr, buf->tail, base, len); |
1339 | } |
1340 | |
1341 | static void xdr_stream_unmap_current_page(struct xdr_stream *xdr) |
1342 | { |
1343 | if (xdr->page_kaddr) { |
1344 | kunmap_local(xdr->page_kaddr); |
1345 | xdr->page_kaddr = NULL; |
1346 | } |
1347 | } |
1348 | |
1349 | static unsigned int xdr_set_page_base(struct xdr_stream *xdr, |
1350 | unsigned int base, unsigned int len) |
1351 | { |
1352 | unsigned int pgnr; |
1353 | unsigned int maxlen; |
1354 | unsigned int pgoff; |
1355 | unsigned int pgend; |
1356 | void *kaddr; |
1357 | |
1358 | maxlen = xdr->buf->page_len; |
1359 | if (base >= maxlen) |
1360 | return 0; |
1361 | else |
1362 | maxlen -= base; |
1363 | if (len > maxlen) |
1364 | len = maxlen; |
1365 | |
1366 | xdr_stream_unmap_current_page(xdr); |
1367 | xdr_stream_page_set_pos(xdr, base); |
1368 | base += xdr->buf->page_base; |
1369 | |
1370 | pgnr = base >> PAGE_SHIFT; |
1371 | xdr->page_ptr = &xdr->buf->pages[pgnr]; |
1372 | |
1373 | if (PageHighMem(*xdr->page_ptr)) { |
1374 | xdr->page_kaddr = kmap_local_page(*xdr->page_ptr); |
1375 | kaddr = xdr->page_kaddr; |
1376 | } else |
1377 | kaddr = page_address(*xdr->page_ptr); |
1378 | |
1379 | pgoff = base & ~PAGE_MASK; |
1380 | xdr->p = (__be32*)(kaddr + pgoff); |
1381 | |
1382 | pgend = pgoff + len; |
1383 | if (pgend > PAGE_SIZE) |
1384 | pgend = PAGE_SIZE; |
1385 | xdr->end = (__be32*)(kaddr + pgend); |
1386 | xdr->iov = NULL; |
1387 | return len; |
1388 | } |
1389 | |
1390 | static void xdr_set_page(struct xdr_stream *xdr, unsigned int base, |
1391 | unsigned int len) |
1392 | { |
1393 | if (xdr_set_page_base(xdr, base, len) == 0) { |
1394 | base -= xdr->buf->page_len; |
1395 | xdr_set_tail_base(xdr, base, len); |
1396 | } |
1397 | } |
1398 | |
1399 | static void xdr_set_next_page(struct xdr_stream *xdr) |
1400 | { |
1401 | unsigned int newbase; |
1402 | |
1403 | newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; |
1404 | newbase -= xdr->buf->page_base; |
1405 | if (newbase < xdr->buf->page_len) |
1406 | xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr)); |
1407 | else |
1408 | xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr)); |
1409 | } |
1410 | |
1411 | static bool xdr_set_next_buffer(struct xdr_stream *xdr) |
1412 | { |
1413 | if (xdr->page_ptr != NULL) |
1414 | xdr_set_next_page(xdr); |
1415 | else if (xdr->iov == xdr->buf->head) |
1416 | xdr_set_page(xdr, 0, xdr_stream_remaining(xdr)); |
1417 | return xdr->p != xdr->end; |
1418 | } |
1419 | |
1420 | /** |
1421 | * xdr_init_decode - Initialize an xdr_stream for decoding data. |
1422 | * @xdr: pointer to xdr_stream struct |
1423 | * @buf: pointer to XDR buffer from which to decode data |
1424 | * @p: current pointer inside XDR buffer |
1425 | * @rqst: pointer to controlling rpc_rqst, for debugging |
1426 | */ |
1427 | void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, |
1428 | struct rpc_rqst *rqst) |
1429 | { |
1430 | xdr->buf = buf; |
1431 | xdr->page_kaddr = NULL; |
1432 | xdr_reset_scratch_buffer(xdr); |
1433 | xdr->nwords = XDR_QUADLEN(buf->len); |
1434 | if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 && |
1435 | xdr_set_page_base(xdr, 0, buf->len) == 0) |
1436 | xdr_set_iov(xdr, buf->tail, 0, buf->len); |
1437 | if (p != NULL && p > xdr->p && xdr->end >= p) { |
1438 | xdr->nwords -= p - xdr->p; |
1439 | xdr->p = p; |
1440 | } |
1441 | xdr->rqst = rqst; |
1442 | } |
1443 | EXPORT_SYMBOL_GPL(xdr_init_decode); |
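
/*
 * Usage sketch (illustrative only) of a decode sequence over a received
 * buffer; "rcvbuf", "decode_reply" and "res" are hypothetical:
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base, NULL);
 *	status = decode_reply(&xdr, res);
 *	xdr_finish_decode(&xdr);
 */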
1444 | |
1445 | /** |
1446 | * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages |
1447 | * @xdr: pointer to xdr_stream struct |
1448 | * @buf: pointer to XDR buffer from which to decode data |
1449 | * @pages: list of pages to decode into |
1450 | * @len: length in bytes of buffer in pages |
1451 | */ |
1452 | void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, |
1453 | struct page **pages, unsigned int len) |
1454 | { |
1455 | memset(buf, 0, sizeof(*buf)); |
1456 | buf->pages = pages; |
1457 | buf->page_len = len; |
1458 | buf->buflen = len; |
1459 | buf->len = len; |
1460 | xdr_init_decode(xdr, buf, NULL, NULL); |
1461 | } |
1462 | EXPORT_SYMBOL_GPL(xdr_init_decode_pages); |
1463 | |
1464 | /** |
1465 | * xdr_finish_decode - Clean up the xdr_stream after decoding data. |
1466 | * @xdr: pointer to xdr_stream struct |
1467 | */ |
1468 | void xdr_finish_decode(struct xdr_stream *xdr) |
1469 | { |
1470 | xdr_stream_unmap_current_page(xdr); |
1471 | } |
1472 | EXPORT_SYMBOL(xdr_finish_decode); |
1473 | |
1474 | static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) |
1475 | { |
1476 | unsigned int nwords = XDR_QUADLEN(nbytes); |
1477 | __be32 *p = xdr->p; |
1478 | __be32 *q = p + nwords; |
1479 | |
1480 | if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p)) |
1481 | return NULL; |
1482 | xdr->p = q; |
1483 | xdr->nwords -= nwords; |
1484 | return p; |
1485 | } |
1486 | |
1487 | static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) |
1488 | { |
1489 | __be32 *p; |
1490 | char *cpdest = xdr->scratch.iov_base; |
1491 | size_t cplen = (char *)xdr->end - (char *)xdr->p; |
1492 | |
1493 | if (nbytes > xdr->scratch.iov_len) |
1494 | goto out_overflow; |
1495 | p = __xdr_inline_decode(xdr, cplen); |
1496 | if (p == NULL) |
1497 | return NULL; |
1498 | memcpy(cpdest, p, cplen); |
1499 | if (!xdr_set_next_buffer(xdr)) |
1500 | goto out_overflow; |
1501 | cpdest += cplen; |
1502 | nbytes -= cplen; |
1503 | p = __xdr_inline_decode(xdr, nbytes); |
1504 | if (p == NULL) |
1505 | return NULL; |
1506 | memcpy(cpdest, p, nbytes); |
1507 | return xdr->scratch.iov_base; |
1508 | out_overflow: |
1509 | trace_rpc_xdr_overflow(xdr, nbytes); |
1510 | return NULL; |
1511 | } |
1512 | |
1513 | /** |
1514 | * xdr_inline_decode - Retrieve XDR data to decode |
1515 | * @xdr: pointer to xdr_stream struct |
1516 | * @nbytes: number of bytes of data to decode |
1517 | * |
1518 | * Check if the input buffer is long enough to enable us to decode |
1519 | * 'nbytes' more bytes of data starting at the current position. |
1520 | * If so return the current pointer, then update the current |
1521 | * pointer position. |
1522 | */ |
1523 | __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) |
1524 | { |
1525 | __be32 *p; |
1526 | |
1527 | if (unlikely(nbytes == 0)) |
1528 | return xdr->p; |
1529 | if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) |
1530 | goto out_overflow; |
1531 | p = __xdr_inline_decode(xdr, nbytes); |
1532 | if (p != NULL) |
1533 | return p; |
1534 | return xdr_copy_to_scratch(xdr, nbytes); |
1535 | out_overflow: |
1536 | trace_rpc_xdr_overflow(xdr, nbytes); |
1537 | return NULL; |
1538 | } |
1539 | EXPORT_SYMBOL_GPL(xdr_inline_decode); |
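
/*
 * Usage sketch (illustrative only): decoding a length-prefixed opaque.
 * Variable names are hypothetical.
 *
 *	p = xdr_inline_decode(xdr, XDR_UNIT);
 *	if (!p)
 *		return -EIO;
 *	len = be32_to_cpup(p);
 *	p = xdr_inline_decode(xdr, len);
 *	if (!p)
 *		return -EIO;
 *	memcpy(dst, p, len);
 */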
1540 | |
1541 | static void xdr_realign_pages(struct xdr_stream *xdr) |
1542 | { |
1543 | struct xdr_buf *buf = xdr->buf; |
1544 | struct kvec *iov = buf->head; |
1545 | unsigned int cur = xdr_stream_pos(xdr); |
1546 | unsigned int copied; |
1547 | |
1548 | /* Realign pages to current pointer position */ |
1549 | if (iov->iov_len > cur) { |
1550 | copied = xdr_shrink_bufhead(buf, cur); |
1551 | trace_rpc_xdr_alignment(xdr, cur, copied); |
1552 | xdr_set_page(xdr, 0, buf->page_len); |
1553 | } |
1554 | } |
1555 | |
1556 | static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) |
1557 | { |
1558 | struct xdr_buf *buf = xdr->buf; |
1559 | unsigned int nwords = XDR_QUADLEN(len); |
1560 | unsigned int copied; |
1561 | |
1562 | if (xdr->nwords == 0) |
1563 | return 0; |
1564 | |
1565 | xdr_realign_pages(xdr); |
1566 | if (nwords > xdr->nwords) { |
1567 | nwords = xdr->nwords; |
1568 | len = nwords << 2; |
1569 | } |
1570 | if (buf->page_len <= len) |
1571 | len = buf->page_len; |
1572 | else if (nwords < xdr->nwords) { |
1573 | /* Truncate page data and move it into the tail */ |
1574 | copied = xdr_shrink_pagelen(buf, len); |
1575 | trace_rpc_xdr_alignment(xdr, len, copied); |
1576 | } |
1577 | return len; |
1578 | } |
1579 | |
1580 | /** |
1581 | * xdr_read_pages - align page-based XDR data to current pointer position |
1582 | * @xdr: pointer to xdr_stream struct |
1583 | * @len: number of bytes of page data |
1584 | * |
1585 | * Moves data beyond the current pointer position from the XDR head[] buffer |
1586 | * into the page list. Any data that lies beyond current position + @len |
1587 | * bytes is moved into the XDR tail[]. The xdr_stream current position is |
1588 | * then advanced past that data to align to the next XDR object in the tail. |
1589 | * |
1590 | * Returns the number of XDR encoded bytes now contained in the pages |
1591 | */ |
1592 | unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) |
1593 | { |
1594 | unsigned int nwords = XDR_QUADLEN(len); |
1595 | unsigned int base, end, pglen; |
1596 | |
1597 | pglen = xdr_align_pages(xdr, nwords << 2); |
1598 | if (pglen == 0) |
1599 | return 0; |
1600 | |
1601 | base = (nwords << 2) - pglen; |
1602 | end = xdr_stream_remaining(xdr) - pglen; |
1603 | |
1604 | xdr_set_tail_base(xdr, base, end); |
1605 | return len <= pglen ? len : pglen; |
1606 | } |
1607 | EXPORT_SYMBOL_GPL(xdr_read_pages); |
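
/*
 * Usage sketch (illustrative only): aligning a variable-length payload into
 * the page list before decoding the fields that follow it. "count" is the
 * XDR length decoded just before this point and is hypothetical.
 *
 *	recvd = xdr_read_pages(xdr, count);
 *	if (recvd < count)
 *		return -EIO;
 *
 * On return the stream is positioned at the first XDR item in the tail,
 * past the page data and its padding.
 */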
1608 | |
1609 | /** |
1610 | * xdr_set_pagelen - Sets the length of the XDR pages |
1611 | * @xdr: pointer to xdr_stream struct |
1612 | * @len: new length of the XDR page data |
1613 | * |
1614 | * Either grows or shrinks the length of the xdr pages by setting pagelen to |
1615 | * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas |
1616 | * when growing any data beyond the current pointer is moved into the tail. |
1619 | */ |
1620 | void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len) |
1621 | { |
1622 | struct xdr_buf *buf = xdr->buf; |
1623 | size_t remaining = xdr_stream_remaining(xdr); |
1624 | size_t base = 0; |
1625 | |
1626 | if (len < buf->page_len) { |
1627 | base = buf->page_len - len; |
1628 | xdr_shrink_pagelen(buf, len); |
1629 | } else { |
1630 | xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr), |
1631 | buf->page_len, remaining); |
1632 | if (len > buf->page_len) |
1633 | xdr_buf_try_expand(buf, len - buf->page_len); |
1634 | } |
1635 | xdr_set_tail_base(xdr, base, remaining); |
1636 | } |
1637 | EXPORT_SYMBOL_GPL(xdr_set_pagelen); |
1638 | |
1639 | /** |
1640 | * xdr_enter_page - decode data from the XDR page |
1641 | * @xdr: pointer to xdr_stream struct |
1642 | * @len: number of bytes of page data |
1643 | * |
1644 | * Moves data beyond the current pointer position from the XDR head[] buffer |
1645 | * into the page list. Any data that lies beyond current position + "len" |
1646 | * bytes is moved into the XDR tail[]. The current pointer is then |
1647 | * repositioned at the beginning of the first XDR page. |
1648 | */ |
1649 | void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) |
1650 | { |
1651 | len = xdr_align_pages(xdr, len); |
1652 | /* |
1653 | * Position current pointer at beginning of tail, and |
1654 | * set remaining message length. |
1655 | */ |
1656 | if (len != 0) |
1657 | xdr_set_page_base(xdr, 0, len); |
1658 | } |
1659 | EXPORT_SYMBOL_GPL(xdr_enter_page); |
1660 | |
1661 | static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; |
1662 | |
1663 | void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf) |
1664 | { |
1665 | buf->head[0] = *iov; |
1666 | buf->tail[0] = empty_iov; |
1667 | buf->page_len = 0; |
1668 | buf->buflen = buf->len = iov->iov_len; |
1669 | } |
1670 | EXPORT_SYMBOL_GPL(xdr_buf_from_iov); |
1671 | |
1672 | /** |
1673 | * xdr_buf_subsegment - set subbuf to a portion of buf |
1674 | * @buf: an xdr buffer |
1675 | * @subbuf: the result buffer |
1676 | * @base: beginning of range in bytes |
1677 | * @len: length of range in bytes |
1678 | * |
1679 | * sets @subbuf to an xdr buffer representing the portion of @buf of |
1680 | * length @len starting at offset @base. |
1681 | * |
1682 | * @buf and @subbuf may be pointers to the same struct xdr_buf. |
1683 | * |
1684 | * Returns -1 if base or length are out of bounds. |
1685 | */ |
1686 | int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf, |
1687 | unsigned int base, unsigned int len) |
1688 | { |
1689 | subbuf->buflen = subbuf->len = len; |
1690 | if (base < buf->head[0].iov_len) { |
1691 | subbuf->head[0].iov_base = buf->head[0].iov_base + base; |
1692 | subbuf->head[0].iov_len = min_t(unsigned int, len, |
1693 | buf->head[0].iov_len - base); |
1694 | len -= subbuf->head[0].iov_len; |
1695 | base = 0; |
1696 | } else { |
1697 | base -= buf->head[0].iov_len; |
1698 | subbuf->head[0].iov_base = buf->head[0].iov_base; |
1699 | subbuf->head[0].iov_len = 0; |
1700 | } |
1701 | |
1702 | if (base < buf->page_len) { |
1703 | subbuf->page_len = min(buf->page_len - base, len); |
1704 | base += buf->page_base; |
1705 | subbuf->page_base = base & ~PAGE_MASK; |
1706 | subbuf->pages = &buf->pages[base >> PAGE_SHIFT]; |
1707 | len -= subbuf->page_len; |
1708 | base = 0; |
1709 | } else { |
1710 | base -= buf->page_len; |
1711 | subbuf->pages = buf->pages; |
1712 | subbuf->page_base = 0; |
1713 | subbuf->page_len = 0; |
1714 | } |
1715 | |
1716 | if (base < buf->tail[0].iov_len) { |
1717 | subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; |
1718 | subbuf->tail[0].iov_len = min_t(unsigned int, len, |
1719 | buf->tail[0].iov_len - base); |
1720 | len -= subbuf->tail[0].iov_len; |
1721 | base = 0; |
1722 | } else { |
1723 | base -= buf->tail[0].iov_len; |
1724 | subbuf->tail[0].iov_base = buf->tail[0].iov_base; |
1725 | subbuf->tail[0].iov_len = 0; |
1726 | } |
1727 | |
1728 | if (base || len) |
1729 | return -1; |
1730 | return 0; |
1731 | } |
1732 | EXPORT_SYMBOL_GPL(xdr_buf_subsegment); |
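
/*
 * Example (illustrative sketch): carving a fixed region out of a received
 * buffer and bounds-checking it in one step.  The caller, offsets and
 * error choice are hypothetical.
 */
static int example_extract_payload(const struct xdr_buf *rcvbuf,
				   struct xdr_buf *payload,
				   unsigned int offset, unsigned int length)
{
	/* xdr_buf_subsegment() returns -1 if the range falls outside rcvbuf */
	if (xdr_buf_subsegment(rcvbuf, payload, offset, length))
		return -EMSGSIZE;
	return 0;
}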
1733 | |
1734 | /** |
1735 | * xdr_stream_subsegment - set @subbuf to a portion of @xdr |
1736 | * @xdr: an xdr_stream set up for decoding |
1737 | * @subbuf: the result buffer |
1738 | * @nbytes: length of @xdr to extract, in bytes |
1739 | * |
1740 | * Sets up @subbuf to represent a portion of @xdr. The portion |
1741 | * starts at the current offset in @xdr, and extends for a length |
1742 | * of @nbytes. If this is successful, @xdr is advanced to the next |
1743 | * XDR data item following that portion. |
1744 | * |
1745 | * Return values: |
1746 | * %true: @subbuf has been initialized, and @xdr has been advanced. |
1747 | * %false: a bounds error has occurred |
1748 | */ |
1749 | bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, |
1750 | unsigned int nbytes) |
1751 | { |
1752 | unsigned int start = xdr_stream_pos(xdr); |
1753 | unsigned int remaining, len; |
1754 | |
1755 | /* Extract @subbuf and bounds-check the fn arguments */ |
1756 | if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes)) |
1757 | return false; |
1758 | |
1759 | /* Advance @xdr by @nbytes */ |
1760 | for (remaining = nbytes; remaining;) { |
1761 | if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) |
1762 | return false; |
1763 | |
1764 | len = (char *)xdr->end - (char *)xdr->p; |
1765 | if (remaining <= len) { |
1766 | xdr->p = (__be32 *)((char *)xdr->p + |
					(remaining + xdr_pad_size(nbytes)));
1768 | break; |
1769 | } |
1770 | |
1771 | xdr->p = (__be32 *)((char *)xdr->p + len); |
1772 | xdr->end = xdr->p; |
1773 | remaining -= len; |
1774 | } |
1775 | |
	xdr_stream_set_pos(xdr, start + nbytes);
1777 | return true; |
1778 | } |
1779 | EXPORT_SYMBOL_GPL(xdr_stream_subsegment); |
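
/*
 * Example (illustrative sketch): a decoder that reads a 32-bit length and
 * then captures the opaque body that follows as a separate xdr_buf without
 * copying it, leaving the stream positioned after the body.  The helper
 * name is hypothetical.
 */
static bool example_decode_counted_body(struct xdr_stream *xdr,
					struct xdr_buf *body)
{
	u32 len;

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return false;
	return xdr_stream_subsegment(xdr, body, len);
}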
1780 | |
1781 | /** |
1782 | * xdr_stream_move_subsegment - Move part of a stream to another position |
1783 | * @xdr: the source xdr_stream |
1784 | * @offset: the source offset of the segment |
1785 | * @target: the target offset of the segment |
1786 | * @length: the number of bytes to move |
1787 | * |
1788 | * Moves @length bytes from @offset to @target in the xdr_stream, overwriting |
1789 | * anything in its space. Returns the number of bytes in the segment. |
1790 | */ |
1791 | unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset, |
1792 | unsigned int target, unsigned int length) |
1793 | { |
1794 | struct xdr_buf buf; |
1795 | unsigned int shift; |
1796 | |
1797 | if (offset < target) { |
1798 | shift = target - offset; |
1799 | if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0) |
1800 | return 0; |
		xdr_buf_head_shift_right(&buf, 0, length, shift);
1802 | } else if (offset > target) { |
1803 | shift = offset - target; |
1804 | if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0) |
1805 | return 0; |
		xdr_buf_head_shift_left(&buf, shift, length, shift);
1807 | } |
1808 | return length; |
1809 | } |
1810 | EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment); |
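
/*
 * Example (illustrative sketch): shifting an already-encoded segment
 * towards the end of the stream, for instance to open up a gap in front
 * of it.  All names and offsets below are hypothetical.
 */
static void example_open_gap(struct xdr_stream *xdr, unsigned int seg_offset,
			     unsigned int seg_len, unsigned int gap)
{
	/* Move the segment @gap bytes later; data at the target is overwritten */
	xdr_stream_move_subsegment(xdr, seg_offset, seg_offset + gap, seg_len);
}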
1811 | |
1812 | /** |
1813 | * xdr_stream_zero - zero out a portion of an xdr_stream |
1814 | * @xdr: an xdr_stream to zero out |
1815 | * @offset: the starting point in the stream |
1816 | * @length: the number of bytes to zero |
1817 | */ |
1818 | unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset, |
1819 | unsigned int length) |
1820 | { |
1821 | struct xdr_buf buf; |
1822 | |
1823 | if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0) |
1824 | return 0; |
	if (buf.head[0].iov_len)
		xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
	if (buf.page_len > 0)
		xdr_buf_pages_zero(&buf, 0, buf.page_len);
	if (buf.tail[0].iov_len)
		xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
1831 | return length; |
1832 | } |
1833 | EXPORT_SYMBOL_GPL(xdr_stream_zero); |
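
/*
 * Example (illustrative sketch): clearing a hole in a reply so the peer
 * never sees stale buffer contents.  The offset and length are
 * hypothetical values supplied by the caller.
 */
static void example_zero_hole(struct xdr_stream *xdr, unsigned int hole_offset,
			      unsigned int hole_len)
{
	/* xdr_stream_zero() returns 0 if the range is out of bounds */
	WARN_ON_ONCE(xdr_stream_zero(xdr, hole_offset, hole_len) != hole_len);
}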
1834 | |
1835 | /** |
1836 | * xdr_buf_trim - lop at most "len" bytes off the end of "buf" |
1837 | * @buf: buf to be trimmed |
1838 | * @len: number of bytes to reduce "buf" by |
1839 | * |
1840 | * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note |
1841 | * that it's possible that we'll trim less than that amount if the xdr_buf is |
1842 | * too small, or if (for instance) it's all in the head and the parser has |
1843 | * already read too far into it. |
1844 | */ |
1845 | void xdr_buf_trim(struct xdr_buf *buf, unsigned int len) |
1846 | { |
1847 | size_t cur; |
1848 | unsigned int trim = len; |
1849 | |
1850 | if (buf->tail[0].iov_len) { |
1851 | cur = min_t(size_t, buf->tail[0].iov_len, trim); |
1852 | buf->tail[0].iov_len -= cur; |
1853 | trim -= cur; |
1854 | if (!trim) |
1855 | goto fix_len; |
1856 | } |
1857 | |
1858 | if (buf->page_len) { |
1859 | cur = min_t(unsigned int, buf->page_len, trim); |
1860 | buf->page_len -= cur; |
1861 | trim -= cur; |
1862 | if (!trim) |
1863 | goto fix_len; |
1864 | } |
1865 | |
1866 | if (buf->head[0].iov_len) { |
1867 | cur = min_t(size_t, buf->head[0].iov_len, trim); |
1868 | buf->head[0].iov_len -= cur; |
1869 | trim -= cur; |
1870 | } |
1871 | fix_len: |
1872 | buf->len -= (len - trim); |
1873 | } |
1874 | EXPORT_SYMBOL_GPL(xdr_buf_trim); |
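
/*
 * Example (illustrative sketch): dropping a trailing blob (for instance a
 * checksum that has already been verified) from a received buffer before
 * passing it up.  The helper name and trailer length are hypothetical.
 */
static void example_strip_trailer(struct xdr_buf *buf, unsigned int trailer_len)
{
	/* Shortens tail[], then pages, then head[], and fixes up buf->len */
	xdr_buf_trim(buf, trailer_len);
}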
1875 | |
1876 | static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf, |
1877 | void *obj, unsigned int len) |
1878 | { |
1879 | unsigned int this_len; |
1880 | |
1881 | this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); |
1882 | memcpy(obj, subbuf->head[0].iov_base, this_len); |
1883 | len -= this_len; |
1884 | obj += this_len; |
1885 | this_len = min_t(unsigned int, len, subbuf->page_len); |
1886 | _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); |
1887 | len -= this_len; |
1888 | obj += this_len; |
1889 | this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); |
1890 | memcpy(obj, subbuf->tail[0].iov_base, this_len); |
1891 | } |
1892 | |
1893 | /* obj is assumed to point to allocated memory of size at least len: */ |
1894 | int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base, |
1895 | void *obj, unsigned int len) |
1896 | { |
1897 | struct xdr_buf subbuf; |
1898 | int status; |
1899 | |
1900 | status = xdr_buf_subsegment(buf, &subbuf, base, len); |
1901 | if (status != 0) |
1902 | return status; |
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
1904 | return 0; |
1905 | } |
1906 | EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf); |
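
/*
 * Example (illustrative sketch): copying a small fixed-size object out of
 * the middle of a receive buffer into flat storage, regardless of whether
 * it straddles head[], the pages or tail[].  The object size and offset
 * are hypothetical.
 */
static int example_read_cookie(const struct xdr_buf *rcvbuf,
			       unsigned int offset, u8 cookie[8])
{
	/* Returns -1 if the 8 bytes are not entirely inside rcvbuf */
	return read_bytes_from_xdr_buf(rcvbuf, offset, cookie, 8);
}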
1907 | |
1908 | static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf, |
1909 | void *obj, unsigned int len) |
1910 | { |
1911 | unsigned int this_len; |
1912 | |
1913 | this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); |
1914 | memcpy(subbuf->head[0].iov_base, obj, this_len); |
1915 | len -= this_len; |
1916 | obj += this_len; |
1917 | this_len = min_t(unsigned int, len, subbuf->page_len); |
	_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1919 | len -= this_len; |
1920 | obj += this_len; |
1921 | this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); |
1922 | memcpy(subbuf->tail[0].iov_base, obj, this_len); |
1923 | } |
1924 | |
1925 | /* obj is assumed to point to allocated memory of size at least len: */ |
1926 | int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base, |
1927 | void *obj, unsigned int len) |
1928 | { |
1929 | struct xdr_buf subbuf; |
1930 | int status; |
1931 | |
1932 | status = xdr_buf_subsegment(buf, &subbuf, base, len); |
1933 | if (status != 0) |
1934 | return status; |
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
1936 | return 0; |
1937 | } |
1938 | EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); |
1939 | |
1940 | int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj) |
1941 | { |
1942 | __be32 raw; |
1943 | int status; |
1944 | |
1945 | status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); |
1946 | if (status) |
1947 | return status; |
1948 | *obj = be32_to_cpu(raw); |
1949 | return 0; |
1950 | } |
1951 | EXPORT_SYMBOL_GPL(xdr_decode_word); |
1952 | |
1953 | int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj) |
1954 | { |
1955 | __be32 raw = cpu_to_be32(obj); |
1956 | |
1957 | return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); |
1958 | } |
1959 | EXPORT_SYMBOL_GPL(xdr_encode_word); |
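
/*
 * Example (illustrative sketch): patching a 32-bit length word that was
 * written as a placeholder earlier in the buffer, once the final size is
 * known.  The offset and helper name are hypothetical.
 */
static int example_patch_count(const struct xdr_buf *buf,
			       unsigned int count_offset, u32 final_count)
{
	/* Overwrites the word at @count_offset with its big-endian encoding */
	return xdr_encode_word(buf, count_offset, final_count);
}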
1960 | |
1961 | /* Returns 0 on success, or else a negative error code. */ |
1962 | static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base, |
1963 | struct xdr_array2_desc *desc, int encode) |
1964 | { |
1965 | char *elem = NULL, *c; |
1966 | unsigned int copied = 0, todo, avail_here; |
1967 | struct page **ppages = NULL; |
1968 | int err; |
1969 | |
1970 | if (encode) { |
1971 | if (xdr_encode_word(buf, base, desc->array_len) != 0) |
1972 | return -EINVAL; |
1973 | } else { |
1974 | if (xdr_decode_word(buf, base, &desc->array_len) != 0 || |
1975 | desc->array_len > desc->array_maxlen || |
1976 | (unsigned long) base + 4 + desc->array_len * |
1977 | desc->elem_size > buf->len) |
1978 | return -EINVAL; |
1979 | } |
1980 | base += 4; |
1981 | |
1982 | if (!desc->xcode) |
1983 | return 0; |
1984 | |
1985 | todo = desc->array_len * desc->elem_size; |
1986 | |
1987 | /* process head */ |
1988 | if (todo && base < buf->head->iov_len) { |
1989 | c = buf->head->iov_base + base; |
1990 | avail_here = min_t(unsigned int, todo, |
1991 | buf->head->iov_len - base); |
1992 | todo -= avail_here; |
1993 | |
1994 | while (avail_here >= desc->elem_size) { |
1995 | err = desc->xcode(desc, c); |
1996 | if (err) |
1997 | goto out; |
1998 | c += desc->elem_size; |
1999 | avail_here -= desc->elem_size; |
2000 | } |
2001 | if (avail_here) { |
2002 | if (!elem) { |
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
2004 | err = -ENOMEM; |
2005 | if (!elem) |
2006 | goto out; |
2007 | } |
2008 | if (encode) { |
2009 | err = desc->xcode(desc, elem); |
2010 | if (err) |
2011 | goto out; |
2012 | memcpy(c, elem, avail_here); |
2013 | } else |
2014 | memcpy(elem, c, avail_here); |
2015 | copied = avail_here; |
2016 | } |
2017 | base = buf->head->iov_len; /* align to start of pages */ |
2018 | } |
2019 | |
2020 | /* process pages array */ |
2021 | base -= buf->head->iov_len; |
2022 | if (todo && base < buf->page_len) { |
2023 | unsigned int avail_page; |
2024 | |
2025 | avail_here = min(todo, buf->page_len - base); |
2026 | todo -= avail_here; |
2027 | |
2028 | base += buf->page_base; |
2029 | ppages = buf->pages + (base >> PAGE_SHIFT); |
2030 | base &= ~PAGE_MASK; |
2031 | avail_page = min_t(unsigned int, PAGE_SIZE - base, |
2032 | avail_here); |
		c = kmap(*ppages) + base;
2034 | |
2035 | while (avail_here) { |
2036 | avail_here -= avail_page; |
2037 | if (copied || avail_page < desc->elem_size) { |
2038 | unsigned int l = min(avail_page, |
2039 | desc->elem_size - copied); |
2040 | if (!elem) { |
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
2043 | err = -ENOMEM; |
2044 | if (!elem) |
2045 | goto out; |
2046 | } |
2047 | if (encode) { |
2048 | if (!copied) { |
2049 | err = desc->xcode(desc, elem); |
2050 | if (err) |
2051 | goto out; |
2052 | } |
2053 | memcpy(c, elem + copied, l); |
2054 | copied += l; |
2055 | if (copied == desc->elem_size) |
2056 | copied = 0; |
2057 | } else { |
2058 | memcpy(elem + copied, c, l); |
2059 | copied += l; |
2060 | if (copied == desc->elem_size) { |
2061 | err = desc->xcode(desc, elem); |
2062 | if (err) |
2063 | goto out; |
2064 | copied = 0; |
2065 | } |
2066 | } |
2067 | avail_page -= l; |
2068 | c += l; |
2069 | } |
2070 | while (avail_page >= desc->elem_size) { |
2071 | err = desc->xcode(desc, c); |
2072 | if (err) |
2073 | goto out; |
2074 | c += desc->elem_size; |
2075 | avail_page -= desc->elem_size; |
2076 | } |
2077 | if (avail_page) { |
2078 | unsigned int l = min(avail_page, |
2079 | desc->elem_size - copied); |
2080 | if (!elem) { |
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
2083 | err = -ENOMEM; |
2084 | if (!elem) |
2085 | goto out; |
2086 | } |
2087 | if (encode) { |
2088 | if (!copied) { |
2089 | err = desc->xcode(desc, elem); |
2090 | if (err) |
2091 | goto out; |
2092 | } |
2093 | memcpy(c, elem + copied, l); |
2094 | copied += l; |
2095 | if (copied == desc->elem_size) |
2096 | copied = 0; |
2097 | } else { |
2098 | memcpy(elem + copied, c, l); |
2099 | copied += l; |
2100 | if (copied == desc->elem_size) { |
2101 | err = desc->xcode(desc, elem); |
2102 | if (err) |
2103 | goto out; |
2104 | copied = 0; |
2105 | } |
2106 | } |
2107 | } |
2108 | if (avail_here) { |
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
2112 | } |
2113 | |
2114 | avail_page = min(avail_here, |
2115 | (unsigned int) PAGE_SIZE); |
2116 | } |
2117 | base = buf->page_len; /* align to start of tail */ |
2118 | } |
2119 | |
2120 | /* process tail */ |
2121 | base -= buf->page_len; |
2122 | if (todo) { |
2123 | c = buf->tail->iov_base + base; |
2124 | if (copied) { |
2125 | unsigned int l = desc->elem_size - copied; |
2126 | |
2127 | if (encode) |
2128 | memcpy(c, elem + copied, l); |
2129 | else { |
2130 | memcpy(elem + copied, c, l); |
2131 | err = desc->xcode(desc, elem); |
2132 | if (err) |
2133 | goto out; |
2134 | } |
2135 | todo -= l; |
2136 | c += l; |
2137 | } |
2138 | while (todo) { |
2139 | err = desc->xcode(desc, c); |
2140 | if (err) |
2141 | goto out; |
2142 | c += desc->elem_size; |
2143 | todo -= desc->elem_size; |
2144 | } |
2145 | } |
2146 | err = 0; |
2147 | |
2148 | out: |
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
2152 | return err; |
2153 | } |
2154 | |
2155 | int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base, |
2156 | struct xdr_array2_desc *desc) |
2157 | { |
2158 | if (base >= buf->len) |
2159 | return -EINVAL; |
2160 | |
	return xdr_xcode_array2(buf, base, desc, 0);
2162 | } |
2163 | EXPORT_SYMBOL_GPL(xdr_decode_array2); |
2164 | |
2165 | int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base, |
2166 | struct xdr_array2_desc *desc) |
2167 | { |
2168 | if ((unsigned long) base + 4 + desc->array_len * desc->elem_size > |
2169 | buf->head->iov_len + buf->page_len + buf->tail->iov_len) |
2170 | return -EINVAL; |
2171 | |
	return xdr_xcode_array2(buf, base, desc, 1);
2173 | } |
2174 | EXPORT_SYMBOL_GPL(xdr_encode_array2); |
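
/*
 * Example (illustrative sketch): decoding a counted array of fixed-size
 * elements with xdr_decode_array2().  The context structure, element size
 * and limit below are hypothetical; the xcode callback signature follows
 * struct xdr_array2_desc as used above.
 */
struct example_array_ctx {
	struct xdr_array2_desc	desc;
	unsigned int		seen;
};

static int example_count_elem(struct xdr_array2_desc *desc, void *elem)
{
	struct example_array_ctx *ctx =
		container_of(desc, struct example_array_ctx, desc);

	/* @elem points at desc->elem_size raw XDR bytes for one entry */
	ctx->seen++;
	return 0;
}

static int example_decode_array(const struct xdr_buf *buf, unsigned int base)
{
	struct example_array_ctx ctx = {
		.desc = {
			.elem_size	= 8,
			.array_maxlen	= 64,
			.xcode		= example_count_elem,
		},
	};

	/* Reads the on-the-wire element count, then calls xcode per element */
	return xdr_decode_array2(buf, base, &ctx.desc);
}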
2175 | |
2176 | int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, |
2177 | unsigned int len, |
2178 | int (*actor)(struct scatterlist *, void *), void *data) |
2179 | { |
2180 | int i, ret = 0; |
2181 | unsigned int page_len, thislen, page_offset; |
2182 | struct scatterlist sg[1]; |
2183 | |
2184 | sg_init_table(sg, 1); |
2185 | |
2186 | if (offset >= buf->head[0].iov_len) { |
2187 | offset -= buf->head[0].iov_len; |
2188 | } else { |
2189 | thislen = buf->head[0].iov_len - offset; |
2190 | if (thislen > len) |
2191 | thislen = len; |
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
2193 | ret = actor(sg, data); |
2194 | if (ret) |
2195 | goto out; |
2196 | offset = 0; |
2197 | len -= thislen; |
2198 | } |
2199 | if (len == 0) |
2200 | goto out; |
2201 | |
2202 | if (offset >= buf->page_len) { |
2203 | offset -= buf->page_len; |
2204 | } else { |
2205 | page_len = buf->page_len - offset; |
2206 | if (page_len > len) |
2207 | page_len = len; |
2208 | len -= page_len; |
2209 | page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1); |
2210 | i = (offset + buf->page_base) >> PAGE_SHIFT; |
2211 | thislen = PAGE_SIZE - page_offset; |
2212 | do { |
2213 | if (thislen > page_len) |
2214 | thislen = page_len; |
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
2216 | ret = actor(sg, data); |
2217 | if (ret) |
2218 | goto out; |
2219 | page_len -= thislen; |
2220 | i++; |
2221 | page_offset = 0; |
2222 | thislen = PAGE_SIZE; |
2223 | } while (page_len != 0); |
2224 | offset = 0; |
2225 | } |
2226 | if (len == 0) |
2227 | goto out; |
2228 | if (offset < buf->tail[0].iov_len) { |
2229 | thislen = buf->tail[0].iov_len - offset; |
2230 | if (thislen > len) |
2231 | thislen = len; |
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
2233 | ret = actor(sg, data); |
2234 | len -= thislen; |
2235 | } |
2236 | if (len != 0) |
2237 | ret = -EINVAL; |
2238 | out: |
2239 | return ret; |
2240 | } |
2241 | EXPORT_SYMBOL_GPL(xdr_process_buf); |
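
/*
 * Example (illustrative sketch): an actor for xdr_process_buf() that simply
 * counts the bytes it is shown; a real user (such as a checksumming path)
 * would feed each scatterlist entry to its digest instead.  The names are
 * hypothetical.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;
}

static int example_count_bytes(const struct xdr_buf *buf, unsigned int offset,
			       unsigned int len, unsigned int *total)
{
	*total = 0;
	return xdr_process_buf(buf, offset, len, example_count_actor, total);
}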
2242 | |
2243 | /** |
2244 | * xdr_stream_decode_opaque - Decode variable length opaque |
2245 | * @xdr: pointer to xdr_stream |
2246 | * @ptr: location to store opaque data |
2247 | * @size: size of storage buffer @ptr |
2248 | * |
2249 | * Return values: |
2250 | * On success, returns size of object stored in *@ptr |
2251 | * %-EBADMSG on XDR buffer overflow |
2252 | * %-EMSGSIZE on overflow of storage buffer @ptr |
2253 | */ |
2254 | ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size) |
2255 | { |
2256 | ssize_t ret; |
2257 | void *p; |
2258 | |
	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
2260 | if (ret <= 0) |
2261 | return ret; |
2262 | memcpy(ptr, p, ret); |
2263 | return ret; |
2264 | } |
2265 | EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque); |
2266 | |
2267 | /** |
2268 | * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque |
2269 | * @xdr: pointer to xdr_stream |
2270 | * @ptr: location to store pointer to opaque data |
2271 | * @maxlen: maximum acceptable object size |
2272 | * @gfp_flags: GFP mask to use |
2273 | * |
2274 | * Return values: |
2275 | * On success, returns size of object stored in *@ptr |
2276 | * %-EBADMSG on XDR buffer overflow |
2277 | * %-EMSGSIZE if the size of the object would exceed @maxlen |
2278 | * %-ENOMEM on memory allocation failure |
2279 | */ |
2280 | ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, |
2281 | size_t maxlen, gfp_t gfp_flags) |
2282 | { |
2283 | ssize_t ret; |
2284 | void *p; |
2285 | |
	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
2287 | if (ret > 0) { |
		*ptr = kmemdup(p, ret, gfp_flags);
2289 | if (*ptr != NULL) |
2290 | return ret; |
2291 | ret = -ENOMEM; |
2292 | } |
2293 | *ptr = NULL; |
2294 | return ret; |
2295 | } |
2296 | EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup); |
2297 | |
2298 | /** |
2299 | * xdr_stream_decode_string - Decode variable length string |
2300 | * @xdr: pointer to xdr_stream |
2301 | * @str: location to store string |
2302 | * @size: size of storage buffer @str |
2303 | * |
2304 | * Return values: |
2305 | * On success, returns length of NUL-terminated string stored in *@str |
2306 | * %-EBADMSG on XDR buffer overflow |
2307 | * %-EMSGSIZE on overflow of storage buffer @str |
2308 | */ |
2309 | ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size) |
2310 | { |
2311 | ssize_t ret; |
2312 | void *p; |
2313 | |
	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
2315 | if (ret > 0) { |
2316 | memcpy(str, p, ret); |
2317 | str[ret] = '\0'; |
2318 | return strlen(str); |
2319 | } |
2320 | *str = '\0'; |
2321 | return ret; |
2322 | } |
2323 | EXPORT_SYMBOL_GPL(xdr_stream_decode_string); |
2324 | |
2325 | /** |
2326 | * xdr_stream_decode_string_dup - Decode and duplicate variable length string |
2327 | * @xdr: pointer to xdr_stream |
2328 | * @str: location to store pointer to string |
2329 | * @maxlen: maximum acceptable string length |
2330 | * @gfp_flags: GFP mask to use |
2331 | * |
2332 | * Return values: |
 * On success, returns length of NUL-terminated string stored in *@str
2334 | * %-EBADMSG on XDR buffer overflow |
2335 | * %-EMSGSIZE if the size of the string would exceed @maxlen |
2336 | * %-ENOMEM on memory allocation failure |
2337 | */ |
2338 | ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, |
2339 | size_t maxlen, gfp_t gfp_flags) |
2340 | { |
2341 | void *p; |
2342 | ssize_t ret; |
2343 | |
	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);
2347 | if (s != NULL) { |
2348 | *str = s; |
2349 | return strlen(s); |
2350 | } |
2351 | ret = -ENOMEM; |
2352 | } |
2353 | *str = NULL; |
2354 | return ret; |
2355 | } |
2356 | EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup); |
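
/*
 * Example (illustrative sketch): decoding a name from the stream into a
 * freshly allocated, NUL-terminated buffer.  The 255-byte limit is
 * hypothetical; on success the result must later be released with kfree().
 */
static ssize_t example_decode_name(struct xdr_stream *xdr, char **name)
{
	return xdr_stream_decode_string_dup(xdr, name, 255, GFP_KERNEL);
}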
2357 | |
2358 | /** |
2359 | * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC5531 S8.2) |
2360 | * @xdr: pointer to xdr_stream |
2361 | * @flavor: location to store decoded flavor |
 * @body: location to store the decoded body
2363 | * @body_len: location to store length of decoded body |
2364 | * |
2365 | * Return values: |
2366 | * On success, returns the number of buffer bytes consumed |
2367 | * %-EBADMSG on XDR buffer overflow |
2368 | * %-EMSGSIZE if the decoded size of the body field exceeds 400 octets |
2369 | */ |
2370 | ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor, |
2371 | void **body, unsigned int *body_len) |
2372 | { |
2373 | ssize_t ret, len; |
2374 | |
	len = xdr_stream_decode_u32(xdr, flavor);
2376 | if (unlikely(len < 0)) |
2377 | return len; |
	ret = xdr_stream_decode_opaque_inline(xdr, body, RPC_MAX_AUTH_SIZE);
2379 | if (unlikely(ret < 0)) |
2380 | return ret; |
2381 | *body_len = ret; |
2382 | return len + ret; |
2383 | } |
2384 | EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_auth); |
2385 | |
2386 | /** |
2387 | * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC5531 S8.2) |
2388 | * @xdr: pointer to xdr_stream |
2389 | * @flavor: verifier flavor to encode |
2390 | * @body: content of body to encode |
2391 | * @body_len: length of body to encode |
2392 | * |
2393 | * Return values: |
2394 | * On success, returns length in bytes of XDR buffer consumed |
2395 | * %-EBADMSG on XDR buffer overflow |
2396 | * %-EMSGSIZE if the size of @body exceeds 400 octets |
2397 | */ |
2398 | ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor, |
2399 | void *body, unsigned int body_len) |
2400 | { |
2401 | ssize_t ret, len; |
2402 | |
2403 | if (unlikely(body_len > RPC_MAX_AUTH_SIZE)) |
2404 | return -EMSGSIZE; |
	len = xdr_stream_encode_u32(xdr, flavor);
2406 | if (unlikely(len < 0)) |
2407 | return len; |
	ret = xdr_stream_encode_opaque(xdr, body, body_len);
2409 | if (unlikely(ret < 0)) |
2410 | return ret; |
2411 | return len + ret; |
2412 | } |
2413 | EXPORT_SYMBOL_GPL(xdr_stream_encode_opaque_auth); |
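
/*
 * Example (illustrative sketch): emitting an AUTH_NONE verifier, i.e. an
 * opaque_auth with flavor RPC_AUTH_NULL and an empty body, as a reply
 * encoder might do.  The helper name is hypothetical.
 */
static ssize_t example_encode_null_verifier(struct xdr_stream *xdr)
{
	/* Encodes the flavor word followed by a zero-length opaque body */
	return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0);
}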
2414 | |