1 /*
2  * linux/net/sunrpc/xdr.c
3  *
4  * Generic XDR support.
5  *
6  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/pagemap.h>
15 #include <linux/errno.h>
16 #include <linux/sunrpc/xdr.h>
17 #include <linux/sunrpc/msg_prot.h>
18 
19 /*
20  * XDR functions for basic NFS types
21  */
22 __be32 *
23 xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
24 {
25 	unsigned int	quadlen = XDR_QUADLEN(obj->len);
26 
27 	p[quadlen] = 0;		/* zero trailing bytes */
28 	*p++ = cpu_to_be32(obj->len);
29 	memcpy(p, obj->data, obj->len);
30 	return p + XDR_QUADLEN(obj->len);
31 }
32 EXPORT_SYMBOL_GPL(xdr_encode_netobj);
33 
34 __be32 *
35 xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
36 {
37 	unsigned int	len;
38 
39 	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
40 		return NULL;
41 	obj->len  = len;
42 	obj->data = (u8 *) p;
43 	return p + XDR_QUADLEN(len);
44 }
45 EXPORT_SYMBOL_GPL(xdr_decode_netobj);
46 
47 /**
48  * xdr_encode_opaque_fixed - Encode fixed length opaque data
49  * @p: pointer to current position in XDR buffer.
50  * @ptr: pointer to data to encode (or NULL)
51  * @nbytes: size of data.
52  *
53  * Copy the array of data of length nbytes at ptr to the XDR buffer
54  * at position p, then align to the next 32-bit boundary by padding
55  * with zero bytes (see RFC1832).
56  * Note: if ptr is NULL, only the padding is performed.
57  *
58  * Returns the updated current XDR buffer position
59  *
60  */
61 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
62 {
63 	if (likely(nbytes != 0)) {
64 		unsigned int quadlen = XDR_QUADLEN(nbytes);
65 		unsigned int padding = (quadlen << 2) - nbytes;
66 
67 		if (ptr != NULL)
68 			memcpy(p, ptr, nbytes);
69 		if (padding != 0)
70 			memset((char *)p + nbytes, 0, padding);
71 		p += quadlen;
72 	}
73 	return p;
74 }
75 EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
76 
77 /**
78  * xdr_encode_opaque - Encode variable length opaque data
79  * @p: pointer to current position in XDR buffer.
80  * @ptr: pointer to data to encode (or NULL)
81  * @nbytes: size of data.
82  *
83  * Returns the updated current XDR buffer position
84  */
85 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
86 {
87 	*p++ = cpu_to_be32(nbytes);
88 	return xdr_encode_opaque_fixed(p, ptr, nbytes);
89 }
90 EXPORT_SYMBOL_GPL(xdr_encode_opaque);
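/*
 * A minimal usage sketch, assuming a hypothetical file-handle object 'fh':
 * xdr_encode_opaque() writes the length word, the data, and the zero padding
 * up to the next 32-bit boundary, and returns the next word position.
 *
 *	p = xdr_encode_opaque(p, fh->data, fh->size);
 */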
91 
92 __be32 *
93 xdr_encode_string(__be32 *p, const char *string)
94 {
95 	return xdr_encode_array(p, string, strlen(string));
96 }
97 EXPORT_SYMBOL_GPL(xdr_encode_string);
98 
99 __be32 *
100 xdr_decode_string_inplace(__be32 *p, char **sp,
101 			  unsigned int *lenp, unsigned int maxlen)
102 {
103 	u32 len;
104 
105 	len = be32_to_cpu(*p++);
106 	if (len > maxlen)
107 		return NULL;
108 	*lenp = len;
109 	*sp = (char *) p;
110 	return p + XDR_QUADLEN(len);
111 }
112 EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
113 
114 /**
115  * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
116  * @buf: XDR buffer where string resides
117  * @len: length of string, in bytes
118  *
119  */
120 void
121 xdr_terminate_string(struct xdr_buf *buf, const u32 len)
122 {
123 	char *kaddr;
124 
125 	kaddr = kmap_atomic(buf->pages[0]);
126 	kaddr[buf->page_base + len] = '\0';
127 	kunmap_atomic(kaddr);
128 }
129 EXPORT_SYMBOL_GPL(xdr_terminate_string);
130 
131 void
132 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
133 		 struct page **pages, unsigned int base, unsigned int len)
134 {
135 	struct kvec *head = xdr->head;
136 	struct kvec *tail = xdr->tail;
137 	char *buf = (char *)head->iov_base;
138 	unsigned int buflen = head->iov_len;
139 
140 	head->iov_len  = offset;
141 
142 	xdr->pages = pages;
143 	xdr->page_base = base;
144 	xdr->page_len = len;
145 
146 	tail->iov_base = buf + offset;
147 	tail->iov_len = buflen - offset;
148 
149 	xdr->buflen += len;
150 }
151 EXPORT_SYMBOL_GPL(xdr_inline_pages);
152 
153 /*
154  * Helper routines for doing 'memmove' like operations on a struct xdr_buf
155  */
156 
157 /**
158  * _shift_data_right_pages
159  * @pages: vector of pages containing both the source and dest memory area.
160  * @pgto_base: page vector address of destination
161  * @pgfrom_base: page vector address of source
162  * @len: number of bytes to copy
163  *
164  * Note: the addresses pgto_base and pgfrom_base are both calculated in
165  *       the same way:
166  *            if a memory area starts at byte 'base' in page 'pages[i]',
167  *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
168  * Also note: pgfrom_base must be < pgto_base, but the memory areas
169  * 	they point to may overlap.
170  */
171 static void
172 _shift_data_right_pages(struct page **pages, size_t pgto_base,
173 		size_t pgfrom_base, size_t len)
174 {
175 	struct page **pgfrom, **pgto;
176 	char *vfrom, *vto;
177 	size_t copy;
178 
179 	BUG_ON(pgto_base <= pgfrom_base);
180 
181 	pgto_base += len;
182 	pgfrom_base += len;
183 
184 	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
185 	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
186 
187 	pgto_base &= ~PAGE_CACHE_MASK;
188 	pgfrom_base &= ~PAGE_CACHE_MASK;
189 
190 	do {
191 		/* Are any pointers crossing a page boundary? */
192 		if (pgto_base == 0) {
193 			pgto_base = PAGE_CACHE_SIZE;
194 			pgto--;
195 		}
196 		if (pgfrom_base == 0) {
197 			pgfrom_base = PAGE_CACHE_SIZE;
198 			pgfrom--;
199 		}
200 
201 		copy = len;
202 		if (copy > pgto_base)
203 			copy = pgto_base;
204 		if (copy > pgfrom_base)
205 			copy = pgfrom_base;
206 		pgto_base -= copy;
207 		pgfrom_base -= copy;
208 
209 		vto = kmap_atomic(*pgto);
210 		if (*pgto != *pgfrom) {
211 			vfrom = kmap_atomic(*pgfrom);
212 			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 			kunmap_atomic(vfrom);
214 		} else
215 			memmove(vto + pgto_base, vto + pgfrom_base, copy);
216 		flush_dcache_page(*pgto);
217 		kunmap_atomic(vto);
218 
219 	} while ((len -= copy) != 0);
220 }
221 
222 /**
223  * _copy_to_pages
224  * @pages: array of pages
225  * @pgbase: page vector address of destination
226  * @p: pointer to source data
227  * @len: length
228  *
229  * Copies data from an arbitrary memory location into an array of pages.
230  * The copy is assumed to be non-overlapping.
231  */
232 static void
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
234 {
235 	struct page **pgto;
236 	char *vto;
237 	size_t copy;
238 
239 	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
240 	pgbase &= ~PAGE_CACHE_MASK;
241 
242 	for (;;) {
243 		copy = PAGE_CACHE_SIZE - pgbase;
244 		if (copy > len)
245 			copy = len;
246 
247 		vto = kmap_atomic(*pgto);
248 		memcpy(vto + pgbase, p, copy);
249 		kunmap_atomic(vto);
250 
251 		len -= copy;
252 		if (len == 0)
253 			break;
254 
255 		pgbase += copy;
256 		if (pgbase == PAGE_CACHE_SIZE) {
257 			flush_dcache_page(*pgto);
258 			pgbase = 0;
259 			pgto++;
260 		}
261 		p += copy;
262 	}
263 	flush_dcache_page(*pgto);
264 }
265 
266 /**
267  * _copy_from_pages
268  * @p: pointer to destination
269  * @pages: array of pages
270  * @pgbase: offset of source data
271  * @len: length
272  *
273  * Copies data into an arbitrary memory location from an array of pages.
274  * The copy is assumed to be non-overlapping.
275  */
276 void
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
278 {
279 	struct page **pgfrom;
280 	char *vfrom;
281 	size_t copy;
282 
283 	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
284 	pgbase &= ~PAGE_CACHE_MASK;
285 
286 	do {
287 		copy = PAGE_CACHE_SIZE - pgbase;
288 		if (copy > len)
289 			copy = len;
290 
291 		vfrom = kmap_atomic(*pgfrom);
292 		memcpy(p, vfrom + pgbase, copy);
293 		kunmap_atomic(vfrom);
294 
295 		pgbase += copy;
296 		if (pgbase == PAGE_CACHE_SIZE) {
297 			pgbase = 0;
298 			pgfrom++;
299 		}
300 		p += copy;
301 
302 	} while ((len -= copy) != 0);
303 }
304 EXPORT_SYMBOL_GPL(_copy_from_pages);
305 
306 /**
307  * xdr_shrink_bufhead
308  * @buf: xdr_buf
309  * @len: bytes to remove from buf->head[0]
310  *
311  * Shrinks XDR buffer's header kvec buf->head[0] by
312  * 'len' bytes. The extra data is not lost, but is instead
313  * moved into the inlined pages and/or the tail.
314  */
315 static void
316 xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
317 {
318 	struct kvec *head, *tail;
319 	size_t copy, offs;
320 	unsigned int pglen = buf->page_len;
321 
322 	tail = buf->tail;
323 	head = buf->head;
324 
325 	WARN_ON_ONCE(len > head->iov_len);
326 	if (len > head->iov_len)
327 		len = head->iov_len;
328 
329 	/* Shift the tail first */
330 	if (tail->iov_len != 0) {
331 		if (tail->iov_len > len) {
332 			copy = tail->iov_len - len;
333 			memmove((char *)tail->iov_base + len,
334 					tail->iov_base, copy);
335 		}
336 		/* Copy from the inlined pages into the tail */
337 		copy = len;
338 		if (copy > pglen)
339 			copy = pglen;
340 		offs = len - copy;
341 		if (offs >= tail->iov_len)
342 			copy = 0;
343 		else if (copy > tail->iov_len - offs)
344 			copy = tail->iov_len - offs;
345 		if (copy != 0)
346 			_copy_from_pages((char *)tail->iov_base + offs,
347 					buf->pages,
348 					buf->page_base + pglen + offs - len,
349 					copy);
350 		/* Do we also need to copy data from the head into the tail ? */
351 		if (len > pglen) {
352 			offs = copy = len - pglen;
353 			if (copy > tail->iov_len)
354 				copy = tail->iov_len;
355 			memcpy(tail->iov_base,
356 					(char *)head->iov_base +
357 					head->iov_len - offs,
358 					copy);
359 		}
360 	}
361 	/* Now handle pages */
362 	if (pglen != 0) {
363 		if (pglen > len)
364 			_shift_data_right_pages(buf->pages,
365 					buf->page_base + len,
366 					buf->page_base,
367 					pglen - len);
368 		copy = len;
369 		if (len > pglen)
370 			copy = pglen;
371 		_copy_to_pages(buf->pages, buf->page_base,
372 				(char *)head->iov_base + head->iov_len - len,
373 				copy);
374 	}
375 	head->iov_len -= len;
376 	buf->buflen -= len;
377 	/* Have we truncated the message? */
378 	if (buf->len > buf->buflen)
379 		buf->len = buf->buflen;
380 }
381 
382 /**
383  * xdr_shrink_pagelen
384  * @buf: xdr_buf
385  * @len: bytes to remove from buf->pages
386  *
387  * Shrinks XDR buffer's page array buf->pages by
388  * 'len' bytes. The extra data is not lost, but is instead
389  * moved into the tail.
390  */
391 static void
392 xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
393 {
394 	struct kvec *tail;
395 	size_t copy;
396 	unsigned int pglen = buf->page_len;
397 	unsigned int tailbuf_len;
398 
399 	tail = buf->tail;
400 	BUG_ON(len > pglen);
401 
402 	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
403 
404 	/* Shift the tail first */
405 	if (tailbuf_len != 0) {
406 		unsigned int free_space = tailbuf_len - tail->iov_len;
407 
408 		if (len < free_space)
409 			free_space = len;
410 		tail->iov_len += free_space;
411 
412 		copy = len;
413 		if (tail->iov_len > len) {
414 			char *p = (char *)tail->iov_base + len;
415 			memmove(p, tail->iov_base, tail->iov_len - len);
416 		} else
417 			copy = tail->iov_len;
418 		/* Copy from the inlined pages into the tail */
419 		_copy_from_pages((char *)tail->iov_base,
420 				buf->pages, buf->page_base + pglen - len,
421 				copy);
422 	}
423 	buf->page_len -= len;
424 	buf->buflen -= len;
425 	/* Have we truncated the message? */
426 	if (buf->len > buf->buflen)
427 		buf->len = buf->buflen;
428 }
429 
430 void
431 xdr_shift_buf(struct xdr_buf *buf, size_t len)
432 {
433 	xdr_shrink_bufhead(buf, len);
434 }
435 EXPORT_SYMBOL_GPL(xdr_shift_buf);
436 
437 /**
438  * xdr_stream_pos - Return the current offset from the start of the xdr_stream
439  * @xdr: pointer to struct xdr_stream
440  */
441 unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
442 {
443 	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
444 }
445 EXPORT_SYMBOL_GPL(xdr_stream_pos);
446 
447 /**
448  * xdr_init_encode - Initialize a struct xdr_stream for sending data.
449  * @xdr: pointer to xdr_stream struct
450  * @buf: pointer to XDR buffer in which to encode data
451  * @p: current pointer inside XDR buffer
452  *
453  * Note: at the moment the RPC client only passes the length of our
454  *	 scratch buffer in the xdr_buf's header kvec. Previously this
455  *	 meant we needed to call xdr_adjust_iovec() after encoding the
456  *	 data. With the new scheme, the xdr_stream manages the details
457  *	 of the buffer length, and takes care of adjusting the kvec
458  *	 length for us.
459  */
460 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
461 {
462 	struct kvec *iov = buf->head;
463 	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
464 
465 	xdr_set_scratch_buffer(xdr, NULL, 0);
466 	BUG_ON(scratch_len < 0);
467 	xdr->buf = buf;
468 	xdr->iov = iov;
469 	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
470 	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
471 	BUG_ON(iov->iov_len > scratch_len);
472 
473 	if (p != xdr->p && p != NULL) {
474 		size_t len;
475 
476 		BUG_ON(p < xdr->p || p > xdr->end);
477 		len = (char *)p - (char *)xdr->p;
478 		xdr->p = p;
479 		buf->len += len;
480 		iov->iov_len += len;
481 	}
482 }
483 EXPORT_SYMBOL_GPL(xdr_init_encode);
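/*
 * A minimal usage sketch, assuming a hypothetical per-procedure argument
 * encoder: the stream is initialized over the request's send buffer and the
 * encode helpers then append data through it.
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 *	encode_foo_args(&xdr, args);
 */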
484 
485 /**
486  * xdr_commit_encode - Ensure all data is written to buffer
487  * @xdr: pointer to xdr_stream
488  *
489  * We handle encoding across page boundaries by giving the caller a
490  * temporary location to write to, then later copying the data into
491  * place; xdr_commit_encode does that copying.
492  *
493  * Normally the caller doesn't need to call this directly, as the
494  * following xdr_reserve_space will do it.  But an explicit call may be
495  * required at the end of encoding, or any other time when the xdr_buf
496  * data might be read.
497  */
498 void xdr_commit_encode(struct xdr_stream *xdr)
499 {
500 	int shift = xdr->scratch.iov_len;
501 	void *page;
502 
503 	if (shift == 0)
504 		return;
505 	page = page_address(*xdr->page_ptr);
506 	memcpy(xdr->scratch.iov_base, page, shift);
507 	memmove(page, page + shift, (void *)xdr->p - page);
508 	xdr->scratch.iov_len = 0;
509 }
510 EXPORT_SYMBOL_GPL(xdr_commit_encode);
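/*
 * A minimal usage sketch: if the underlying xdr_buf is about to be read or
 * transmitted immediately after encoding, flush any data still parked in the
 * scratch area first.
 *
 *	xdr_commit_encode(xdr);
 */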
511 
512 static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
513 		size_t nbytes)
514 {
515 	__be32 *p;
516 	int space_left;
517 	int frag1bytes, frag2bytes;
518 
519 	if (nbytes > PAGE_SIZE)
520 		return NULL; /* Bigger buffers require special handling */
521 	if (xdr->buf->len + nbytes > xdr->buf->buflen)
522 		return NULL; /* Sorry, we're totally out of space */
523 	frag1bytes = (xdr->end - xdr->p) << 2;
524 	frag2bytes = nbytes - frag1bytes;
525 	if (xdr->iov)
526 		xdr->iov->iov_len += frag1bytes;
527 	else
528 		xdr->buf->page_len += frag1bytes;
529 	xdr->page_ptr++;
530 	xdr->iov = NULL;
531 	/*
532 	 * If the last encode didn't end exactly on a page boundary, the
533 	 * next one will straddle boundaries.  Encode into the next
534 	 * page, then copy it back later in xdr_commit_encode.  We use
535 	 * the "scratch" iov to track any temporarily unused fragment of
536 	 * space at the end of the previous buffer:
537 	 */
538 	xdr->scratch.iov_base = xdr->p;
539 	xdr->scratch.iov_len = frag1bytes;
540 	p = page_address(*xdr->page_ptr);
541 	/*
542 	 * Note this is where the next encode will start after we've
543 	 * shifted this one back:
544 	 */
545 	xdr->p = (void *)p + frag2bytes;
546 	space_left = xdr->buf->buflen - xdr->buf->len;
547 	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
548 	xdr->buf->page_len += frag2bytes;
549 	xdr->buf->len += nbytes;
550 	return p;
551 }
552 
553 /**
554  * xdr_reserve_space - Reserve buffer space for sending
555  * @xdr: pointer to xdr_stream
556  * @nbytes: number of bytes to reserve
557  *
558  * Checks that we have enough buffer space to encode 'nbytes' more
559  * bytes of data. If so, update the total xdr_buf length, and
560  * adjust the length of the current kvec.
561  */
562 __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
563 {
564 	__be32 *p = xdr->p;
565 	__be32 *q;
566 
567 	xdr_commit_encode(xdr);
568 	/* align nbytes on the next 32-bit boundary */
569 	nbytes += 3;
570 	nbytes &= ~3;
571 	q = p + (nbytes >> 2);
572 	if (unlikely(q > xdr->end || q < p))
573 		return xdr_get_next_encode_buffer(xdr, nbytes);
574 	xdr->p = q;
575 	if (xdr->iov)
576 		xdr->iov->iov_len += nbytes;
577 	else
578 		xdr->buf->page_len += nbytes;
579 	xdr->buf->len += nbytes;
580 	return p;
581 }
582 EXPORT_SYMBOL_GPL(xdr_reserve_space);
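/*
 * A minimal usage sketch, assuming a hypothetical opcode OP_FOO and an opaque
 * payload (data, len): reserve the rounded-up space first, then fill it in.
 *
 *	p = xdr_reserve_space(xdr, 8 + (XDR_QUADLEN(len) << 2));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(OP_FOO);
 *	xdr_encode_opaque(p, data, len);
 */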
583 
584 /**
585  * xdr_truncate_encode - truncate an encode buffer
586  * @xdr: pointer to xdr_stream
587  * @len: new length of buffer
588  *
589  * Truncates the xdr stream, so that xdr->buf->len == len,
590  * and xdr->p points at offset len from the start of the buffer, and
591  * head, tail, and page lengths are adjusted to correspond.
592  *
593  * If this means moving xdr->p to a different buffer, we assume that
594  * the end pointer should be set to the end of the current page,
595  * except in the case of the head buffer when we assume the head
596  * buffer's current length represents the end of the available buffer.
597  *
598  * This is *not* safe to use on a buffer that already has inlined page
599  * cache pages (as in a zero-copy server read reply), except for the
600  * simple case of truncating from one position in the tail to another.
601  *
602  */
603 void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
604 {
605 	struct xdr_buf *buf = xdr->buf;
606 	struct kvec *head = buf->head;
607 	struct kvec *tail = buf->tail;
608 	int fraglen;
609 	int new;
610 
611 	if (len > buf->len) {
612 		WARN_ON_ONCE(1);
613 		return;
614 	}
615 	xdr_commit_encode(xdr);
616 
617 	fraglen = min_t(int, buf->len - len, tail->iov_len);
618 	tail->iov_len -= fraglen;
619 	buf->len -= fraglen;
620 	if (tail->iov_len && buf->len == len) {
621 		xdr->p = tail->iov_base + tail->iov_len;
622 		/* xdr->end, xdr->iov should be set already */
623 		return;
624 	}
625 	WARN_ON_ONCE(fraglen);
626 	fraglen = min_t(int, buf->len - len, buf->page_len);
627 	buf->page_len -= fraglen;
628 	buf->len -= fraglen;
629 
630 	new = buf->page_base + buf->page_len;
631 
632 	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
633 
634 	if (buf->page_len && buf->len == len) {
635 		xdr->p = page_address(*xdr->page_ptr);
636 		xdr->end = (void *)xdr->p + PAGE_SIZE;
637 		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
638 		/* xdr->iov should already be NULL */
639 		return;
640 	}
641 	if (fraglen) {
642 		xdr->end = head->iov_base + head->iov_len;
643 		xdr->page_ptr--;
644 	}
645 	/* (otherwise assume xdr->end is already set) */
646 	head->iov_len = len;
647 	buf->len = len;
648 	xdr->p = head->iov_base + head->iov_len;
649 	xdr->iov = buf->head;
650 }
651 EXPORT_SYMBOL(xdr_truncate_encode);
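/*
 * A minimal usage sketch, assuming a hypothetical optional-item encoder:
 * note the stream length before encoding, and roll the buffer back to that
 * point if the item cannot be completed.
 *
 *	unsigned int start = xdr->buf->len;
 *
 *	if (encode_foo_item(xdr, item) < 0)
 *		xdr_truncate_encode(xdr, start);
 */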
652 
653 /**
654  * xdr_restrict_buflen - decrease available buffer space
655  * @xdr: pointer to xdr_stream
656  * @newbuflen: new maximum number of bytes available
657  *
658  * Adjust our idea of how much space is available in the buffer.
659  * If we've already used too much space in the buffer, returns -1.
660  * If the available space is already smaller than newbuflen, returns 0
661  * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
662  * and ensures xdr->end is set at most offset newbuflen from the start
663  * of the buffer.
664  */
665 int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
666 {
667 	struct xdr_buf *buf = xdr->buf;
668 	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
669 	int end_offset = buf->len + left_in_this_buf;
670 
671 	if (newbuflen < 0 || newbuflen < buf->len)
672 		return -1;
673 	if (newbuflen > buf->buflen)
674 		return 0;
675 	if (newbuflen < end_offset)
676 		xdr->end = (void *)xdr->end + newbuflen - end_offset;
677 	buf->buflen = newbuflen;
678 	return 0;
679 }
680 EXPORT_SYMBOL(xdr_restrict_buflen);
681 
682 /**
683  * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
684  * @xdr: pointer to xdr_stream
685  * @pages: list of pages
686  * @base: offset of first byte
687  * @len: length of data in bytes
688  *
689  */
690 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
691 		 unsigned int len)
692 {
693 	struct xdr_buf *buf = xdr->buf;
694 	struct kvec *iov = buf->tail;
695 	buf->pages = pages;
696 	buf->page_base = base;
697 	buf->page_len = len;
698 
699 	iov->iov_base = (char *)xdr->p;
700 	iov->iov_len  = 0;
701 	xdr->iov = iov;
702 
703 	if (len & 3) {
704 		unsigned int pad = 4 - (len & 3);
705 
706 		BUG_ON(xdr->p >= xdr->end);
707 		iov->iov_base = (char *)xdr->p + (len & 3);
708 		iov->iov_len  += pad;
709 		len += pad;
710 		*xdr->p++ = 0;
711 	}
712 	buf->buflen += len;
713 	buf->len += len;
714 }
715 EXPORT_SYMBOL_GPL(xdr_write_pages);
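/*
 * A minimal usage sketch, assuming hypothetical WRITE-style arguments that
 * carry their payload in a page vector: the fixed header is encoded into the
 * head, then the pages are hung off the xdr_buf without copying.
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 *	encode_foo_header(&xdr, args);
 *	xdr_write_pages(&xdr, args->pages, args->pgbase, args->count);
 */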
716 
717 static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
718 		unsigned int len)
719 {
720 	if (len > iov->iov_len)
721 		len = iov->iov_len;
722 	xdr->p = (__be32*)iov->iov_base;
723 	xdr->end = (__be32*)(iov->iov_base + len);
724 	xdr->iov = iov;
725 	xdr->page_ptr = NULL;
726 }
727 
728 static int xdr_set_page_base(struct xdr_stream *xdr,
729 		unsigned int base, unsigned int len)
730 {
731 	unsigned int pgnr;
732 	unsigned int maxlen;
733 	unsigned int pgoff;
734 	unsigned int pgend;
735 	void *kaddr;
736 
737 	maxlen = xdr->buf->page_len;
738 	if (base >= maxlen)
739 		return -EINVAL;
740 	maxlen -= base;
741 	if (len > maxlen)
742 		len = maxlen;
743 
744 	base += xdr->buf->page_base;
745 
746 	pgnr = base >> PAGE_SHIFT;
747 	xdr->page_ptr = &xdr->buf->pages[pgnr];
748 	kaddr = page_address(*xdr->page_ptr);
749 
750 	pgoff = base & ~PAGE_MASK;
751 	xdr->p = (__be32*)(kaddr + pgoff);
752 
753 	pgend = pgoff + len;
754 	if (pgend > PAGE_SIZE)
755 		pgend = PAGE_SIZE;
756 	xdr->end = (__be32*)(kaddr + pgend);
757 	xdr->iov = NULL;
758 	return 0;
759 }
760 
761 static void xdr_set_next_page(struct xdr_stream *xdr)
762 {
763 	unsigned int newbase;
764 
765 	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
766 	newbase -= xdr->buf->page_base;
767 
768 	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
769 		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
770 }
771 
772 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
773 {
774 	if (xdr->page_ptr != NULL)
775 		xdr_set_next_page(xdr);
776 	else if (xdr->iov == xdr->buf->head) {
777 		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
778 			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
779 	}
780 	return xdr->p != xdr->end;
781 }
782 
783 /**
784  * xdr_init_decode - Initialize an xdr_stream for decoding data.
785  * @xdr: pointer to xdr_stream struct
786  * @buf: pointer to XDR buffer from which to decode data
787  * @p: current pointer inside XDR buffer
788  */
789 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
790 {
791 	xdr->buf = buf;
792 	xdr->scratch.iov_base = NULL;
793 	xdr->scratch.iov_len = 0;
794 	xdr->nwords = XDR_QUADLEN(buf->len);
795 	if (buf->head[0].iov_len != 0)
796 		xdr_set_iov(xdr, buf->head, buf->len);
797 	else if (buf->page_len != 0)
798 		xdr_set_page_base(xdr, 0, buf->len);
799 	if (p != NULL && p > xdr->p && xdr->end >= p) {
800 		xdr->nwords -= p - xdr->p;
801 		xdr->p = p;
802 	}
803 }
804 EXPORT_SYMBOL_GPL(xdr_init_decode);
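/*
 * A minimal usage sketch, assuming a hypothetical reply decoder: initialize
 * the stream over the receive buffer, then pull words off the head.
 *
 *	struct xdr_stream xdr;
 *	__be32 *q;
 *	u32 status;
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
 *	q = xdr_inline_decode(&xdr, 4);
 *	if (!q)
 *		return -EIO;
 *	status = be32_to_cpup(q);
 */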
805 
806 /**
807  * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
808  * @xdr: pointer to xdr_stream struct
809  * @buf: pointer to XDR buffer from which to decode data
810  * @pages: list of pages to decode into
811  * @len: length in bytes of buffer in pages
812  */
813 void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
814 			   struct page **pages, unsigned int len)
815 {
816 	memset(buf, 0, sizeof(*buf));
817 	buf->pages =  pages;
818 	buf->page_len =  len;
819 	buf->buflen =  len;
820 	buf->len = len;
821 	xdr_init_decode(xdr, buf, NULL);
822 }
823 EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
824 
825 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
826 {
827 	unsigned int nwords = XDR_QUADLEN(nbytes);
828 	__be32 *p = xdr->p;
829 	__be32 *q = p + nwords;
830 
831 	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
832 		return NULL;
833 	xdr->p = q;
834 	xdr->nwords -= nwords;
835 	return p;
836 }
837 
838 /**
839  * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
840  * @xdr: pointer to xdr_stream struct
841  * @buf: pointer to an empty buffer
842  * @buflen: size of 'buf'
843  *
844  * The scratch buffer is used when decoding from an array of pages.
845  * If an xdr_inline_decode() call spans across page boundaries, then
846  * we copy the data into the scratch buffer in order to allow linear
847  * access.
848  */
849 void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
850 {
851 	xdr->scratch.iov_base = buf;
852 	xdr->scratch.iov_len = buflen;
853 }
854 EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
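/*
 * A minimal usage sketch, assuming a caller-defined bound FOO_MAXLEN on the
 * object being decoded: a scratch buffer of that size lets a later
 * xdr_inline_decode() succeed even when the object straddles a page boundary.
 *
 *	__be32 scratch[XDR_QUADLEN(FOO_MAXLEN)];
 *
 *	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
 */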
855 
856 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
857 {
858 	__be32 *p;
859 	void *cpdest = xdr->scratch.iov_base;
860 	size_t cplen = (char *)xdr->end - (char *)xdr->p;
861 
862 	if (nbytes > xdr->scratch.iov_len)
863 		return NULL;
864 	memcpy(cpdest, xdr->p, cplen);
865 	cpdest += cplen;
866 	nbytes -= cplen;
867 	if (!xdr_set_next_buffer(xdr))
868 		return NULL;
869 	p = __xdr_inline_decode(xdr, nbytes);
870 	if (p == NULL)
871 		return NULL;
872 	memcpy(cpdest, p, nbytes);
873 	return xdr->scratch.iov_base;
874 }
875 
876 /**
877  * xdr_inline_decode - Retrieve XDR data to decode
878  * @xdr: pointer to xdr_stream struct
879  * @nbytes: number of bytes of data to decode
880  *
881  * Check if the input buffer is long enough to enable us to decode
882  * 'nbytes' more bytes of data starting at the current position.
883  * If so return the current pointer, then update the current
884  * pointer position.
885  */
886 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
887 {
888 	__be32 *p;
889 
890 	if (nbytes == 0)
891 		return xdr->p;
892 	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
893 		return NULL;
894 	p = __xdr_inline_decode(xdr, nbytes);
895 	if (p != NULL)
896 		return p;
897 	return xdr_copy_to_scratch(xdr, nbytes);
898 }
899 EXPORT_SYMBOL_GPL(xdr_inline_decode);
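/*
 * A minimal usage sketch for a counted opaque, assuming 'len' has already
 * been bounds-checked against a scratch buffer set up as above:
 *
 *	p = xdr_inline_decode(xdr, 4);
 *	if (!p)
 *		return -EIO;
 *	len = be32_to_cpup(p);
 *	p = xdr_inline_decode(xdr, len);
 *	if (!p)
 *		return -EIO;
 *	memcpy(dest, p, len);
 */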
900 
901 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
902 {
903 	struct xdr_buf *buf = xdr->buf;
904 	struct kvec *iov;
905 	unsigned int nwords = XDR_QUADLEN(len);
906 	unsigned int cur = xdr_stream_pos(xdr);
907 
908 	if (xdr->nwords == 0)
909 		return 0;
910 	/* Realign pages to current pointer position */
911 	iov  = buf->head;
912 	if (iov->iov_len > cur) {
913 		xdr_shrink_bufhead(buf, iov->iov_len - cur);
914 		xdr->nwords = XDR_QUADLEN(buf->len - cur);
915 	}
916 
917 	if (nwords > xdr->nwords) {
918 		nwords = xdr->nwords;
919 		len = nwords << 2;
920 	}
921 	if (buf->page_len <= len)
922 		len = buf->page_len;
923 	else if (nwords < xdr->nwords) {
924 		/* Truncate page data and move it into the tail */
925 		xdr_shrink_pagelen(buf, buf->page_len - len);
926 		xdr->nwords = XDR_QUADLEN(buf->len - cur);
927 	}
928 	return len;
929 }
930 
931 /**
932  * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
933  * @xdr: pointer to xdr_stream struct
934  * @len: number of bytes of page data
935  *
936  * Moves data beyond the current pointer position from the XDR head[] buffer
937  * into the page list. Any data that lies beyond current position + "len"
938  * bytes is moved into the XDR tail[].
939  *
940  * Returns the number of XDR encoded bytes now contained in the pages
941  */
942 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
943 {
944 	struct xdr_buf *buf = xdr->buf;
945 	struct kvec *iov;
946 	unsigned int nwords;
947 	unsigned int end;
948 	unsigned int padding;
949 
950 	len = xdr_align_pages(xdr, len);
951 	if (len == 0)
952 		return 0;
953 	nwords = XDR_QUADLEN(len);
954 	padding = (nwords << 2) - len;
955 	xdr->iov = iov = buf->tail;
956 	/* Compute remaining message length.  */
957 	end = ((xdr->nwords - nwords) << 2) + padding;
958 	if (end > iov->iov_len)
959 		end = iov->iov_len;
960 
961 	/*
962 	 * Position current pointer at beginning of tail, and
963 	 * set remaining message length.
964 	 */
965 	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
966 	xdr->end = (__be32 *)((char *)iov->iov_base + end);
967 	xdr->page_ptr = NULL;
968 	xdr->nwords = XDR_QUADLEN(end - padding);
969 	return len;
970 }
971 EXPORT_SYMBOL_GPL(xdr_read_pages);
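/*
 * A minimal usage sketch for a READ-style reply, assuming 'count' was decoded
 * from the head: align the page data at the current position and check for a
 * short transmission before touching the pages.
 *
 *	recvd = xdr_read_pages(xdr, count);
 *	if (recvd < count)
 *		count = recvd;
 */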
972 
973 /**
974  * xdr_enter_page - decode data from the XDR page
975  * @xdr: pointer to xdr_stream struct
976  * @len: number of bytes of page data
977  *
978  * Moves data beyond the current pointer position from the XDR head[] buffer
979  * into the page list. Any data that lies beyond current position + "len"
980  * bytes is moved into the XDR tail[]. The current pointer is then
981  * repositioned at the beginning of the first XDR page.
982  */
983 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
984 {
985 	len = xdr_align_pages(xdr, len);
986 	/*
987 	 * Position current pointer at beginning of tail, and
988 	 * set remaining message length.
989 	 */
990 	if (len != 0)
991 		xdr_set_page_base(xdr, 0, len);
992 }
993 EXPORT_SYMBOL_GPL(xdr_enter_page);
994 
995 static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
996 
997 void
998 xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
999 {
1000 	buf->head[0] = *iov;
1001 	buf->tail[0] = empty_iov;
1002 	buf->page_len = 0;
1003 	buf->buflen = buf->len = iov->iov_len;
1004 }
1005 EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1006 
1007 /**
1008  * xdr_buf_subsegment - set subbuf to a portion of buf
1009  * @buf: an xdr buffer
1010  * @subbuf: the result buffer
1011  * @base: beginning of range in bytes
1012  * @len: length of range in bytes
1013  *
1014  * sets @subbuf to an xdr buffer representing the portion of @buf of
1015  * length @len starting at offset @base.
1016  *
1017  * @buf and @subbuf may be pointers to the same struct xdr_buf.
1018  *
1019  * Returns -1 if base or length are out of bounds.
1020  */
1021 int
1022 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1023 			unsigned int base, unsigned int len)
1024 {
1025 	subbuf->buflen = subbuf->len = len;
1026 	if (base < buf->head[0].iov_len) {
1027 		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1028 		subbuf->head[0].iov_len = min_t(unsigned int, len,
1029 						buf->head[0].iov_len - base);
1030 		len -= subbuf->head[0].iov_len;
1031 		base = 0;
1032 	} else {
1033 		base -= buf->head[0].iov_len;
1034 		subbuf->head[0].iov_len = 0;
1035 	}
1036 
1037 	if (base < buf->page_len) {
1038 		subbuf->page_len = min(buf->page_len - base, len);
1039 		base += buf->page_base;
1040 		subbuf->page_base = base & ~PAGE_CACHE_MASK;
1041 		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
1042 		len -= subbuf->page_len;
1043 		base = 0;
1044 	} else {
1045 		base -= buf->page_len;
1046 		subbuf->page_len = 0;
1047 	}
1048 
1049 	if (base < buf->tail[0].iov_len) {
1050 		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1051 		subbuf->tail[0].iov_len = min_t(unsigned int, len,
1052 						buf->tail[0].iov_len - base);
1053 		len -= subbuf->tail[0].iov_len;
1054 		base = 0;
1055 	} else {
1056 		base -= buf->tail[0].iov_len;
1057 		subbuf->tail[0].iov_len = 0;
1058 	}
1059 
1060 	if (base || len)
1061 		return -1;
1062 	return 0;
1063 }
1064 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
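/*
 * A minimal usage sketch: carve out the region covering a message body so it
 * can be verified or copied independently of the surrounding header. The
 * 'body_offset' and 'body_len' values are hypothetical.
 *
 *	struct xdr_buf body;
 *
 *	if (xdr_buf_subsegment(buf, &body, body_offset, body_len))
 *		return -EIO;
 */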
1065 
1066 /**
1067  * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1068  * @buf: buf to be trimmed
1069  * @len: number of bytes to reduce "buf" by
1070  *
1071  * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1072  * that it's possible that we'll trim less than that amount if the xdr_buf is
1073  * too small, or if (for instance) it's all in the head and the parser has
1074  * already read too far into it.
1075  */
1076 void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1077 {
1078 	size_t cur;
1079 	unsigned int trim = len;
1080 
1081 	if (buf->tail[0].iov_len) {
1082 		cur = min_t(size_t, buf->tail[0].iov_len, trim);
1083 		buf->tail[0].iov_len -= cur;
1084 		trim -= cur;
1085 		if (!trim)
1086 			goto fix_len;
1087 	}
1088 
1089 	if (buf->page_len) {
1090 		cur = min_t(unsigned int, buf->page_len, trim);
1091 		buf->page_len -= cur;
1092 		trim -= cur;
1093 		if (!trim)
1094 			goto fix_len;
1095 	}
1096 
1097 	if (buf->head[0].iov_len) {
1098 		cur = min_t(size_t, buf->head[0].iov_len, trim);
1099 		buf->head[0].iov_len -= cur;
1100 		trim -= cur;
1101 	}
1102 fix_len:
1103 	buf->len -= (len - trim);
1104 }
1105 EXPORT_SYMBOL_GPL(xdr_buf_trim);
1106 
1107 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1108 {
1109 	unsigned int this_len;
1110 
1111 	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1112 	memcpy(obj, subbuf->head[0].iov_base, this_len);
1113 	len -= this_len;
1114 	obj += this_len;
1115 	this_len = min_t(unsigned int, len, subbuf->page_len);
1116 	if (this_len)
1117 		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1118 	len -= this_len;
1119 	obj += this_len;
1120 	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1121 	memcpy(obj, subbuf->tail[0].iov_base, this_len);
1122 }
1123 
1124 /* obj is assumed to point to allocated memory of size at least len: */
1125 int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1126 {
1127 	struct xdr_buf subbuf;
1128 	int status;
1129 
1130 	status = xdr_buf_subsegment(buf, &subbuf, base, len);
1131 	if (status != 0)
1132 		return status;
1133 	__read_bytes_from_xdr_buf(&subbuf, obj, len);
1134 	return 0;
1135 }
1136 EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
1137 
1138 static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1139 {
1140 	unsigned int this_len;
1141 
1142 	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1143 	memcpy(subbuf->head[0].iov_base, obj, this_len);
1144 	len -= this_len;
1145 	obj += this_len;
1146 	this_len = min_t(unsigned int, len, subbuf->page_len);
1147 	if (this_len)
1148 		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1149 	len -= this_len;
1150 	obj += this_len;
1151 	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1152 	memcpy(subbuf->tail[0].iov_base, obj, this_len);
1153 }
1154 
1155 /* obj is assumed to point to allocated memory of size at least len: */
1156 int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1157 {
1158 	struct xdr_buf subbuf;
1159 	int status;
1160 
1161 	status = xdr_buf_subsegment(buf, &subbuf, base, len);
1162 	if (status != 0)
1163 		return status;
1164 	__write_bytes_to_xdr_buf(&subbuf, obj, len);
1165 	return 0;
1166 }
1167 EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
1168 
1169 int
1170 xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1171 {
1172 	__be32	raw;
1173 	int	status;
1174 
1175 	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1176 	if (status)
1177 		return status;
1178 	*obj = be32_to_cpu(raw);
1179 	return 0;
1180 }
1181 EXPORT_SYMBOL_GPL(xdr_decode_word);
1182 
1183 int
1184 xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
1185 {
1186 	__be32	raw = cpu_to_be32(obj);
1187 
1188 	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1189 }
1190 EXPORT_SYMBOL_GPL(xdr_encode_word);
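/*
 * A minimal usage sketch: read one 32-bit word at a byte offset into the
 * xdr_buf, and later overwrite it in place.
 *
 *	u32 val;
 *
 *	if (xdr_decode_word(buf, offset, &val))
 *		return -EFAULT;
 *	if (xdr_encode_word(buf, offset, val + 1))
 *		return -EFAULT;
 */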
1191 
1192 /* If the netobj starting offset bytes from the start of xdr_buf is contained
1193  * entirely in the head or the tail, set object to point to it; otherwise
1194  * try to find space for it at the end of the tail, copy it there, and
1195  * set obj to point to it. */
1196 int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
1197 {
1198 	struct xdr_buf subbuf;
1199 
1200 	if (xdr_decode_word(buf, offset, &obj->len))
1201 		return -EFAULT;
1202 	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
1203 		return -EFAULT;
1204 
1205 	/* Is the obj contained entirely in the head? */
1206 	obj->data = subbuf.head[0].iov_base;
1207 	if (subbuf.head[0].iov_len == obj->len)
1208 		return 0;
1209 	/* ..or is the obj contained entirely in the tail? */
1210 	obj->data = subbuf.tail[0].iov_base;
1211 	if (subbuf.tail[0].iov_len == obj->len)
1212 		return 0;
1213 
1214 	/* use end of tail as storage for obj:
1215 	 * (We don't copy to the beginning because then we'd have
1216 	 * to worry about doing a potentially overlapping copy.
1217 	 * This assumes the object is at most half the length of the
1218 	 * tail.) */
1219 	if (obj->len > buf->buflen - buf->len)
1220 		return -ENOMEM;
1221 	if (buf->tail[0].iov_len != 0)
1222 		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
1223 	else
1224 		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
1225 	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
1226 	return 0;
1227 }
1228 EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
1229 
1230 /* Returns 0 on success, or else a negative error code. */
1231 static int
1232 xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1233 		 struct xdr_array2_desc *desc, int encode)
1234 {
1235 	char *elem = NULL, *c;
1236 	unsigned int copied = 0, todo, avail_here;
1237 	struct page **ppages = NULL;
1238 	int err;
1239 
1240 	if (encode) {
1241 		if (xdr_encode_word(buf, base, desc->array_len) != 0)
1242 			return -EINVAL;
1243 	} else {
1244 		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1245 		    desc->array_len > desc->array_maxlen ||
1246 		    (unsigned long) base + 4 + desc->array_len *
1247 				    desc->elem_size > buf->len)
1248 			return -EINVAL;
1249 	}
1250 	base += 4;
1251 
1252 	if (!desc->xcode)
1253 		return 0;
1254 
1255 	todo = desc->array_len * desc->elem_size;
1256 
1257 	/* process head */
1258 	if (todo && base < buf->head->iov_len) {
1259 		c = buf->head->iov_base + base;
1260 		avail_here = min_t(unsigned int, todo,
1261 				   buf->head->iov_len - base);
1262 		todo -= avail_here;
1263 
1264 		while (avail_here >= desc->elem_size) {
1265 			err = desc->xcode(desc, c);
1266 			if (err)
1267 				goto out;
1268 			c += desc->elem_size;
1269 			avail_here -= desc->elem_size;
1270 		}
1271 		if (avail_here) {
1272 			if (!elem) {
1273 				elem = kmalloc(desc->elem_size, GFP_KERNEL);
1274 				err = -ENOMEM;
1275 				if (!elem)
1276 					goto out;
1277 			}
1278 			if (encode) {
1279 				err = desc->xcode(desc, elem);
1280 				if (err)
1281 					goto out;
1282 				memcpy(c, elem, avail_here);
1283 			} else
1284 				memcpy(elem, c, avail_here);
1285 			copied = avail_here;
1286 		}
1287 		base = buf->head->iov_len;  /* align to start of pages */
1288 	}
1289 
1290 	/* process pages array */
1291 	base -= buf->head->iov_len;
1292 	if (todo && base < buf->page_len) {
1293 		unsigned int avail_page;
1294 
1295 		avail_here = min(todo, buf->page_len - base);
1296 		todo -= avail_here;
1297 
1298 		base += buf->page_base;
1299 		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
1300 		base &= ~PAGE_CACHE_MASK;
1301 		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
1302 					avail_here);
1303 		c = kmap(*ppages) + base;
1304 
1305 		while (avail_here) {
1306 			avail_here -= avail_page;
1307 			if (copied || avail_page < desc->elem_size) {
1308 				unsigned int l = min(avail_page,
1309 					desc->elem_size - copied);
1310 				if (!elem) {
1311 					elem = kmalloc(desc->elem_size,
1312 						       GFP_KERNEL);
1313 					err = -ENOMEM;
1314 					if (!elem)
1315 						goto out;
1316 				}
1317 				if (encode) {
1318 					if (!copied) {
1319 						err = desc->xcode(desc, elem);
1320 						if (err)
1321 							goto out;
1322 					}
1323 					memcpy(c, elem + copied, l);
1324 					copied += l;
1325 					if (copied == desc->elem_size)
1326 						copied = 0;
1327 				} else {
1328 					memcpy(elem + copied, c, l);
1329 					copied += l;
1330 					if (copied == desc->elem_size) {
1331 						err = desc->xcode(desc, elem);
1332 						if (err)
1333 							goto out;
1334 						copied = 0;
1335 					}
1336 				}
1337 				avail_page -= l;
1338 				c += l;
1339 			}
1340 			while (avail_page >= desc->elem_size) {
1341 				err = desc->xcode(desc, c);
1342 				if (err)
1343 					goto out;
1344 				c += desc->elem_size;
1345 				avail_page -= desc->elem_size;
1346 			}
1347 			if (avail_page) {
1348 				unsigned int l = min(avail_page,
1349 					    desc->elem_size - copied);
1350 				if (!elem) {
1351 					elem = kmalloc(desc->elem_size,
1352 						       GFP_KERNEL);
1353 					err = -ENOMEM;
1354 					if (!elem)
1355 						goto out;
1356 				}
1357 				if (encode) {
1358 					if (!copied) {
1359 						err = desc->xcode(desc, elem);
1360 						if (err)
1361 							goto out;
1362 					}
1363 					memcpy(c, elem + copied, l);
1364 					copied += l;
1365 					if (copied == desc->elem_size)
1366 						copied = 0;
1367 				} else {
1368 					memcpy(elem + copied, c, l);
1369 					copied += l;
1370 					if (copied == desc->elem_size) {
1371 						err = desc->xcode(desc, elem);
1372 						if (err)
1373 							goto out;
1374 						copied = 0;
1375 					}
1376 				}
1377 			}
1378 			if (avail_here) {
1379 				kunmap(*ppages);
1380 				ppages++;
1381 				c = kmap(*ppages);
1382 			}
1383 
1384 			avail_page = min(avail_here,
1385 				 (unsigned int) PAGE_CACHE_SIZE);
1386 		}
1387 		base = buf->page_len;  /* align to start of tail */
1388 	}
1389 
1390 	/* process tail */
1391 	base -= buf->page_len;
1392 	if (todo) {
1393 		c = buf->tail->iov_base + base;
1394 		if (copied) {
1395 			unsigned int l = desc->elem_size - copied;
1396 
1397 			if (encode)
1398 				memcpy(c, elem + copied, l);
1399 			else {
1400 				memcpy(elem + copied, c, l);
1401 				err = desc->xcode(desc, elem);
1402 				if (err)
1403 					goto out;
1404 			}
1405 			todo -= l;
1406 			c += l;
1407 		}
1408 		while (todo) {
1409 			err = desc->xcode(desc, c);
1410 			if (err)
1411 				goto out;
1412 			c += desc->elem_size;
1413 			todo -= desc->elem_size;
1414 		}
1415 	}
1416 	err = 0;
1417 
1418 out:
1419 	kfree(elem);
1420 	if (ppages)
1421 		kunmap(*ppages);
1422 	return err;
1423 }
1424 
1425 int
1426 xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1427 		  struct xdr_array2_desc *desc)
1428 {
1429 	if (base >= buf->len)
1430 		return -EINVAL;
1431 
1432 	return xdr_xcode_array2(buf, base, desc, 0);
1433 }
1434 EXPORT_SYMBOL_GPL(xdr_decode_array2);
1435 
1436 int
1437 xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1438 		  struct xdr_array2_desc *desc)
1439 {
1440 	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1441 	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1442 		return -EINVAL;
1443 
1444 	return xdr_xcode_array2(buf, base, desc, 1);
1445 }
1446 EXPORT_SYMBOL_GPL(xdr_encode_array2);
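/*
 * A minimal usage sketch, assuming a hypothetical per-element callback and
 * element bound: the descriptor supplies the element size and the xcode
 * routine that is invoked once per array element.
 *
 *	static int foo_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		... decode or encode one element at 'elem' ...
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 4,
 *		.array_maxlen	= FOO_MAX_ELEMENTS,
 *		.xcode		= foo_xcode,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 */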
1447 
1448 int
1449 xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1450 		int (*actor)(struct scatterlist *, void *), void *data)
1451 {
1452 	int i, ret = 0;
1453 	unsigned int page_len, thislen, page_offset;
1454 	struct scatterlist      sg[1];
1455 
1456 	sg_init_table(sg, 1);
1457 
1458 	if (offset >= buf->head[0].iov_len) {
1459 		offset -= buf->head[0].iov_len;
1460 	} else {
1461 		thislen = buf->head[0].iov_len - offset;
1462 		if (thislen > len)
1463 			thislen = len;
1464 		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1465 		ret = actor(sg, data);
1466 		if (ret)
1467 			goto out;
1468 		offset = 0;
1469 		len -= thislen;
1470 	}
1471 	if (len == 0)
1472 		goto out;
1473 
1474 	if (offset >= buf->page_len) {
1475 		offset -= buf->page_len;
1476 	} else {
1477 		page_len = buf->page_len - offset;
1478 		if (page_len > len)
1479 			page_len = len;
1480 		len -= page_len;
1481 		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
1482 		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
1483 		thislen = PAGE_CACHE_SIZE - page_offset;
1484 		do {
1485 			if (thislen > page_len)
1486 				thislen = page_len;
1487 			sg_set_page(sg, buf->pages[i], thislen, page_offset);
1488 			ret = actor(sg, data);
1489 			if (ret)
1490 				goto out;
1491 			page_len -= thislen;
1492 			i++;
1493 			page_offset = 0;
1494 			thislen = PAGE_CACHE_SIZE;
1495 		} while (page_len != 0);
1496 		offset = 0;
1497 	}
1498 	if (len == 0)
1499 		goto out;
1500 	if (offset < buf->tail[0].iov_len) {
1501 		thislen = buf->tail[0].iov_len - offset;
1502 		if (thislen > len)
1503 			thislen = len;
1504 		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1505 		ret = actor(sg, data);
1506 		len -= thislen;
1507 	}
1508 	if (len != 0)
1509 		ret = -EINVAL;
1510 out:
1511 	return ret;
1512 }
1513 EXPORT_SYMBOL_GPL(xdr_process_buf);
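/*
 * A minimal usage sketch: the actor is called once per contiguous fragment
 * (head, each page, tail) with a single-entry scatterlist, so a byte counter
 * is the simplest possible consumer. 'total' is hypothetical caller state.
 *
 *	static int count_bytes(struct scatterlist *sg, void *data)
 *	{
 *		*(unsigned int *)data += sg->length;
 *		return 0;
 *	}
 *
 *	err = xdr_process_buf(buf, 0, buf->len, count_bytes, &total);
 */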
1514 
1515