/*
 *  request.c
 *
 *  Copyright (C) 2001 by Urban Widmark
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/sched.h>

#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>

#include "smb_debug.h"
#include "request.h"
#include "proto.h"

/* #define SMB_SLAB_DEBUG	(SLAB_RED_ZONE | SLAB_POISON) */
#define SMB_SLAB_DEBUG	0

/* cache for request structures */
static struct kmem_cache *req_cachep;

static int smb_request_send_req(struct smb_request *req);

/*
  /proc/slabinfo:
  name, active, num, objsize, active_slabs, num_slabs, #pages
*/


int smb_init_request_cache(void)
{
	req_cachep = kmem_cache_create("smb_request",
				       sizeof(struct smb_request), 0,
				       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
				       NULL);
	if (req_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void smb_destroy_request_cache(void)
{
	kmem_cache_destroy(req_cachep);
}

/*
 * Allocate and initialise a request structure
 */
static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
						int bufsize)
{
	struct smb_request *req;
	unsigned char *buf = NULL;

	req = kmem_cache_zalloc(req_cachep, GFP_KERNEL);
	VERBOSE("allocating request: %p\n", req);
	if (!req)
		goto out;

	if (bufsize > 0) {
		buf = kmalloc(bufsize, GFP_NOFS);
		if (!buf) {
			kmem_cache_free(req_cachep, req);
			return NULL;
		}
	}

	req->rq_buffer = buf;
	req->rq_bufsize = bufsize;
	req->rq_server = server;
	init_waitqueue_head(&req->rq_wait);
	INIT_LIST_HEAD(&req->rq_queue);
	atomic_set(&req->rq_count, 1);

out:
	return req;
}

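/*
 * Allocate a request for this server, honouring the per-server limit on
 * outstanding requests: nr_requests is bumped before each attempt and is
 * dropped again in smb_free_request().  The disabled branch below sketches
 * an NFS-style strategy of freeing memory and yielding until we get under
 * the hard limit; as the FIXME notes, for now a failed attempt simply
 * returns NULL to the caller.
 */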
struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
	struct smb_request *req = NULL;

	for (;;) {
		atomic_inc(&server->nr_requests);
		if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
			req = smb_do_alloc_request(server, bufsize);
			if (req != NULL)
				break;
		}

#if 0
		/*
		 * Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (nfs_try_to_free_pages(server))
			continue;

		if (fatal_signal_pending(current))
			return ERR_PTR(-ERESTARTSYS);
		current->policy = SCHED_YIELD;
		schedule();
#else
		/* FIXME: we want something like nfs does above, but that
		   requires changes to all callers and can wait. */
		break;
#endif
	}
	return req;
}

static void smb_free_request(struct smb_request *req)
{
	atomic_dec(&req->rq_server->nr_requests);
	if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
		kfree(req->rq_buffer);
	kfree(req->rq_trans2buffer);
	kmem_cache_free(req_cachep, req);
}

/*
 * What prevents an rget from racing with an rput? The count must never drop
 * to zero while the request is in use; only call rput when it is okay for
 * the request to be freed.
 */
static void smb_rget(struct smb_request *req)
{
	atomic_inc(&req->rq_count);
}

void smb_rput(struct smb_request *req)
{
	if (atomic_dec_and_test(&req->rq_count)) {
		list_del_init(&req->rq_queue);
		smb_free_request(req);
	}
}

/* setup to receive the data part of the SMB */
static int smb_setup_bcc(struct smb_request *req)
{
	int result = 0;
	req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;

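	/*
	 * smb_len() gives the NetBIOS length of the SMB, so the full packet
	 * is that plus the 4 byte session header; subtracting what has
	 * already been received (header, parameter words and byte-count
	 * field) leaves the length of the data part still to be read into
	 * rq_buffer.
	 */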
	if (req->rq_rlen > req->rq_bufsize) {
		PARANOIA("Packet too large %d > %d\n",
			 req->rq_rlen, req->rq_bufsize);
		return -ENOBUFS;
	}

	req->rq_iov[0].iov_base = req->rq_buffer;
	req->rq_iov[0].iov_len  = req->rq_rlen;
	req->rq_iovlen = 1;

	return result;
}

/*
 * Prepare a "normal" request structure.
 */
static int smb_setup_request(struct smb_request *req)
{
	int len = smb_len(req->rq_header) + 4;
	req->rq_slen = len;

	/* if we expect a data part in the reply we set the iov's to read it */
	if (req->rq_resp_bcc)
		req->rq_setup_read = smb_setup_bcc;

	/* This tries to support re-using the same request */
	req->rq_bytes_sent = 0;
	req->rq_rcls = 0;
	req->rq_err = 0;
	req->rq_errno = 0;
	req->rq_fragment = 0;
	kfree(req->rq_trans2buffer);
	req->rq_trans2buffer = NULL;

	return 0;
}

/*
 * Prepare a transaction2 request structure
 */
static int smb_setup_trans2request(struct smb_request *req)
{
	struct smb_sb_info *server = req->rq_server;
	int mparam, mdata;
	static unsigned char padding[4];

	/* I know the following is very ugly, but I want to build the
	   smb packet as efficiently as possible. */

	const int smb_parameters = 15;
	const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
	const int oparam = ALIGN(header + 3, sizeof(u32));
	const int odata  = ALIGN(oparam + req->rq_lparm, sizeof(u32));
	const int bcc = (req->rq_data ? odata + req->rq_ldata :
					oparam + req->rq_lparm) - header;

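	/*
	 * Layout of the outgoing packet: "header" covers the SMB header,
	 * the 15 parameter words and the byte count field; oparam and odata
	 * are the 4-byte aligned offsets at which the transaction parameter
	 * and data blocks are placed; bcc is the length of everything that
	 * follows the header block.  The iovecs below are built from these
	 * offsets, with the static padding[] buffer filling the gaps.
	 */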
	if ((bcc + oparam) > server->opt.max_xmit)
		return -ENOMEM;
	smb_setup_header(req, SMBtrans2, smb_parameters, bcc);

	/*
	 * max parameters + max data + max setup == bufsize to make NT4 happy
	 * and not abort the transfer or split into multiple responses. It also
	 * makes smbfs happy as handling packets larger than the buffer size
	 * is extra work.
	 *
	 * OS/2 is probably going to hate me for this ...
	 */
	mparam = SMB_TRANS2_MAX_PARAM;
	mdata = req->rq_bufsize - mparam;

	mdata = server->opt.max_xmit - mparam - 100;
	if (mdata < 1024) {
		mdata = 1024;
		mparam = 20;
	}

#if 0
	/* NT/win2k has ~4k max_xmit, so with this we request more than it wants
	   to return as one SMB. Useful for testing the fragmented trans2
	   handling. */
	mdata = 8192;
#endif

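	/*
	 * Fill in the TRANSACTION2 header: tpscnt/tdscnt are the total
	 * parameter and data bytes we are sending, mprcnt/mdrcnt the maximum
	 * the server may return, and pscnt/dscnt the amounts carried in this
	 * (single) request.  psoff/dsoff are measured from the start of the
	 * SMB header, hence the "- 4" to strip the NetBIOS length prefix
	 * that oparam/odata count.
	 */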
	WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
	WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
	WSET(req->rq_header, smb_mprcnt, mparam);
	WSET(req->rq_header, smb_mdrcnt, mdata);
	WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
	WSET(req->rq_header, smb_flags, 0);
	DSET(req->rq_header, smb_timeout, 0);
	WSET(req->rq_header, smb_pscnt, req->rq_lparm);
	WSET(req->rq_header, smb_psoff, oparam - 4);
	WSET(req->rq_header, smb_dscnt, req->rq_ldata);
	WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
	*(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
	*(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
	WSET(req->rq_header, smb_setup0, req->rq_trans2_command);

	req->rq_iovlen = 2;
	req->rq_iov[0].iov_base = (void *) req->rq_header;
	req->rq_iov[0].iov_len = oparam;
	req->rq_iov[1].iov_base = (req->rq_parm==NULL) ? padding : req->rq_parm;
	req->rq_iov[1].iov_len = req->rq_lparm;
	req->rq_slen = oparam + req->rq_lparm;

	if (req->rq_data) {
		req->rq_iovlen += 2;
		req->rq_iov[2].iov_base = padding;
		req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
		req->rq_iov[3].iov_base = req->rq_data;
		req->rq_iov[3].iov_len = req->rq_ldata;
		req->rq_slen = odata + req->rq_ldata;
	}

	/* always a data part for trans2 replies */
	req->rq_setup_read = smb_setup_bcc;

	return 0;
}

/*
 * Add a request and tell smbiod to process it
 */
int smb_add_request(struct smb_request *req)
{
	long timeleft;
	struct smb_sb_info *server = req->rq_server;
	int result = 0;

	smb_setup_request(req);
	if (req->rq_trans2_command) {
		if (req->rq_buffer == NULL) {
			PARANOIA("trans2 attempted without response buffer!\n");
			return -EIO;
		}
		result = smb_setup_trans2request(req);
	}
	if (result < 0)
		return result;

#ifdef SMB_DEBUG_PACKET_SIZE
	add_xmit_stats(req);
#endif

	/* add 'req' to the queue of requests */
	if (smb_lock_server_interruptible(server))
		return -EINTR;

	/*
	 * Try to send the request from the calling process. If that fails we
	 * queue the request and let smbiod send it later.
	 */

	/* FIXME: each server has a limit on the number of parallel requests
	   it will handle (10, 50 or so); we should not allow more requests
	   than that to be active. */
	if (server->mid > 0xf000)
		server->mid = 0;
	req->rq_mid = server->mid++;
	WSET(req->rq_header, smb_mid, req->rq_mid);

	result = 0;
	if (server->state == CONN_VALID) {
		if (list_empty(&server->xmitq))
			result = smb_request_send_req(req);
		if (result < 0) {
			/* Connection lost? */
			server->conn_error = result;
			server->state = CONN_INVALID;
		}
	}
	if (result != 1)
		list_add_tail(&req->rq_queue, &server->xmitq);
	smb_rget(req);

	if (server->state != CONN_VALID)
		smbiod_retry(server);

	smb_unlock_server(server);

	smbiod_wake_up();

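	/*
	 * Wait (interruptibly, up to 30 seconds) for smbiod to mark the
	 * request SMB_REQ_RECEIVED.  On timeout or on a signal the request
	 * is pulled back off the recvq/xmitq below and, on timeout, flagged
	 * with an ERRSRV/ERRtimeout error class.
	 */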
	timeleft = wait_event_interruptible_timeout(req->rq_wait,
				    req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
	if (!timeleft || signal_pending(current)) {
		/*
		 * On timeout or on interrupt we want to try and remove the
		 * request from the recvq/xmitq.
		 * First check if the request is still part of a queue. (May
		 * have been removed by some error condition)
		 */
		smb_lock_server(server);
		if (!list_empty(&req->rq_queue)) {
			list_del_init(&req->rq_queue);
			smb_rput(req);
		}
		smb_unlock_server(server);
	}

	if (!timeleft) {
		PARANOIA("request [%p, mid=%d] timed out!\n",
			 req, req->rq_mid);
		VERBOSE("smb_com:  %02x\n", *(req->rq_header + smb_com));
		VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
		VERBOSE("smb_flg:  %02x\n", *(req->rq_header + smb_flg));
		VERBOSE("smb_tid:  %04x\n", WVAL(req->rq_header, smb_tid));
		VERBOSE("smb_pid:  %04x\n", WVAL(req->rq_header, smb_pid));
		VERBOSE("smb_uid:  %04x\n", WVAL(req->rq_header, smb_uid));
		VERBOSE("smb_mid:  %04x\n", WVAL(req->rq_header, smb_mid));
		VERBOSE("smb_wct:  %02x\n", *(req->rq_header + smb_wct));

		req->rq_rcls = ERRSRV;
		req->rq_err  = ERRtimeout;

		/* Just in case it was "stuck" */
		smbiod_wake_up();
	}
	VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

	if (req->rq_rcls != 0)
		req->rq_errno = smb_errno(req);
	if (signal_pending(current))
		req->rq_errno = -ERESTARTSYS;
	return req->rq_errno;
}

/*
 * Send a request and place it on the recvq if successfully sent.
 * Must be called with the server lock held.
 */
static int smb_request_send_req(struct smb_request *req)
{
	struct smb_sb_info *server = req->rq_server;
	int result;

	if (req->rq_bytes_sent == 0) {
		WSET(req->rq_header, smb_tid, server->opt.tid);
		WSET(req->rq_header, smb_pid, 1);
		WSET(req->rq_header, smb_uid, server->opt.server_uid);
	}

	result = smb_send_request(req);
	if (result < 0 && result != -EAGAIN)
		goto out;

	result = 0;
	if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
		goto out;

	list_move_tail(&req->rq_queue, &server->recvq);
	result = 1;
out:
	return result;
}

/*
 * Sends one request for this server. (smbiod)
 * Must be called with the server lock held.
 * Returns: <0 on error
 *           0 if no request could be completely sent
 *           1 if all data for one request was sent
 */
int smb_request_send_server(struct smb_sb_info *server)
{
	struct list_head *head;
	struct smb_request *req;
	int result;

	if (server->state != CONN_VALID)
		return 0;

	/* dequeue first request, if any */
	req = NULL;
	head = server->xmitq.next;
	if (head != &server->xmitq) {
		req = list_entry(head, struct smb_request, rq_queue);
	}
	if (!req)
		return 0;

	result = smb_request_send_req(req);
	if (result < 0) {
		server->conn_error = result;
		list_move(&req->rq_queue, &server->xmitq);
		result = -EIO;
		goto out;
	}

out:
	return result;
}

/*
 * Try to find a request matching this "mid". Typically the first entry will
 * be the matching one.
 */
static struct smb_request *find_request(struct smb_sb_info *server, int mid)
{
	struct list_head *tmp;
	struct smb_request *req = NULL;

	list_for_each(tmp, &server->recvq) {
		req = list_entry(tmp, struct smb_request, rq_queue);
		if (req->rq_mid == mid) {
			break;
		}
		req = NULL;
	}

	if (!req) {
		VERBOSE("received reply with mid %d but no request!\n",
			WVAL(server->header, smb_mid));
		server->rstate = SMB_RECV_DROP;
	}

	return req;
}

/*
 * Called when we have read the smb header and believe this is a response.
 */
static int smb_init_request(struct smb_sb_info *server, struct smb_request *req)
{
	int hdrlen, wct;

	memcpy(req->rq_header, server->header, SMB_HEADER_LEN);

	wct = *(req->rq_header + smb_wct);
	if (wct > 20) {
		PARANOIA("wct too large, %d > 20\n", wct);
		server->rstate = SMB_RECV_DROP;
		return 0;
	}

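	/*
	 * The rest of the header is wct 16-bit parameter words plus the
	 * 2 byte byte-count field; set up the iovec to read up to hdrlen,
	 * after which smb_recv_param() decides whether a data part follows.
	 */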
	req->rq_resp_wct = wct;
	hdrlen = SMB_HEADER_LEN + wct*2 + 2;
	VERBOSE("header length: %d   smb_wct: %2d\n", hdrlen, wct);

	req->rq_bytes_recvd = SMB_HEADER_LEN;
	req->rq_rlen = hdrlen;
	req->rq_iov[0].iov_base = req->rq_header;
	req->rq_iov[0].iov_len  = hdrlen;
	req->rq_iovlen = 1;
	server->rstate = SMB_RECV_PARAM;

#ifdef SMB_DEBUG_PACKET_SIZE
	add_recv_stats(smb_len(server->header));
#endif
	return 0;
}

/*
 * Reads the SMB parameters
 */
static int smb_recv_param(struct smb_sb_info *server, struct smb_request *req)
{
	int result;

	result = smb_receive(server, req);
	if (result < 0)
		return result;
	if (req->rq_bytes_recvd < req->rq_rlen)
		return 0;

	VERBOSE("result: %d   smb_bcc:  %04x\n", result,
		WVAL(req->rq_header, SMB_HEADER_LEN +
		     (*(req->rq_header + smb_wct) * 2)));

	result = 0;
	req->rq_iov[0].iov_base = NULL;
	req->rq_rlen = 0;
	if (req->rq_callback)
		req->rq_callback(req);
	else if (req->rq_setup_read)
		result = req->rq_setup_read(req);
	if (result < 0) {
		server->rstate = SMB_RECV_DROP;
		return result;
	}

	server->rstate = req->rq_rlen > 0 ? SMB_RECV_DATA : SMB_RECV_END;

	req->rq_bytes_recvd = 0;	/* recvd out of the iov */

	VERBOSE("rlen: %d\n", req->rq_rlen);
	if (req->rq_rlen < 0) {
		PARANOIA("Parameters read beyond end of packet!\n");
		server->rstate = SMB_RECV_END;
		return -EIO;
	}
	return 0;
}

/*
 * Reads the SMB data
 */
static int smb_recv_data(struct smb_sb_info *server, struct smb_request *req)
{
	int result;

	result = smb_receive(server, req);
	if (result < 0)
		goto out;
	if (req->rq_bytes_recvd < req->rq_rlen)
		goto out;
	server->rstate = SMB_RECV_END;
out:
	VERBOSE("result: %d\n", result);
	return result;
}

/*
 * Receive a transaction2 response
 * Return: 0 if the response has been fully read
 *         1 if there are further "fragments" to read
 *        <0 if there is an error
 */
static int smb_recv_trans2(struct smb_sb_info *server, struct smb_request *req)
{
	unsigned char *inbuf;
	unsigned int parm_disp, parm_offset, parm_count, parm_tot;
	unsigned int data_disp, data_offset, data_count, data_tot;
	int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;

	VERBOSE("handling trans2\n");

	inbuf = req->rq_header;
	data_tot    = WVAL(inbuf, smb_tdrcnt);
	parm_tot    = WVAL(inbuf, smb_tprcnt);
	parm_disp   = WVAL(inbuf, smb_prdisp);
	parm_offset = WVAL(inbuf, smb_proff);
	parm_count  = WVAL(inbuf, smb_prcnt);
	data_disp   = WVAL(inbuf, smb_drdisp);
	data_offset = WVAL(inbuf, smb_droff);
	data_count  = WVAL(inbuf, smb_drcnt);

	/* Modify offset for the split header/buffer we use */
	if (data_count || data_offset) {
		if (unlikely(data_offset < hdrlen))
			goto out_bad_data;
		else
			data_offset -= hdrlen;
	}
	if (parm_count || parm_offset) {
		if (unlikely(parm_offset < hdrlen))
			goto out_bad_parm;
		else
			parm_offset -= hdrlen;
	}

	if (parm_count == parm_tot && data_count == data_tot) {
		/*
		 * This packet has all the trans2 data.
		 *
		 * We set up the request so that this will be the common
		 * case. It may be a server error to not return a
		 * response that fits.
		 */
		VERBOSE("single trans2 response  "
			"dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
			data_count, parm_count,
			data_offset, parm_offset);
		req->rq_ldata = data_count;
		req->rq_lparm = parm_count;
		req->rq_data = req->rq_buffer + data_offset;
		req->rq_parm = req->rq_buffer + parm_offset;
		if (unlikely(parm_offset + parm_count > req->rq_rlen))
			goto out_bad_parm;
		if (unlikely(data_offset + data_count > req->rq_rlen))
			goto out_bad_data;
		return 0;
	}

	VERBOSE("multi trans2 response  "
		"frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
		req->rq_fragment,
		data_count, parm_count,
		data_offset, parm_offset);

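	/*
	 * Fragmented response: on the first fragment allocate a single
	 * reassembly buffer large enough for the advertised parameter and
	 * data totals, then copy each fragment's pieces in at the
	 * displacements the server reports until everything has arrived.
	 */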
	if (!req->rq_fragment) {
		int buf_len;

		/* We got the first trans2 fragment */
		req->rq_fragment = 1;
		req->rq_total_data = data_tot;
		req->rq_total_parm = parm_tot;
		req->rq_ldata = 0;
		req->rq_lparm = 0;

		buf_len = data_tot + parm_tot;
		if (buf_len > SMB_MAX_PACKET_SIZE)
			goto out_too_long;

		req->rq_trans2bufsize = buf_len;
		req->rq_trans2buffer = kzalloc(buf_len, GFP_NOFS);
		if (!req->rq_trans2buffer)
			goto out_no_mem;

		req->rq_parm = req->rq_trans2buffer;
		req->rq_data = req->rq_trans2buffer + parm_tot;
	} else if (unlikely(req->rq_total_data < data_tot ||
			    req->rq_total_parm < parm_tot))
		goto out_data_grew;

	if (unlikely(parm_disp + parm_count > req->rq_total_parm ||
		     parm_offset + parm_count > req->rq_rlen))
		goto out_bad_parm;
	if (unlikely(data_disp + data_count > req->rq_total_data ||
		     data_offset + data_count > req->rq_rlen))
		goto out_bad_data;

	inbuf = req->rq_buffer;
	memcpy(req->rq_parm + parm_disp, inbuf + parm_offset, parm_count);
	memcpy(req->rq_data + data_disp, inbuf + data_offset, data_count);

	req->rq_ldata += data_count;
	req->rq_lparm += parm_count;

	/*
	 * Check whether we've received all of the data. Note that
	 * we use the packet totals -- total lengths might shrink!
	 */
	if (req->rq_ldata >= data_tot && req->rq_lparm >= parm_tot) {
		req->rq_ldata = data_tot;
		req->rq_lparm = parm_tot;
		return 0;
	}
	return 1;

out_too_long:
	printk(KERN_ERR "smb_trans2: data/param too long, data=%u, parm=%u\n",
		data_tot, parm_tot);
	goto out_EIO;
out_no_mem:
	printk(KERN_ERR "smb_trans2: couldn't allocate data area of %d bytes\n",
	       req->rq_trans2bufsize);
	req->rq_errno = -ENOMEM;
	goto out;
out_data_grew:
	printk(KERN_ERR "smb_trans2: data/params grew!\n");
	goto out_EIO;
out_bad_parm:
	printk(KERN_ERR "smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
	       parm_disp, parm_count, parm_tot, parm_offset);
	goto out_EIO;
out_bad_data:
	printk(KERN_ERR "smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
	       data_disp, data_count, data_tot, data_offset);
out_EIO:
	req->rq_errno = -EIO;
out:
	return req->rq_errno;
}

/*
 * State machine for receiving responses. We handle the fact that we can't
 * read the full response in one try by having states telling us how much we
 * have read.
 *
 * Must be called with the server lock held (only called from smbiod).
 *
 * Return: <0 on error
 */
int smb_request_recv(struct smb_sb_info *server)
{
	struct smb_request *req = NULL;
	int result = 0;

	if (smb_recv_available(server) <= 0)
		return 0;

	VERBOSE("state: %d\n", server->rstate);
	switch (server->rstate) {
	case SMB_RECV_DROP:
		result = smb_receive_drop(server);
		if (result < 0)
			break;
		if (server->rstate == SMB_RECV_DROP)
			break;
		server->rstate = SMB_RECV_START;
		/* fallthrough */
	case SMB_RECV_START:
		server->smb_read = 0;
		server->rstate = SMB_RECV_HEADER;
		/* fallthrough */
	case SMB_RECV_HEADER:
		result = smb_receive_header(server);
		if (result < 0)
			break;
		if (server->rstate == SMB_RECV_HEADER)
			break;
		if (! (*(server->header + smb_flg) & SMB_FLAGS_REPLY) ) {
			server->rstate = SMB_RECV_REQUEST;
			break;
		}
		if (server->rstate != SMB_RECV_HCOMPLETE)
			break;
		/* fallthrough */
	case SMB_RECV_HCOMPLETE:
		req = find_request(server, WVAL(server->header, smb_mid));
		if (!req)
			break;
		smb_init_request(server, req);
		req->rq_rcls = *(req->rq_header + smb_rcls);
		req->rq_err  = WVAL(req->rq_header, smb_err);
		if (server->rstate != SMB_RECV_PARAM)
			break;
		/* fallthrough */
	case SMB_RECV_PARAM:
		if (!req)
			req = find_request(server,WVAL(server->header,smb_mid));
		if (!req)
			break;
		result = smb_recv_param(server, req);
		if (result < 0)
			break;
		if (server->rstate != SMB_RECV_DATA)
			break;
		/* fallthrough */
	case SMB_RECV_DATA:
		if (!req)
			req = find_request(server,WVAL(server->header,smb_mid));
		if (!req)
			break;
		result = smb_recv_data(server, req);
		if (result < 0)
			break;
		break;

		/* We should never be called with any of these states */
	case SMB_RECV_END:
	case SMB_RECV_REQUEST:
		BUG();
	}

	if (result < 0) {
		/* We saw an error */
		return result;
	}

	if (server->rstate != SMB_RECV_END)
		return 0;

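	/*
	 * The packet is complete; for a successful trans2 reply it may still
	 * be only one fragment of the full response.  smb_recv_trans2()
	 * returns 1 in that case, so the request stays on the recvq and the
	 * waiter is not woken until the remaining fragments arrive.
	 */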
	result = 0;
	if (req->rq_trans2_command && req->rq_rcls == SUCCESS)
		result = smb_recv_trans2(server, req);

	/*
	 * Response completely read. Drop any extra bytes sent by the server.
	 * (Yes, servers sometimes add extra bytes to responses)
	 */
	VERBOSE("smb_len: %d   smb_read: %d\n",
		server->smb_len, server->smb_read);
	if (server->smb_read < server->smb_len)
		smb_receive_drop(server);

	server->rstate = SMB_RECV_START;

	if (!result) {
		list_del_init(&req->rq_queue);
		req->rq_flags |= SMB_REQ_RECEIVED;
		smb_rput(req);
		wake_up_interruptible(&req->rq_wait);
	}
	return 0;
}