/* Driver for Realtek RTS51xx USB card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 * Maintainer:
 *   Edwin Rong (edwin_rong@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>

#include "debug.h"
#include "rts51x.h"
#include "rts51x_chip.h"
#include "rts51x_card.h"
#include "rts51x_scsi.h"
#include "rts51x_transport.h"
#include "trace.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * Update the **sgptr and *offset variables so that the next copy will
 * pick up from where this one left off.
 */

unsigned int rts51x_access_sglist(unsigned char *buffer,
				  unsigned int buflen, void *sglist,
				  void **sgptr, unsigned int *offset,
				  enum xfer_buf_dir dir)
{
	unsigned int cnt;
	struct scatterlist *sg = (struct scatterlist *)*sgptr;

	/* We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space.
	 */

	if (!sg)
		sg = (struct scatterlist *)sglist;

	/* This loop handles a single s-g list entry, which may
	 * include multiple pages.  Find the initial page structure
	 * and the starting offset within the page, and update
	 * the *offset and **sgptr values for the next loop.
	 */
	cnt = 0;
	while (cnt < buflen && sg) {
		struct page *page = sg_page(sg) +
		    ((sg->offset + *offset) >> PAGE_SHIFT);
		unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE - 1);
		unsigned int sglen = sg->length - *offset;

		if (sglen > buflen - cnt) {

			/* Transfer ends within this s-g entry */
			sglen = buflen - cnt;
			*offset += sglen;
		} else {

			/* Transfer continues to next s-g entry */
			*offset = 0;
			sg = sg_next(sg);
		}

		/* Transfer the data for all the pages in this
		 * s-g entry.  For each page: call kmap(), do the
		 * transfer, and call kunmap() immediately after. */
		while (sglen > 0) {
			unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
			unsigned char *ptr = kmap(page);

			if (dir == TO_XFER_BUF)
				memcpy(ptr + poff, buffer + cnt, plen);
			else
				memcpy(buffer + cnt, ptr + poff, plen);
			kunmap(page);

			/* Start at the beginning of the next page */
			poff = 0;
			++page;
			cnt += plen;
			sglen -= plen;
		}
	}
	*sgptr = sg;

	/* Return the amount actually transferred */
	return cnt;
}

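/* As above, but take the scatter-gather list directly from the srb */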
unsigned int rts51x_access_xfer_buf(unsigned char *buffer,
				    unsigned int buflen, struct scsi_cmnd *srb,
				    struct scatterlist **sgptr,
				    unsigned int *offset, enum xfer_buf_dir dir)
{
	return rts51x_access_sglist(buffer, buflen, (void *)scsi_sglist(srb),
				    (void **)sgptr, offset, dir);
}

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rts51x_set_xfer_buf(unsigned char *buffer,
			 unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	buflen = min(buflen, scsi_bufflen(srb));
	buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
					TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

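/* Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue.
 */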
void rts51x_get_xfer_buf(unsigned char *buffer,
			 unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	buflen = min(buflen, scsi_bufflen(srb));
	buflen = rts51x_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
					FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/* This is the completion handler which will wake us up when an URB
 * completes.
 */
static void urb_done_completion(struct urb *urb)
{
	struct completion *urb_done_ptr = urb->context;

	if (urb_done_ptr)
		complete(urb_done_ptr);
}

/* This is the common part of the URB message submission code
 *
 * All URBs from the driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 */
static int rts51x_msg_common(struct rts51x_chip *chip, struct urb *urb,
			     int timeout)
{
	struct rts51x_usb *rts51x = chip->usb;
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(FLIDX_ABORTING, &rts51x->dflags))
		TRACE_RET(chip, -EIO);

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	urb->context = &urb_done;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->status = 0;

	/* we assume that if transfer_buffer isn't rts51x->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped. */
	urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
	if (urb->transfer_buffer == rts51x->iobuf) {
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = rts51x->iobuf_dma;
	}
	urb->setup_dma = rts51x->cr_dma;

	/* submit the URB */
	status = usb_submit_urb(urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		TRACE_RET(chip, status);
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &rts51x->dflags)) {

		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags)) {
			RTS51X_DEBUGP("-- cancelling URB\n");
			usb_unlink_urb(urb);
		}
	}

	/* wait for the completion of the URB */
	timeleft =
	    wait_for_completion_interruptible_timeout(&urb_done,
						      (timeout * HZ /
						       1000) ? :
						      MAX_SCHEDULE_TIMEOUT);

	clear_bit(FLIDX_URB_ACTIVE, &rts51x->dflags);

	if (timeleft <= 0) {
		RTS51X_DEBUGP("%s -- cancelling URB\n",
			       timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(urb);
		if (timeleft == 0)
			status = -ETIMEDOUT;
		else
			status = -EINTR;
	} else {
		status = urb->status;
	}

	return status;
}

/*
 * Interpret the results of a URB transfer
 */
static int interpret_urb_result(struct rts51x_chip *chip, unsigned int pipe,
				unsigned int length, int result,
				unsigned int partial)
{
	int retval = STATUS_SUCCESS;

	/* RTS51X_DEBUGP("Status code %d; transferred %u/%u\n",
				result, partial, length); */
	switch (result) {
		/* no error code; did we send all the data? */
	case 0:
		if (partial != length) {
			RTS51X_DEBUGP("-- short transfer\n");
			TRACE_RET(chip, STATUS_TRANS_SHORT);
		}
		/* RTS51X_DEBUGP("-- transfer complete\n"); */
		return STATUS_SUCCESS;
		/* stalled */
	case -EPIPE:
		/* for control endpoints, (used by CB[I]) a stall indicates
		 * a failed command */
		if (usb_pipecontrol(pipe)) {
			RTS51X_DEBUGP("-- stall on control pipe\n");
			TRACE_RET(chip, STATUS_STALLED);
		}
		/* for other sorts of endpoint, clear the stall */
		RTS51X_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		if (rts51x_clear_halt(chip, pipe) < 0)
			TRACE_RET(chip, STATUS_ERROR);
		retval = STATUS_STALLED;
		TRACE_GOTO(chip, Exit);

		/* babble - the device tried to send more than
		 * we wanted to read */
	case -EOVERFLOW:
		RTS51X_DEBUGP("-- babble\n");
		retval = STATUS_TRANS_LONG;
		TRACE_GOTO(chip, Exit);

		/* the transfer was cancelled by abort,
		 * disconnect, or timeout */
	case -ECONNRESET:
		RTS51X_DEBUGP("-- transfer cancelled\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);

		/* short scatter-gather read transfer */
	case -EREMOTEIO:
		RTS51X_DEBUGP("-- short read transfer\n");
		retval = STATUS_TRANS_SHORT;
		TRACE_GOTO(chip, Exit);

		/* abort or disconnect in progress */
	case -EIO:
		RTS51X_DEBUGP("-- abort or disconnect in progress\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);

	case -ETIMEDOUT:
		RTS51X_DEBUGP("-- time out\n");
		retval = STATUS_TIMEDOUT;
		TRACE_GOTO(chip, Exit);

		/* the catch-all error case */
	default:
		RTS51X_DEBUGP("-- unknown error\n");
		retval = STATUS_ERROR;
		TRACE_GOTO(chip, Exit);
	}

Exit:
	if ((retval != STATUS_SUCCESS) && !usb_pipecontrol(pipe))
		rts51x_clear_hw_error(chip);

	return retval;
}

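/* Carry out a control transfer on the given pipe, using the chip's
 * pre-allocated control URB and setup packet, and interpret the result.
 */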
int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
			 u8 request, u8 requesttype, u16 value, u16 index,
			 void *data, u16 size, int timeout)
{
	struct rts51x_usb *rts51x = chip->usb;
	int result;

	RTS51X_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
		       __func__, request, requesttype, value, index, size);

	/* fill in the devrequest structure */
	rts51x->cr->bRequestType = requesttype;
	rts51x->cr->bRequest = request;
	rts51x->cr->wValue = cpu_to_le16(value);
	rts51x->cr->wIndex = cpu_to_le16(index);
	rts51x->cr->wLength = cpu_to_le16(size);

	/* fill and submit the URB */
	usb_fill_control_urb(rts51x->current_urb, rts51x->pusb_dev, pipe,
			     (unsigned char *)rts51x->cr, data, size,
			     urb_done_completion, NULL);
	result = rts51x_msg_common(chip, rts51x->current_urb, timeout);

	return interpret_urb_result(chip, pipe, size, result,
				    rts51x->current_urb->actual_length);
}

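/* Clear a halt (stall) condition on the given endpoint and reset the
 * host-side endpoint state.
 */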
int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe)
{
	int result;
	int endp = usb_pipeendpoint(pipe);

	if (usb_pipein(pipe))
		endp |= USB_DIR_IN;

	result = rts51x_ctrl_transfer(chip, SND_CTRL_PIPE(chip),
				      USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
				      USB_ENDPOINT_HALT, endp, NULL, 0, 3000);
	if (result != STATUS_SUCCESS)
		TRACE_RET(chip, STATUS_FAIL);

	usb_reset_endpoint(chip->usb->pusb_dev, endp);

	return STATUS_SUCCESS;
}

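/* Resetting a pipe just means clearing its halt condition */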
int rts51x_reset_pipe(struct rts51x_chip *chip, char pipe)
{
	return rts51x_clear_halt(chip, pipe);
}

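/* Release the URBs allocated for a scatter-gather request */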
static void rts51x_sg_clean(struct usb_sg_request *io)
{
	if (io->urbs) {
		while (io->entries--)
			usb_free_urb(io->urbs[io->entries]);
		kfree(io->urbs);
		io->urbs = NULL;
	}
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */
	if (io->dev->dev.dma_mask != NULL)
		usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
				    io->sg, io->nents);
#endif
	io->dev = NULL;
}
#if 0
static void rts51x_sg_complete(struct urb *urb)
{
	struct usb_sg_request *io = urb->context;
	int status = urb->status;

	spin_lock(&io->lock);

	/* In 2.5 we require hcds' endpoint queues not to progress after fault
	* reports, until the completion callback (this!) returns.  That lets
	* device driver code (like this routine) unlink queued urbs first,
	* if it needs to, since the HC won't work on them at all.  So it's
	* not possible for page N+1 to overwrite page N, and so on.
	*
	* That's only for "hard" faults; "soft" faults (unlinks) sometimes
	* complete before the HCD can get requests away from hardware,
	* though never during cleanup after a hard fault.
	*/
	if (io->status
		&& (io->status != -ECONNRESET
		|| status != -ECONNRESET)
		&& urb->actual_length) {
			dev_err(io->dev->bus->controller,
				"dev %s ep%d%s scatterlist error %d/%d\n",
				io->dev->devpath,
				usb_endpoint_num(&urb->ep->desc),
				usb_urb_dir_in(urb) ? "in" : "out",
				status, io->status);
			/* BUG (); */
	}

	if (io->status == 0 && status && status != -ECONNRESET) {
		int i, found, retval;

		io->status = status;

		/* the previous urbs, and this one, completed already.
		* unlink pending urbs so they won't rx/tx bad data.
		* careful: unlink can sometimes be synchronous...
		*/
		spin_unlock(&io->lock);
		for (i = 0, found = 0; i < io->entries; i++) {
			if (!io->urbs[i] || !io->urbs[i]->dev)
				continue;
			if (found) {
				retval = usb_unlink_urb(io->urbs[i]);
				if (retval != -EINPROGRESS &&
					retval != -ENODEV &&
					retval != -EBUSY)
					dev_err(&io->dev->dev,
						"%s, unlink --> %d\n",
						__func__, retval);
			} else if (urb == io->urbs[i])
				found = 1;
		}
		spin_lock(&io->lock);
	}
	urb->dev = NULL;

	/* on the last completion, signal usb_sg_wait() */
	io->bytes += urb->actual_length;
	io->count--;
	if (!io->count)
		complete(&io->complete);

	spin_unlock(&io->lock);
}

/* This function is ported from usb_sg_init(); unlike the original,
 * it can transfer an sg list partially */
int rts51x_sg_init_partial(struct usb_sg_request *io, struct usb_device *dev,
	unsigned pipe, unsigned period, void *buf, struct scatterlist **sgptr,
	unsigned int *offset, int nents, size_t length, gfp_t mem_flags)
{
	int i;
	int urb_flags;
	int dma;
	struct scatterlist *sg = *sgptr, *first_sg;

	first_sg = (struct scatterlist *)buf;
	if (!sg)
		sg = first_sg;

	if (!io || !dev || !sg
		|| usb_pipecontrol(pipe)
		|| usb_pipeisoc(pipe)
		|| (nents <= 0))
		return -EINVAL;

	spin_lock_init(&io->lock);
	io->dev = dev;
	io->pipe = pipe;
	io->sg = first_sg;  /* used by unmap */
	io->nents = nents;

	RTS51X_DEBUGP("Before map, sg address: 0x%x\n", (unsigned int)sg);
	RTS51X_DEBUGP("Before map, dev address: 0x%x\n", (unsigned int)dev);

	/* not all host controllers use DMA (like the mainstream pci ones);
	* they can use PIO (sl811) or be software over another transport.
	*/
	dma = (dev->dev.dma_mask != NULL);
	if (dma) {
		/* map the whole sg list, because here we only know the
		 * total nents */
		io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
		first_sg, nents);
	} else {
		io->entries = nents;
	}

	/* initialize all the urbs we'll use */
	if (io->entries <= 0)
		return io->entries;

	io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
	if (!io->urbs)
		goto nomem;

	urb_flags = URB_NO_INTERRUPT;
	if (dma)
		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
	if (usb_pipein(pipe))
		urb_flags |= URB_SHORT_NOT_OK;

	RTS51X_DEBUGP("io->entries = %d\n", io->entries);

	for (i = 0; (sg != NULL) && (length > 0); i++) {
		unsigned len;

		RTS51X_DEBUGP("sg address: 0x%x\n", (unsigned int)sg);
		RTS51X_DEBUGP("length = %d, *offset = %d\n", length, *offset);

		io->urbs[i] = usb_alloc_urb(0, mem_flags);
		if (!io->urbs[i]) {
			io->entries = i;
			goto nomem;
		}

		io->urbs[i]->dev = NULL;
		io->urbs[i]->pipe = pipe;
		io->urbs[i]->interval = period;
		io->urbs[i]->transfer_flags = urb_flags;

		io->urbs[i]->complete = rts51x_sg_complete;
		io->urbs[i]->context = io;

		if (dma) {
			io->urbs[i]->transfer_dma =
				sg_dma_address(sg) + *offset;
			len = sg_dma_len(sg) - *offset;
			io->urbs[i]->transfer_buffer = NULL;
			RTS51X_DEBUGP(" -- sg entry dma length = %d\n",
						sg_dma_len(sg));
		} else {
			/* hc may use _only_ transfer_buffer */
			io->urbs[i]->transfer_buffer = sg_virt(sg) + *offset;
			len = sg->length - *offset;
			RTS51X_DEBUGP(" -- sg entry length = %d\n",
						sg->length);
		}

		if (length >= len) {
			*offset = 0;
			io->urbs[i]->transfer_buffer_length = len;
			length -= len;
			sg = sg_next(sg);
		} else {
			*offset += length;
			io->urbs[i]->transfer_buffer_length = length;
			length = 0;
		}
		if (length == 0)
			io->entries = i + 1;
#if 0
		if (length) {
			len = min_t(unsigned, len, length);
			length -= len;
			if (length == 0) {
				io->entries = i + 1;
				*offset += len;
			} else {
				*offset = 0;
			}
		}
#endif
	}
	RTS51X_DEBUGP("In %s, urb count: %d\n", __func__, i);
	io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;

	RTS51X_DEBUGP("sg address stored in sgptr: 0x%x\n", (unsigned int)sg);
	*sgptr = sg;

	/* transaction state */
	io->count = io->entries;
	io->status = 0;
	io->bytes = 0;
	init_completion(&io->complete);
	return 0;

nomem:
	rts51x_sg_clean(io);
	return -ENOMEM;
}
#endif
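/* Thin wrapper around the usbcore usb_sg_init() helper */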
int rts51x_sg_init(struct usb_sg_request *io, struct usb_device *dev,
		   unsigned pipe, unsigned period, struct scatterlist *sg,
		   int nents, size_t length, gfp_t mem_flags)
{
	return usb_sg_init(io, dev, pipe, period, sg, nents, length, mem_flags);
}

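/* Submit the URBs of a scatter-gather request and wait, interruptibly and
 * with a timeout, for all of them to complete; outstanding URBs are
 * cancelled on timeout or signal.
 */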
int rts51x_sg_wait(struct usb_sg_request *io, int timeout)
{
	long timeleft;
	int i;
	int entries = io->entries;

	/* queue the urbs.  */
	spin_lock_irq(&io->lock);
	i = 0;
	while (i < entries && !io->status) {
		int retval;

		io->urbs[i]->dev = io->dev;
		retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);

		/* after we submit, let completions or cancellations fire;
		 * we handshake using io->status.
		 */
		spin_unlock_irq(&io->lock);
		switch (retval) {
			/* maybe retrying will recover */
		case -ENXIO:	/* hc didn't queue this one */
		case -EAGAIN:
		case -ENOMEM:
			io->urbs[i]->dev = NULL;
			retval = 0;
			yield();
			break;

			/* no error? continue immediately.
			 *
			 * NOTE: to work better with UHCI (4K I/O buffer may
			 * need 3K of TDs) it may be good to limit how many
			 * URBs are queued at once; N milliseconds?
			 */
		case 0:
			++i;
			cpu_relax();
			break;

			/* fail any uncompleted urbs */
		default:
			io->urbs[i]->dev = NULL;
			io->urbs[i]->status = retval;
			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
				__func__, retval);
			usb_sg_cancel(io);
		}
		spin_lock_irq(&io->lock);
		if (retval && (io->status == 0 || io->status == -ECONNRESET))
			io->status = retval;
	}
	io->count -= entries - i;
	if (io->count == 0)
		complete(&io->complete);
	spin_unlock_irq(&io->lock);

	timeleft =
	    wait_for_completion_interruptible_timeout(&io->complete,
						      (timeout * HZ /
						       1000) ? :
						      MAX_SCHEDULE_TIMEOUT);
	if (timeleft <= 0) {
		RTS51X_DEBUGP("%s -- cancelling SG request\n",
			       timeleft == 0 ? "Timeout" : "Signal");
		usb_sg_cancel(io);
		if (timeleft == 0)
			io->status = -ETIMEDOUT;
		else
			io->status = -EINTR;
	}

	rts51x_sg_clean(io);
	return io->status;
}

/*
 * Transfer a scatter-gather list via bulk transfer
 *
 * This function does basically the same thing as rts51x_bulk_transfer_buf()
 * below, but it uses the usbcore scatter-gather library.
 */
static int rts51x_bulk_transfer_sglist(struct rts51x_chip *chip,
				       unsigned int pipe,
				       struct scatterlist *sg, int num_sg,
				       unsigned int length,
				       unsigned int *act_len, int timeout)
{
	int result;

	/* don't submit s-g requests during abort processing */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
		TRACE_RET(chip, STATUS_ERROR);

	/* initialize the scatter-gather request block */
	RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
		       length, num_sg);
	result =
	    rts51x_sg_init(&chip->usb->current_sg, chip->usb->pusb_dev, pipe, 0,
			   sg, num_sg, length, GFP_NOIO);
	if (result) {
		RTS51X_DEBUGP("rts51x_sg_init returned %d\n", result);
		TRACE_RET(chip, STATUS_ERROR);
	}

	/* since the block has been initialized successfully, it's now
	 * okay to cancel it */
	set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {

		/* cancel the request, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
			RTS51X_DEBUGP("-- cancelling sg request\n");
			usb_sg_cancel(&chip->usb->current_sg);
		}
	}

	/* wait for the completion of the transfer */
	result = rts51x_sg_wait(&chip->usb->current_sg, timeout);

	clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* result = us->current_sg.status; */
	if (act_len)
		*act_len = chip->usb->current_sg.bytes;
	return interpret_urb_result(chip, pipe, length, result,
				    chip->usb->current_sg.bytes);
}
#if 0
static int rts51x_bulk_transfer_sglist_partial(struct rts51x_chip *chip,
		unsigned int pipe, void *buf, struct scatterlist **sgptr,
		unsigned int *offset, int num_sg, unsigned int length,
		unsigned int *act_len, int timeout)
{
	int result;

	/* don't submit s-g requests during abort processing */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
		TRACE_RET(chip, STATUS_ERROR);

	/* initialize the scatter-gather request block */
	RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
			length, num_sg);
	result = rts51x_sg_init_partial(&chip->usb->current_sg,
			chip->usb->pusb_dev, pipe, 0, buf, sgptr, offset,
			num_sg, length, GFP_NOIO);
	if (result) {
		RTS51X_DEBUGP("rts51x_sg_init_partial returned %d\n", result);
		TRACE_RET(chip, STATUS_ERROR);
	}

	/* since the block has been initialized successfully, it's now
	 * okay to cancel it */
	set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {

		/* cancel the request, if it hasn't been cancelled already */
		if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
			RTS51X_DEBUGP("-- cancelling sg request\n");
			usb_sg_cancel(&chip->usb->current_sg);
		}
	}

	/* wait for the completion of the transfer */
	result = rts51x_sg_wait(&chip->usb->current_sg, timeout);

	clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);

	/* result = us->current_sg.status; */
	if (act_len)
		*act_len = chip->usb->current_sg.bytes;
	return interpret_urb_result(chip, pipe, length, result,
		chip->usb->current_sg.bytes);
}
#endif
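/* Transfer a single contiguous buffer via a bulk pipe and report the
 * actual number of bytes transferred.
 */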
int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
			     void *buf, unsigned int length,
			     unsigned int *act_len, int timeout)
{
	int result;

	/* fill and submit the URB */
	usb_fill_bulk_urb(chip->usb->current_urb, chip->usb->pusb_dev, pipe,
			  buf, length, urb_done_completion, NULL);
	result = rts51x_msg_common(chip, chip->usb->current_urb, timeout);

	/* store the actual length of the data transferred */
	if (act_len)
		*act_len = chip->usb->current_urb->actual_length;
	return interpret_urb_result(chip, pipe, length, result,
				    chip->usb->current_urb->actual_length);
}

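/* Transfer data to or from the device: use_sg gives the number of
 * scatter-gather entries in buf, or zero if buf is a plain buffer.
 */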
int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
			 void *buf, unsigned int len, int use_sg,
			 unsigned int *act_len, int timeout)
{
	int result;

	if (timeout < 600)
		timeout = 600;

	if (use_sg) {
		result =
		    rts51x_bulk_transfer_sglist(chip, pipe,
						(struct scatterlist *)buf,
						use_sg, len, act_len, timeout);
	} else {
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, buf, len, act_len,
					     timeout);
	}

	return result;
}

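/* Transfer part of a larger buffer or scatter-gather list, tracking the
 * current position through *ptr and *offset; the sg case bounces the data
 * through a temporary kernel buffer.
 */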
int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
				 void *buf, void **ptr, unsigned int *offset,
				 unsigned int len, int use_sg,
				 unsigned int *act_len, int timeout)
{
	int result;

	if (timeout < 600)
		timeout = 600;

	if (use_sg) {
		void *tmp_buf = kmalloc(len, GFP_KERNEL);
		if (!tmp_buf)
			TRACE_RET(chip, STATUS_NOMEM);

		if (usb_pipeout(pipe)) {
			rts51x_access_sglist(tmp_buf, len, buf, ptr, offset,
					     FROM_XFER_BUF);
		}
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, tmp_buf, len, act_len,
					     timeout);
		if (result == STATUS_SUCCESS) {
			if (usb_pipein(pipe)) {
				rts51x_access_sglist(tmp_buf, len, buf, ptr,
						     offset, TO_XFER_BUF);
			}
		}

		kfree(tmp_buf);
#if 0
		result = rts51x_bulk_transfer_sglist_partial(chip, pipe, buf,
					(struct scatterlist **)ptr, offset,
					use_sg, len, act_len, timeout);
#endif
	} else {
		unsigned int step = 0;
		if (offset)
			step = *offset;
		result =
		    rts51x_bulk_transfer_buf(chip, pipe, buf + step, len,
					     act_len, timeout);
		if (act_len)
			step += *act_len;
		else
			step += len;
		if (offset)
			*offset = step;
	}

	return result;
}

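/* Read the two-byte status word from the device's interrupt-in endpoint */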
int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
{
	unsigned int pipe = RCV_INTR_PIPE(chip);
	struct usb_host_endpoint *ep;
	struct completion urb_done;
	int result;

	if (!status)
		TRACE_RET(chip, STATUS_ERROR);

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	ep = chip->usb->pusb_dev->ep_in[usb_pipeendpoint(pipe)];

	/* fill and submit the URB */
	/* We set interval to 1 here, so the polling interval is controlled
	 * by our polling thread */
	usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
			 status, 2, urb_done_completion, &urb_done, 1);

	result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);

	return interpret_urb_result(chip, pipe, 2, result,
				    chip->usb->intr_urb->actual_length);
}

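/* Canned sense data: NOT READY / MEDIUM NOT PRESENT and
 * ILLEGAL REQUEST / INVALID FIELD IN CDB */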
u8 media_not_present[] = {
	0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 };
u8 invalid_cmd_field[] = {
	0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 };

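/* Execute a queued SCSI command: handle commands that can be answered
 * while the device is suspended, wake the device when necessary, run the
 * SCSI handler and translate the outcome into srb->result and sense data.
 */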
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	int result;

#ifdef CONFIG_PM
	if (chip->option.ss_en) {
		if (srb->cmnd[0] == TEST_UNIT_READY) {
			if (RTS51X_CHK_STAT(chip, STAT_SS)) {
				if (check_fake_card_ready(chip,
							SCSI_LUN(srb))) {
					srb->result = SAM_STAT_GOOD;
				} else {
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       media_not_present, SENSE_SIZE);
				}
				return;
			}
		} else if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
			if (RTS51X_CHK_STAT(chip, STAT_SS)) {
				int prevent = srb->cmnd[4] & 0x1;

				if (prevent) {
					srb->result = SAM_STAT_CHECK_CONDITION;
					memcpy(srb->sense_buffer,
					       invalid_cmd_field, SENSE_SIZE);
				} else {
					srb->result = SAM_STAT_GOOD;
				}
				return;
			}
		} else {
			if (RTS51X_CHK_STAT(chip, STAT_SS)
			    || RTS51X_CHK_STAT(chip, STAT_SS_PRE)) {
				/* Wake up device */
				RTS51X_DEBUGP("Try to wake up device\n");
				chip->resume_from_scsi = 1;

				rts51x_try_to_exit_ss(chip);

				if (RTS51X_CHK_STAT(chip, STAT_SS)) {
					wait_timeout(3000);

					rts51x_init_chip(chip);
					rts51x_init_cards(chip);
				}
			}
		}
	}
#endif

	result = rts51x_scsi_handler(srb, chip);

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTS51X_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
		       sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}