1 /***********************************************************************************
2 CED1401 USB driver. The basic framework is based on the usb-skeleton.c code that is:
3 Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
4 Copyright (C) 2012 Alois Schloegl <alois.schloegl@ist.ac.at>
5 There is not a great deal of the skeleton left.
6
7 All the remainder dealing specifically with the CED1401 is based on drivers written
8 by CED for other systems (mainly Windows) and is:
9 Copyright (C) 2010 Cambridge Electronic Design Ltd
10 Author Greg P Smith (greg@ced.co.uk)
11
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License
14 as published by the Free Software Foundation; either version 2
15 of the License, or (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25
26 Endpoints
27 *********
28 There are 4 endpoints plus the control endpoint in the standard interface
29 provided by most 1401s. The control endpoint is used for standard USB requests,
30 plus various CED-specific transactions such as start self test, debug and get
31 the 1401 status. The other endpoints are:
32
33 1 Characters to the 1401
34 2 Characters from the 1401
35 3 Block data to the 1401
36 4 Block data to the host.
37
38 Inside the driver these are indexed as an array from 0 to 3; transactions
39 over the control endpoint are carried out using a separate mechanism. The
40 use of the endpoints is mostly straightforward, with the driver issuing
41 IO request packets (IRPs) as required to transfer data to and from the 1401.
42 The handling of endpoint 2 is different because it is used for characters
43 from the 1401, which can appear spontaneously and without any other driver
44 activity - for example to repeatedly request DMA transfers in Spike2. The
45 desired effect is achieved by using an interrupt endpoint which can be
46 polled to see if it has data available, and writing the driver so that it
47 always maintains a pending read IRP from that endpoint which will read the
48 character data and terminate as soon as the 1401 makes data available. This
49 works very well. Some care is taken over when this character read IRP is
50 kicked off, to avoid it being active when it is not wanted, but generally it
51 is running all the time.
52
53 In the 2270, there are only three endpoints plus the control endpoint. In
54 addition to the transactions mentioned above, the control endpoint is used
55 to transfer character data to the 1401. The other endpoints are used as:
56
57 1 Characters from the 1401
58 2 Block data to the 1401
59 3 Block data to the host.
60
61 The type of interface available is specified by the interface subclass field
62 in the interface descriptor provided by the 1401. See the USB_INT_ constants
63 for the values that this field can hold.
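
As an illustration only (a sketch; the probe code in this file actually
distinguishes the two layouts by counting endpoints rather than by reading
the subclass), the field could be picked up from the current altsetting:

    struct usb_host_interface *alt = interface->cur_altsetting;
    __u8 subclass = alt->desc.bInterfaceSubClass;   /* one of the USB_INT_ values */
    /* subclass then selects the 4-endpoint or 3-endpoint pipe layout */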
64
65 ****************************************************************************
66 Linux implementation
67
68 Although Linux Device Drivers (3rd Edition) was a major source of information,
69 it is very out of date. A lot of information was gleaned from the latest
70 usb_skeleton.c code (you need to download the kernel sources to get this).
71
72 To match the Windows version, everything is done using ioctl calls. All the
73 device state is held in the DEVICE_EXTENSION (named to match Windows use).
74 Block transfers are done by using get_user_pages() to pin down a list of
75 pages that we hold a pointer to in the device driver. We also allocate a
76 coherent transfer buffer of size STAGED_SZ (this must be a multiple of the
77 bulk endpoint size so that the 1401 does not realise that we break large
78 transfers down into smaller pieces). We use kmap_atomic() to get a kernel
79 va for each page as it is needed for copying; see CopyUserSpace().
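
The pinning itself is done in the transfer-area setup code, not in this file.
As a rough sketch only (the exact get_user_pages*() call and its signature
vary between kernel versions, and the names used here are illustrative):

    unsigned long ulStart = (unsigned long)puUserBuf & PAGE_MASK;
    unsigned int nPages = (dwLength + ((unsigned long)puUserBuf & ~PAGE_MASK)
                           + PAGE_SIZE - 1) >> PAGE_SHIFT;
    struct page **pPages = kmalloc(nPages * sizeof(*pPages), GFP_KERNEL);
    int nPinned = get_user_pages_fast(ulStart, nPages, FOLL_WRITE, pPages);
    /* each pPages[i] can then be kmap_atomic()'d for copying, as in
       CopyUserSpace(), and must be released with put_page() when the
       transfer area is cleared */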
80
81 All character and data transfers are done using asynchronous IO. All Urbs are
82 tracked by anchoring them. Status and debug ioctls are implemented with
83 synchronous, non-URB based transfers.
84 */
85
86 #include <linux/kernel.h>
87 #include <linux/errno.h>
88 #include <linux/usb.h>
89 #include <linux/mutex.h>
90 #include <linux/mm.h>
91 #include <linux/highmem.h>
92 #include <linux/init.h>
93 #include <linux/slab.h>
94 #include <linux/module.h>
95 #include <linux/kref.h>
96 #include <linux/uaccess.h>
97
98 #include "usb1401.h"
99
100 /* Define these values to match your devices */
101 #define USB_CED_VENDOR_ID 0x0525
102 #define USB_CED_PRODUCT_ID 0xa0f0
103
104 /* table of devices that work with this driver */
105 static const struct usb_device_id ced_table[] = {
106 {USB_DEVICE(USB_CED_VENDOR_ID, USB_CED_PRODUCT_ID)},
107 {} /* Terminating entry */
108 };
109
110 MODULE_DEVICE_TABLE(usb, ced_table);
111
112 /* Get a minor range for your devices from the usb maintainer */
113 #define USB_CED_MINOR_BASE 192
114
115 /* our private defines. if this grows any larger, use your own .h file */
116 #define MAX_TRANSFER (PAGE_SIZE - 512)
117 /* MAX_TRANSFER is chosen so that the VM is not stressed by
118 allocations > PAGE_SIZE and the number of packets in a page
119 is an integer; 512 is the largest possible packet on EHCI */
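/* For example, with 4096-byte pages MAX_TRANSFER is 4096 - 512 = 3584 bytes,
   i.e. seven full 512-byte high-speed bulk packets per transfer */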
120 #define WRITES_IN_FLIGHT 8
121 /* arbitrarily chosen */
122
123 static struct usb_driver ced_driver;
124
125 static void ced_delete(struct kref *kref)
126 {
127 DEVICE_EXTENSION *pdx = to_DEVICE_EXTENSION(kref);
128
129 // Free up the output buffer, then free the output urb. Note that the interface member
130 // of pdx will probably be NULL, so cannot be used to get to dev.
131 usb_free_coherent(pdx->udev, OUTBUF_SZ, pdx->pCoherCharOut,
132 pdx->pUrbCharOut->transfer_dma);
133 usb_free_urb(pdx->pUrbCharOut);
134
135 // Do the same for the char input
136 usb_free_coherent(pdx->udev, INBUF_SZ, pdx->pCoherCharIn,
137 pdx->pUrbCharIn->transfer_dma);
138 usb_free_urb(pdx->pUrbCharIn);
139
140 // Do the same for the block transfers
141 usb_free_coherent(pdx->udev, STAGED_SZ, pdx->pCoherStagedIO,
142 pdx->pStagedUrb->transfer_dma);
143 usb_free_urb(pdx->pStagedUrb);
144
145 usb_put_dev(pdx->udev);
146 kfree(pdx);
147 }
148
149 // This is the driver end of the open() call from user space.
150 static int ced_open(struct inode *inode, struct file *file)
151 {
152 DEVICE_EXTENSION *pdx;
153 int retval = 0;
154 int subminor = iminor(inode);
155 struct usb_interface *interface =
156 usb_find_interface(&ced_driver, subminor);
157 if (!interface) {
158 pr_err("%s - error, can't find device for minor %d", __func__,
159 subminor);
160 retval = -ENODEV;
161 goto exit;
162 }
163
164 pdx = usb_get_intfdata(interface);
165 if (!pdx) {
166 retval = -ENODEV;
167 goto exit;
168 }
169
170 dev_dbg(&interface->dev, "%s got pdx", __func__);
171
172 /* increment our usage count for the device */
173 kref_get(&pdx->kref);
174
175 /* lock the device to allow correctly handling errors
176 * in resumption */
177 mutex_lock(&pdx->io_mutex);
178
179 if (!pdx->open_count++) {
180 retval = usb_autopm_get_interface(interface);
181 if (retval) {
182 pdx->open_count--;
183 mutex_unlock(&pdx->io_mutex);
184 kref_put(&pdx->kref, ced_delete);
185 goto exit;
186 }
187 } else { // this block enforces exclusive open; remove it to allow multiple opens
188 dev_err(&interface->dev, "%s fail: already open", __func__);
189 retval = -EBUSY;
190 pdx->open_count--;
191 mutex_unlock(&pdx->io_mutex);
192 kref_put(&pdx->kref, ced_delete);
193 goto exit;
194 }
195 /* prevent the device from being autosuspended */
196
197 /* save our object in the file's private structure */
198 file->private_data = pdx;
199 mutex_unlock(&pdx->io_mutex);
200
201 exit:
202 return retval;
203 }
204
205 static int ced_release(struct inode *inode, struct file *file)
206 {
207 DEVICE_EXTENSION *pdx = file->private_data;
208 if (pdx == NULL)
209 return -ENODEV;
210
211 dev_dbg(&pdx->interface->dev, "%s called", __func__);
212 mutex_lock(&pdx->io_mutex);
213 if (!--pdx->open_count && pdx->interface) // Allow autosuspend
214 usb_autopm_put_interface(pdx->interface);
215 mutex_unlock(&pdx->io_mutex);
216
217 kref_put(&pdx->kref, ced_delete); // decrement the count on our device
218 return 0;
219 }
220
221 static int ced_flush(struct file *file, fl_owner_t id)
222 {
223 int res;
224 DEVICE_EXTENSION *pdx = file->private_data;
225 if (pdx == NULL)
226 return -ENODEV;
227
228 dev_dbg(&pdx->interface->dev, "%s char in pend=%d", __func__,
229 pdx->bReadCharsPending);
230
231 /* wait for io to stop */
232 mutex_lock(&pdx->io_mutex);
233 dev_dbg(&pdx->interface->dev, "%s got io_mutex", __func__);
234 ced_draw_down(pdx);
235
236 /* read out errors, leave subsequent opens a clean slate */
237 spin_lock_irq(&pdx->err_lock);
238 res = pdx->errors ? (pdx->errors == -EPIPE ? -EPIPE : -EIO) : 0;
239 pdx->errors = 0;
240 spin_unlock_irq(&pdx->err_lock);
241
242 mutex_unlock(&pdx->io_mutex);
243 dev_dbg(&pdx->interface->dev, "%s exit reached", __func__);
244
245 return res;
246 }
247
248 /***************************************************************************
249 ** CanAcceptIoRequests
250 ** If the device is removed, interface is set NULL. We also clear our pointer
251 ** from the interface, so we should make sure that pdx is not NULL. This will
252 ** not help with a device extension held by a file.
253 ** return true if can accept new io requests, else false
254 */
255 static bool CanAcceptIoRequests(DEVICE_EXTENSION * pdx)
256 {
257 return pdx && pdx->interface; // Can we accept IO requests
258 }
259
260 /****************************************************************************
261 ** Callback routine to complete writes. This may need to fire off another
262 ** urb to complete the transfer.
263 ****************************************************************************/
264 static void ced_writechar_callback(struct urb *pUrb)
265 {
266 DEVICE_EXTENSION *pdx = pUrb->context;
267 int nGot = pUrb->actual_length; // what we transferred
268
269 if (pUrb->status) { // sync/async unlink faults aren't errors
270 if (pUrb->status != -ENOENT &&
271 pUrb->status != -ECONNRESET &&
272 pUrb->status != -ESHUTDOWN) {
273 dev_err(&pdx->interface->dev,
274 "%s - nonzero write bulk status received: %d",
275 __func__, pUrb->status);
276 }
277
278 spin_lock(&pdx->err_lock);
279 pdx->errors = pUrb->status;
280 spin_unlock(&pdx->err_lock);
281 nGot = 0; // and tidy up again if so
282
283 spin_lock(&pdx->charOutLock); // already at irq level
284 pdx->dwOutBuffGet = 0; // Reset the output buffer
285 pdx->dwOutBuffPut = 0;
286 pdx->dwNumOutput = 0; // Clear the char count
287 pdx->bPipeError[0] = 1; // Flag an error for later
288 pdx->bSendCharsPending = false; // Allow other threads again
289 spin_unlock(&pdx->charOutLock); // already at irq level
290 dev_dbg(&pdx->interface->dev,
291 "%s - char out done, 0 chars sent", __func__);
292 } else {
293 dev_dbg(&pdx->interface->dev,
294 "%s - char out done, %d chars sent", __func__, nGot);
295 spin_lock(&pdx->charOutLock); // already at irq level
296 pdx->dwNumOutput -= nGot; // Now adjust the char send buffer
297 pdx->dwOutBuffGet += nGot; // to match what we did
298 if (pdx->dwOutBuffGet >= OUTBUF_SZ) // Can't do this any earlier as data could be overwritten
299 pdx->dwOutBuffGet = 0;
300
301 if (pdx->dwNumOutput > 0) // if more to be done...
302 {
303 int nPipe = 0; // The pipe number to use
304 int iReturn;
305 char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
306 unsigned int dwCount = pdx->dwNumOutput; // maximum to send
307 if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
308 dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
309 spin_unlock(&pdx->charOutLock); // we are done with stuff that changes
310 memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
311 usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
312 usb_sndbulkpipe(pdx->udev,
313 pdx->epAddr[0]),
314 pdx->pCoherCharOut, dwCount,
315 ced_writechar_callback, pdx);
316 pdx->pUrbCharOut->transfer_flags |=
317 URB_NO_TRANSFER_DMA_MAP;
318 usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); // in case we need to kill it
319 iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
320 dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
321 dwCount, pDat);
322 spin_lock(&pdx->charOutLock); // grab lock for errors
323 if (iReturn) {
324 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
325 pdx->bSendCharsPending = false; // Allow other threads again
326 usb_unanchor_urb(pdx->pUrbCharOut);
327 dev_err(&pdx->interface->dev,
328 "%s usb_submit_urb() returned %d",
329 __func__, iReturn);
330 }
331 } else
332 pdx->bSendCharsPending = false; // Allow other threads again
333 spin_unlock(&pdx->charOutLock); // already at irq level
334 }
335 }
336
337 /****************************************************************************
338 ** SendChars
339 ** Transmit the characters in the output buffer to the 1401. This may need
340 ** breaking down into multiple transfers.
341 ****************************************************************************/
342 int SendChars(DEVICE_EXTENSION * pdx)
343 {
344 int iReturn = U14ERR_NOERROR;
345
346 spin_lock_irq(&pdx->charOutLock); // Protect ourselves
347
348 if ((!pdx->bSendCharsPending) && // Not currently sending
349 (pdx->dwNumOutput > 0) && // has characters to output
350 (CanAcceptIoRequests(pdx))) // and current activity is OK
351 {
352 unsigned int dwCount = pdx->dwNumOutput; // Get a copy of the character count
353 pdx->bSendCharsPending = true; // Set flag to lock out other threads
354
355 dev_dbg(&pdx->interface->dev,
356 "Send %d chars to 1401, EP0 flag %d\n", dwCount,
357 pdx->nPipes == 3);
358 // If we have only 3 end points we must send the characters to the 1401 using EP0.
359 if (pdx->nPipes == 3) {
360 // For EP0 character transmissions to the 1401, we have to hang about until they
361 // are gone, as otherwise without more character IO activity they will never go.
362 unsigned int count = dwCount; // Local char counter
363 unsigned int index = 0; // The index into the char buffer
364
365 spin_unlock_irq(&pdx->charOutLock); // Free spinlock as we call USBD
366
367 while ((count > 0) && (iReturn == U14ERR_NOERROR)) {
368 // We have to break the transfer up into 64-byte chunks because of a 2270 problem
369 int n = count > 64 ? 64 : count; // Chars for this xfer, max of 64
370 int nSent = usb_control_msg(pdx->udev,
371 usb_sndctrlpipe(pdx->udev, 0), // use end point 0
372 DB_CHARS, // bRequest
373 (H_TO_D | VENDOR | DEVREQ), // to the device, vendor request to the device
374 0, 0, // value and index are both 0
375 &pdx->outputBuffer[index], // where to send from
376 n, // how much to send
377 1000); // timeout in milliseconds
378 if (nSent <= 0) {
379 iReturn = nSent ? nSent : -ETIMEDOUT; // if 0 chars says we timed out
380 dev_err(&pdx->interface->dev,
381 "Send %d chars by EP0 failed: %d",
382 n, iReturn);
383 } else {
384 dev_dbg(&pdx->interface->dev,
385 "Sent %d chars by EP0", n);
386 count -= nSent;
387 index += nSent;
388 }
389 }
390
391 spin_lock_irq(&pdx->charOutLock); // Protect pdx changes, released by general code
392 pdx->dwOutBuffGet = 0; // so reset the output buffer
393 pdx->dwOutBuffPut = 0;
394 pdx->dwNumOutput = 0; // and clear the buffer count
395 pdx->bSendCharsPending = false; // Allow other threads again
396 } else { // Here for sending chars normally - we hold the spin lock
397 int nPipe = 0; // The pipe number to use
398 char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
399
400 if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
401 dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
402 spin_unlock_irq(&pdx->charOutLock); // we are done with stuff that changes
403 memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
404 usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
405 usb_sndbulkpipe(pdx->udev,
406 pdx->epAddr[0]),
407 pdx->pCoherCharOut, dwCount,
408 ced_writechar_callback, pdx);
409 pdx->pUrbCharOut->transfer_flags |=
410 URB_NO_TRANSFER_DMA_MAP;
411 usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted);
412 iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_KERNEL);
413 spin_lock_irq(&pdx->charOutLock); // grab lock for errors
414 if (iReturn) {
415 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
416 pdx->bSendCharsPending = false; // Allow other threads again
417 usb_unanchor_urb(pdx->pUrbCharOut); // remove from list of active urbs
418 }
419 }
420 } else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
421 dev_dbg(&pdx->interface->dev,
422 "SendChars bSendCharsPending:true");
423
424 dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
425 spin_unlock_irq(&pdx->charOutLock); // Now let go of the spinlock
426 return iReturn;
427 }
428
429 /***************************************************************************
430 ** CopyUserSpace
431 ** This moves memory between pinned down user space and the pCoherStagedIO
432 ** memory buffer we use for transfers. Copy n bytes in the direction that
433 ** is defined by pdx->StagedRead. The user space is determined by the area
434 ** in pdx->StagedId and the offset in pdx->StagedDone. The user
435 ** area may well not start on a page boundary, so allow for that.
436 **
437 ** We have a table of physical pages that describe the area, so we can use
438 ** this to get a virtual address that the kernel can use.
439 **
440 ** pdx Is our device extension which holds all we know about the transfer.
441 ** n The number of bytes to move one way or the other.
442 ***************************************************************************/
443 static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
444 {
445 unsigned int nArea = pdx->StagedId;
446 if (nArea < MAX_TRANSAREAS) {
447 TRANSAREA *pArea = &pdx->rTransDef[nArea]; // area to be used
448 unsigned int dwOffset =
449 pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
450 char *pCoherBuf = pdx->pCoherStagedIO; // coherent buffer
451 if (!pArea->bUsed) {
452 dev_err(&pdx->interface->dev, "%s area %d unused",
453 __func__, nArea);
454 return;
455 }
456
457 while (n) {
458 int nPage = dwOffset >> PAGE_SHIFT; // page number in table
459 if (nPage < pArea->nPages) {
460 char *pvAddress =
461 (char *)kmap_atomic(pArea->pPages[nPage]);
462 if (pvAddress) {
463 unsigned int uiPageOff = dwOffset & (PAGE_SIZE - 1); // offset into the page
464 size_t uiXfer = PAGE_SIZE - uiPageOff; // max to transfer on this page
465 if (uiXfer > n) // limit byte count if too much
466 uiXfer = n; // for the page
467 if (pdx->StagedRead)
468 memcpy(pvAddress + uiPageOff,
469 pCoherBuf, uiXfer);
470 else
471 memcpy(pCoherBuf,
472 pvAddress + uiPageOff,
473 uiXfer);
474 kunmap_atomic(pvAddress);
475 dwOffset += uiXfer;
476 pCoherBuf += uiXfer;
477 n -= uiXfer;
478 } else {
479 dev_err(&pdx->interface->dev,
480 "%s did not map page %d",
481 __func__, nPage);
482 return;
483 }
484
485 } else {
486 dev_err(&pdx->interface->dev,
487 "%s exceeded pages %d", __func__,
488 nPage);
489 return;
490 }
491 }
492 } else
493 dev_err(&pdx->interface->dev, "%s bad area %d", __func__,
494 nArea);
495 }
496
497 // Forward declarations for stuff used circularly
498 static int StageChunk(DEVICE_EXTENSION * pdx);
499 /***************************************************************************
500 ** ReadWrite_Complete
501 **
502 ** Completion routine for our staged read/write Irps
503 */
504 static void staged_callback(struct urb *pUrb)
505 {
506 DEVICE_EXTENSION *pdx = pUrb->context;
507 unsigned int nGot = pUrb->actual_length; // what we transferred
508 bool bCancel = false;
509 bool bRestartCharInput; // used at the end
510
511 spin_lock(&pdx->stagedLock); // stop ReadWriteMem() action while this routine is running
512 pdx->bStagedUrbPending = false; // clear the flag for staged IRP pending
513
514 if (pUrb->status) { // sync/async unlink faults aren't errors
515 if (pUrb->status != -ENOENT &&
516 pUrb->status != -ECONNRESET &&
517 pUrb->status != -ESHUTDOWN) {
518 dev_err(&pdx->interface->dev,
519 "%s - nonzero write bulk status received: %d",
520 __func__, pUrb->status);
521 } else
522 dev_info(&pdx->interface->dev,
523 "%s - staged xfer cancelled", __func__);
524
525 spin_lock(&pdx->err_lock);
526 pdx->errors = pUrb->status;
527 spin_unlock(&pdx->err_lock);
528 nGot = 0; // and tidy up again if so
529 bCancel = true;
530 } else {
531 dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
532 nGot);
533 if (pdx->StagedRead) // if reading, save to user space
534 CopyUserSpace(pdx, nGot); // copy from buffer to user
535 if (nGot == 0)
536 dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
537 }
538
539 // Update the count of bytes done so far, using the URB's actual_length
540 pdx->StagedDone += nGot;
541
542 dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
543 pdx->StagedDone, pdx->StagedLength);
544
545 if ((pdx->StagedDone == pdx->StagedLength) || // If no more to do
546 (bCancel)) // or this IRP was cancelled
547 {
548 TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; // Transfer area info
549 dev_dbg(&pdx->interface->dev,
550 "%s transfer done, bytes %d, cancel %d", __func__,
551 pdx->StagedDone, bCancel);
552
553 // Here is where we sort out what to do with this transfer if using a circular buffer. We have
554 // a completed transfer that can be assumed to fit into the transfer area. We should be able to
555 // add this to the end of a growing block or to use it to start a new block unless the code
556 // that calculates the offset to use (in ReadWriteMem) is totally duff.
557 if ((pArea->bCircular) && (pArea->bCircToHost) && (!bCancel) && // Time to sort out circular buffer info?
558 (pdx->StagedRead)) // Only for tohost transfers for now
559 {
560 if (pArea->aBlocks[1].dwSize > 0) // If block 1 is in use we must append to it
561 {
562 if (pdx->StagedOffset ==
563 (pArea->aBlocks[1].dwOffset +
564 pArea->aBlocks[1].dwSize)) {
565 pArea->aBlocks[1].dwSize +=
566 pdx->StagedLength;
567 dev_dbg(&pdx->interface->dev,
568 "RWM_Complete, circ block 1 now %d bytes at %d",
569 pArea->aBlocks[1].dwSize,
570 pArea->aBlocks[1].dwOffset);
571 } else {
572 // Here things have gone very, very wrong, but I cannot see how this can actually be achieved
573 pArea->aBlocks[1].dwOffset =
574 pdx->StagedOffset;
575 pArea->aBlocks[1].dwSize =
576 pdx->StagedLength;
577 dev_err(&pdx->interface->dev,
578 "%s ERROR, circ block 1 re-started %d bytes at %d",
579 __func__,
580 pArea->aBlocks[1].dwSize,
581 pArea->aBlocks[1].dwOffset);
582 }
583 } else // If block 1 is not used, we try to add to block 0
584 {
585 if (pArea->aBlocks[0].dwSize > 0) // Got stored block 0 information?
586 { // Must append onto the existing block 0
587 if (pdx->StagedOffset ==
588 (pArea->aBlocks[0].dwOffset +
589 pArea->aBlocks[0].dwSize)) {
590 pArea->aBlocks[0].dwSize += pdx->StagedLength; // Just add this transfer in
591 dev_dbg(&pdx->interface->dev,
592 "RWM_Complete, circ block 0 now %d bytes at %d",
593 pArea->aBlocks[0].
594 dwSize,
595 pArea->aBlocks[0].
596 dwOffset);
597 } else // If it doesn't append, put into new block 1
598 {
599 pArea->aBlocks[1].dwOffset =
600 pdx->StagedOffset;
601 pArea->aBlocks[1].dwSize =
602 pdx->StagedLength;
603 dev_dbg(&pdx->interface->dev,
604 "RWM_Complete, circ block 1 started %d bytes at %d",
605 pArea->aBlocks[1].
606 dwSize,
607 pArea->aBlocks[1].
608 dwOffset);
609 }
610 } else // No info stored yet, just save in block 0
611 {
612 pArea->aBlocks[0].dwOffset =
613 pdx->StagedOffset;
614 pArea->aBlocks[0].dwSize =
615 pdx->StagedLength;
616 dev_dbg(&pdx->interface->dev,
617 "RWM_Complete, circ block 0 started %d bytes at %d",
618 pArea->aBlocks[0].dwSize,
619 pArea->aBlocks[0].dwOffset);
620 }
621 }
622 }
623
624 if (!bCancel) // Don't generate an event if cancelled
625 {
626 dev_dbg(&pdx->interface->dev,
627 "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
628 pArea->bCircular, pArea->bEventToHost,
629 pArea->dwEventSt, pArea->dwEventSz);
630 if ((pArea->dwEventSz) && // Set a user-mode event...
631 (pdx->StagedRead == pArea->bEventToHost)) // ...on transfers in this direction?
632 {
633 int iWakeUp = 0; // assume
634 // If we have completed the right sort of DMA transfer then set the event to notify
635 // the user code to wake up anyone that is waiting.
636 if ((pArea->bCircular) && // Circular areas use a simpler test
637 (pArea->bCircToHost)) // only in supported direction
638 { // Is total data waiting up to size limit?
639 unsigned int dwTotal =
640 pArea->aBlocks[0].dwSize +
641 pArea->aBlocks[1].dwSize;
642 iWakeUp = (dwTotal >= pArea->dwEventSz);
643 } else {
644 unsigned int transEnd =
645 pdx->StagedOffset +
646 pdx->StagedLength;
647 unsigned int eventEnd =
648 pArea->dwEventSt + pArea->dwEventSz;
649 iWakeUp = (pdx->StagedOffset < eventEnd)
650 && (transEnd > pArea->dwEventSt);
651 }
652
653 if (iWakeUp) {
654 dev_dbg(&pdx->interface->dev,
655 "About to set event to notify app");
656 wake_up_interruptible(&pArea->wqEvent); // wake up waiting processes
657 ++pArea->iWakeUp; // increment wakeup count
658 }
659 }
660 }
661
662 pdx->dwDMAFlag = MODE_CHAR; // Switch back to char mode before ReadWriteMem call
663
664 if (!bCancel) // Don't look for waiting transfer if cancelled
665 {
666 // If we have a transfer waiting, kick it off
667 if (pdx->bXFerWaiting) // Got a block xfer waiting?
668 {
669 int iReturn;
670 dev_info(&pdx->interface->dev,
671 "*** RWM_Complete *** pending transfer will now be set up!!!");
672 iReturn =
673 ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
674 pdx->rDMAInfo.wIdent,
675 pdx->rDMAInfo.dwOffset,
676 pdx->rDMAInfo.dwSize);
677
678 if (iReturn)
679 dev_err(&pdx->interface->dev,
680 "RWM_Complete rw setup failed %d",
681 iReturn);
682 }
683 }
684
685 } else // Here for more to do
686 StageChunk(pdx); // fire off the next bit
687
688 // While we hold the stagedLock, see if we should reallow character input ints
689 // Don't allow if cancelled, or if a new block has started or if there is a waiting block.
690 // This feels wrong as we should ask which spin lock protects dwDMAFlag.
691 bRestartCharInput = !bCancel && (pdx->dwDMAFlag == MODE_CHAR)
692 && !pdx->bXFerWaiting;
693
694 spin_unlock(&pdx->stagedLock); // Finally release the lock again
695
696 // This is not correct as dwDMAFlag is protected by the staged lock, but it is treated
697 // in Allowi as if it were protected by the char lock. In any case, most systems will
698 // not be upset by char input during DMA... sigh. Needs sorting out.
699 if (bRestartCharInput) // may be out of date, but...
700 Allowi(pdx); // ...Allowi tests a lock too.
701 dev_dbg(&pdx->interface->dev, "%s done", __func__);
702 }
703
704 /****************************************************************************
705 ** StageChunk
706 **
707 ** Generates the next chunk of data making up a staged transfer.
708 **
709 ** The calling code must have acquired the staging spinlock before calling
710 ** this function, and is responsible for releasing it. We are at callback level.
711 ****************************************************************************/
712 static int StageChunk(DEVICE_EXTENSION * pdx)
713 {
714 int iReturn = U14ERR_NOERROR;
715 unsigned int ChunkSize;
716 int nPipe = pdx->StagedRead ? 3 : 2; // The pipe number to use for reads or writes
717 if (pdx->nPipes == 3)
718 nPipe--; // Adjust for the 3-pipe case
719 if (nPipe < 0) // and trap case that should never happen
720 return U14ERR_FAIL;
721
722 if (!CanAcceptIoRequests(pdx)) // got sudden remove?
723 {
724 dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
725 __func__);
726 return U14ERR_FAIL; // could do with a better error
727 }
728
729 ChunkSize = (pdx->StagedLength - pdx->StagedDone); // transfer length remaining
730 if (ChunkSize > STAGED_SZ) // make sure to keep legal
731 ChunkSize = STAGED_SZ; // limit to max allowed
732
733 if (!pdx->StagedRead) // if writing...
734 CopyUserSpace(pdx, ChunkSize); // ...copy data into the buffer
735
736 usb_fill_bulk_urb(pdx->pStagedUrb, pdx->udev,
737 pdx->StagedRead ? usb_rcvbulkpipe(pdx->udev,
738 pdx->
739 epAddr[nPipe]) :
740 usb_sndbulkpipe(pdx->udev, pdx->epAddr[nPipe]),
741 pdx->pCoherStagedIO, ChunkSize, staged_callback, pdx);
742 pdx->pStagedUrb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
743 usb_anchor_urb(pdx->pStagedUrb, &pdx->submitted); // in case we need to kill it
744 iReturn = usb_submit_urb(pdx->pStagedUrb, GFP_ATOMIC);
745 if (iReturn) {
746 usb_unanchor_urb(pdx->pStagedUrb); // kill it
747 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
748 dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
749 __func__, iReturn);
750 } else
751 pdx->bStagedUrbPending = true; // Set the flag for staged URB pending
752 dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
753 __func__, pdx->StagedDone, ChunkSize);
754
755 return iReturn;
756 }
757
758 /***************************************************************************
759 ** ReadWriteMem
760 **
761 ** This routine is used generally for block read and write operations.
762 ** Breaks up a read or write into suitably sized chunks, as specified by pipe
763 ** information on maximum transfer size.
764 **
765 ** Any code that calls this must be holding the stagedLock
766 **
767 ** Arguments:
768 ** DeviceObject - pointer to our FDO (Functional Device Object)
769 ** Read - TRUE for read, FALSE for write. This is from POV of the driver
770 ** wIdent - the transfer area number - defines memory area and more.
771 ** dwOffs - the start offset within the transfer area of the start of this
772 ** transfer.
773 ** dwLen - the number of bytes to transfer.
774 */
775 int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
776 unsigned int dwOffs, unsigned int dwLen)
777 {
778 TRANSAREA *pArea = &pdx->rTransDef[wIdent]; // Transfer area info
779
780 if (!CanAcceptIoRequests(pdx)) // Are we in a state to accept new requests?
781 {
782 dev_err(&pdx->interface->dev, "%s can't accept requests",
783 __func__);
784 return U14ERR_FAIL;
785 }
786
787 dev_dbg(&pdx->interface->dev,
788 "%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
789 Read ? "host" : "1401", dwOffs, wIdent);
790
791 // Amazingly, we can get an escape sequence back before the current staged Urb is done, so we
792 // have to check for this situation and, if so, wait until all is OK.
793 if (pdx->bStagedUrbPending) {
794 pdx->bXFerWaiting = true; // Flag we are waiting
795 dev_info(&pdx->interface->dev,
796 "%s xfer is waiting, as previous staged pending",
797 __func__);
798 return U14ERR_NOERROR;
799 }
800
801 if (dwLen == 0) // allow 0-len read or write; just return success
802 {
803 dev_dbg(&pdx->interface->dev,
804 "%s OK; zero-len read/write request", __func__);
805 return U14ERR_NOERROR;
806 }
807
808 if ((pArea->bCircular) && // Circular transfer?
809 (pArea->bCircToHost) && (Read)) // In a supported direction
810 { // If so, we sort out offset ourself
811 bool bWait = false; // Flag for transfer having to wait
812
813 dev_dbg(&pdx->interface->dev,
814 "Circular buffers are %d at %d and %d at %d",
815 pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
816 pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
817 if (pArea->aBlocks[1].dwSize > 0) // Using the second block already?
818 {
819 dwOffs = pArea->aBlocks[1].dwOffset + pArea->aBlocks[1].dwSize; // take offset from that
820 bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
821 bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
822 } else // Area 1 not in use, try to use area 0
823 {
824 if (pArea->aBlocks[0].dwSize == 0) // Reset block 0 if not in use
825 pArea->aBlocks[0].dwOffset = 0;
826 dwOffs =
827 pArea->aBlocks[0].dwOffset +
828 pArea->aBlocks[0].dwSize;
829 if ((dwOffs + dwLen) > pArea->dwLength) // Off the end of the buffer?
830 {
831 pArea->aBlocks[1].dwOffset = 0; // Set up to use second block
832 dwOffs = 0;
833 bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
834 bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
835 }
836 }
837
838 if (bWait) // This transfer will have to wait?
839 {
840 pdx->bXFerWaiting = true; // Flag we are waiting
841 dev_dbg(&pdx->interface->dev,
842 "%s xfer waiting for circular buffer space",
843 __func__);
844 return U14ERR_NOERROR;
845 }
846
847 dev_dbg(&pdx->interface->dev,
848 "%s circular xfer, %d bytes starting at %d", __func__,
849 dwLen, dwOffs);
850 }
851 // Save the parameters for the read/write transfer
852 pdx->StagedRead = Read; // Save the parameters for this read
853 pdx->StagedId = wIdent; // ID allows us to get transfer area info
854 pdx->StagedOffset = dwOffs; // The area within the transfer area
855 pdx->StagedLength = dwLen;
856 pdx->StagedDone = 0; // Initialise the byte count
857 pdx->dwDMAFlag = MODE_LINEAR; // Set DMA mode flag at this point
858 pdx->bXFerWaiting = false; // Clearly not a transfer waiting now
859
860 // KeClearEvent(&pdx->StagingDoneEvent); // Clear the transfer done event
861 StageChunk(pdx); // fire off the first chunk
862
863 return U14ERR_NOERROR;
864 }
865
866 /****************************************************************************
867 **
868 ** ReadChar
869 **
870 ** Reads a character from a buffer. If there is no more
871 ** data we return FALSE. Used as part of decoding a DMA request.
872 **
873 ****************************************************************************/
874 static bool ReadChar(unsigned char *pChar, char *pBuf, unsigned int *pdDone,
875 unsigned int dGot)
876 {
877 bool bRead = false;
878 unsigned int dDone = *pdDone;
879
880 if (dDone < dGot) // If there is more data
881 {
882 *pChar = (unsigned char)pBuf[dDone]; // Extract the next char
883 dDone++; // Increment the done count
884 *pdDone = dDone;
885 bRead = true; // and flag success
886 }
887
888 return bRead;
889 }
890
891 #ifdef NOTUSED
892 /****************************************************************************
893 **
894 ** ReadWord
895 **
896 ** Reads a word from the 1401, just uses ReadChar twice; passes on any error
897 **
898 *****************************************************************************/
899 static bool ReadWord(unsigned short *pWord, char *pBuf, unsigned int *pdDone,
900 unsigned int dGot)
901 {
902 if (ReadChar((unsigned char *)pWord, pBuf, pdDone, dGot))
903 return ReadChar(((unsigned char *)pWord) + 1, pBuf, pdDone,
904 dGot);
905 else
906 return false;
907 }
908 #endif
909
910 /****************************************************************************
911 ** ReadHuff
912 **
913 ** Reads a coded number in and returns it. The code is:
914 ** If data is in range 0..127 we receive 1 byte. If data in range 128-16383
915 ** we receive two bytes, top bit of first indicates another on its way. If
916 ** data in range 16384-4194303 we get three bytes, top two bits of first set
917 ** to indicate three byte total.
918 **
919 *****************************************************************************/
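/*
   Worked example: the value 300 (0x12C) arrives as the two bytes 0x81, 0x2C.
   The top bit of 0x81 flags a continuation, so ReadHuff() keeps (0x81 & 0x7F)
   = 0x01, shifts it left by 8 and ORs in 0x2C, giving 0x12C = 300.

   A minimal sketch (not part of this driver) of the inverse encoding that the
   sending side would have to perform:

   static unsigned int EncodeHuff(unsigned int v, unsigned char *out)
   {
       if (v < 0x80) {                          // 0..127: one byte
           out[0] = (unsigned char)v;
           return 1;
       } else if (v < 0x4000) {                 // 128..16383: two bytes
           out[0] = (unsigned char)(0x80 | (v >> 8));
           out[1] = (unsigned char)(v & 0xFF);
           return 2;
       } else {                                 // 16384..4194303: three bytes
           out[0] = (unsigned char)(0xC0 | (v >> 16));
           out[1] = (unsigned char)((v >> 8) & 0xFF);
           out[2] = (unsigned char)(v & 0xFF);
           return 3;
       }
   }
*/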
920 static bool ReadHuff(volatile unsigned int *pDWord, char *pBuf,
921 unsigned int *pdDone, unsigned int dGot)
922 {
923 unsigned char ucData; /* for each read to ReadChar */
924 bool bReturn = true; /* assume we will succeed */
925 unsigned int dwData = 0; /* Accumulator for the data */
926
927 if (ReadChar(&ucData, pBuf, pdDone, dGot)) {
928 dwData = ucData; /* copy the data */
929 if ((dwData & 0x00000080) != 0) { /* Bit set for more data ? */
930 dwData &= 0x0000007F; /* Clear the relevant bit */
931 if (ReadChar(&ucData, pBuf, pdDone, dGot)) {
932 dwData = (dwData << 8) | ucData;
933 if ((dwData & 0x00004000) != 0) { /* three byte sequence ? */
934 dwData &= 0x00003FFF; /* Clear the relevant bit */
935 if (ReadChar
936 (&ucData, pBuf, pdDone, dGot))
937 dwData = (dwData << 8) | ucData;
938 else
939 bReturn = false;
940 }
941 } else
942 bReturn = false; /* couldn't read data */
943 }
944 } else
945 bReturn = false;
946
947 *pDWord = dwData; /* return the data */
948 return bReturn;
949 }
950
951 /***************************************************************************
952 **
953 ** ReadDMAInfo
954 **
955 ** Tries to read info about the dma request from the 1401 and decode it into
956 ** the dma descriptor block. We have at this point had the escape character
957 ** from the 1401 and now we must read in the rest of the information about
958 ** the transfer request. Returns FALSE if the 1401 fails to respond, sends an
959 ** obsolete code, or supplies bad parameters.
960 **
961 ** The pBuf char pointer does not include the initial escape character, so
962 ** we start handling the data at offset zero.
963 **
964 *****************************************************************************/
965 static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
966 char *pBuf, unsigned int dwCount)
967 {
968 bool bResult = false; // assume we won't succeed
969 unsigned char ucData;
970 unsigned int dDone = 0; // We haven't parsed anything so far
971
972 dev_dbg(&pdx->interface->dev, "%s", __func__);
973
974 if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
975 unsigned char ucTransCode = (ucData & 0x0F); // get code for transfer type
976 unsigned short wIdent = ((ucData >> 4) & 0x07); // and area identifier
977
978 // fill in the structure we were given
979 pDmaDesc->wTransType = ucTransCode; // type of transfer
980 pDmaDesc->wIdent = wIdent; // area to use
981 pDmaDesc->dwSize = 0; // initialise other bits
982 pDmaDesc->dwOffset = 0;
983
984 dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
985 pDmaDesc->wTransType, pDmaDesc->wIdent);
986
987 pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); // set transfer direction
988
989 switch (ucTransCode) {
990 case TM_EXTTOHOST: // Extended linear transfer modes (the only ones!)
991 case TM_EXTTO1401:
992 {
993 bResult =
994 ReadHuff(&(pDmaDesc->dwOffset), pBuf,
995 &dDone, dwCount)
996 && ReadHuff(&(pDmaDesc->dwSize), pBuf,
997 &dDone, dwCount);
998 if (bResult) {
999 dev_dbg(&pdx->interface->dev,
1000 "%s xfer offset & size %d %d",
1001 __func__, pDmaDesc->dwOffset,
1002 pDmaDesc->dwSize);
1003
1004 if ((wIdent >= MAX_TRANSAREAS) || // Illegal area number, or...
1005 (!pdx->rTransDef[wIdent].bUsed) || // area not set up, or...
1006 (pDmaDesc->dwOffset > pdx->rTransDef[wIdent].dwLength) || // range/size
1007 ((pDmaDesc->dwOffset +
1008 pDmaDesc->dwSize) >
1009 (pdx->rTransDef[wIdent].
1010 dwLength))) {
1011 bResult = false; // bad parameter(s)
1012 dev_dbg(&pdx->interface->dev,
1013 "%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
1014 __func__, wIdent,
1015 pdx->rTransDef[wIdent].
1016 bUsed,
1017 pDmaDesc->dwOffset,
1018 pDmaDesc->dwSize,
1019 pdx->rTransDef[wIdent].
1020 dwLength);
1021 }
1022 }
1023 break;
1024 }
1025 default:
1026 break;
1027 }
1028 } else
1029 bResult = false;
1030
1031 if (!bResult) // report any failure to decode the escape sequence
1032 dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
1033 __func__);
1034
1035 return bResult;
1036 }
1037
1038 /****************************************************************************
1039 **
1040 ** Handle1401Esc
1041 **
1042 ** Deals with an escape sequence coming from the 1401. This can either be
1043 ** a DMA transfer request of various types or a response to an escape sequence
1044 ** sent to the 1401. This is called from a callback.
1045 **
1046 ** Parameters are
1047 **
1048 ** dwCount - the number of characters in the device extension char in buffer,
1049 ** this is known to be at least 2 or we will not be called.
1050 **
1051 ****************************************************************************/
1052 static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
1053 unsigned int dwCount)
1054 {
1055 int iReturn = U14ERR_FAIL;
1056
1057 // I have no idea what this next test is about. '?' is 0x3f, which is area 3, code
1058 // 15. At the moment, this is not used, so it does no harm, but unless someone can
1059 // tell me what this is for, it should be removed from this and the Windows driver.
1060 if (pCh[0] == '?') // Is this an information response
1061 { // Parse and save the information
1062 } else {
1063 spin_lock(&pdx->stagedLock); // Lock others out
1064
1065 if (ReadDMAInfo(&pdx->rDMAInfo, pdx, pCh, dwCount)) // Get DMA parameters
1066 {
1067 unsigned short wTransType = pdx->rDMAInfo.wTransType; // check transfer type
1068
1069 dev_dbg(&pdx->interface->dev,
1070 "%s xfer to %s, offset %d, length %d", __func__,
1071 pdx->rDMAInfo.bOutWard ? "1401" : "host",
1072 pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
1073
1074 if (pdx->bXFerWaiting) // Check here for badly out of kilter...
1075 { // This can never happen, really
1076 dev_err(&pdx->interface->dev,
1077 "ERROR: DMA setup while transfer still waiting");
1078 spin_unlock(&pdx->stagedLock);
1079 } else {
1080 if ((wTransType == TM_EXTTOHOST)
1081 || (wTransType == TM_EXTTO1401)) {
1082 iReturn =
1083 ReadWriteMem(pdx,
1084 !pdx->rDMAInfo.
1085 bOutWard,
1086 pdx->rDMAInfo.wIdent,
1087 pdx->rDMAInfo.dwOffset,
1088 pdx->rDMAInfo.dwSize);
1089 if (iReturn != U14ERR_NOERROR)
1090 dev_err(&pdx->interface->dev,
1091 "%s ReadWriteMem() failed %d",
1092 __func__, iReturn);
1093 } else // This covers non-linear transfer setup
1094 dev_err(&pdx->interface->dev,
1095 "%s Unknown block xfer type %d",
1096 __func__, wTransType);
1097 }
1098 } else // Failed to read parameters
1099 dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
1100 __func__);
1101
1102 spin_unlock(&pdx->stagedLock); // OK here
1103 }
1104
1105 dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
1106
1107 return iReturn;
1108 }
1109
1110 /****************************************************************************
1111 ** Callback for the character read complete or error
1112 ****************************************************************************/
1113 static void ced_readchar_callback(struct urb *pUrb)
1114 {
1115 DEVICE_EXTENSION *pdx = pUrb->context;
1116 int nGot = pUrb->actual_length; // what we transferred
1117
1118 if (pUrb->status) // Do we have a problem to handle?
1119 {
1120 int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use for error
1121 // sync/async unlink faults aren't errors... just saying device removed or stopped
1122 if (pUrb->status != -ENOENT &&
1123 pUrb->status != -ECONNRESET &&
1124 pUrb->status != -ESHUTDOWN) {
1125 dev_err(&pdx->interface->dev,
1126 "%s - nonzero write bulk status received: %d",
1127 __func__, pUrb->status);
1128 } else
1129 dev_dbg(&pdx->interface->dev,
1130 "%s - 0 chars pUrb->status=%d (shutdown?)",
1131 __func__, pUrb->status);
1132
1133 spin_lock(&pdx->err_lock);
1134 pdx->errors = pUrb->status;
1135 spin_unlock(&pdx->err_lock);
1136 nGot = 0; // and tidy up again if so
1137
1138 spin_lock(&pdx->charInLock); // already at irq level
1139 pdx->bPipeError[nPipe] = 1; // Flag an error for later
1140 } else {
1141 if ((nGot > 1) && ((pdx->pCoherCharIn[0] & 0x7f) == 0x1b)) // Esc sequence?
1142 {
1143 Handle1401Esc(pdx, &pdx->pCoherCharIn[1], nGot - 1); // handle it
1144 spin_lock(&pdx->charInLock); // already at irq level
1145 } else {
1146 spin_lock(&pdx->charInLock); // already at irq level
1147 if (nGot > 0) {
1148 unsigned int i;
1149 if (nGot < INBUF_SZ) {
1150 pdx->pCoherCharIn[nGot] = 0; // tidy the string
1151 dev_dbg(&pdx->interface->dev,
1152 "%s got %d chars >%s<",
1153 __func__, nGot,
1154 pdx->pCoherCharIn);
1155 }
1156 // We know that whatever we read must fit in the input buffer
1157 for (i = 0; i < nGot; i++) {
1158 pdx->inputBuffer[pdx->dwInBuffPut++] =
1159 pdx->pCoherCharIn[i] & 0x7F;
1160 if (pdx->dwInBuffPut >= INBUF_SZ)
1161 pdx->dwInBuffPut = 0;
1162 }
1163
1164 if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
1165 pdx->dwNumInput += nGot; // Adjust the buffer count accordingly
1166 } else
1167 dev_dbg(&pdx->interface->dev, "%s read ZLP",
1168 __func__);
1169 }
1170 }
1171
1172 pdx->bReadCharsPending = false; // No longer have a pending read
1173 spin_unlock(&pdx->charInLock); // already at irq level
1174
1175 Allowi(pdx); // see if we can do the next one
1176 }
1177
1178 /****************************************************************************
1179 ** Allowi
1180 **
1181 ** This is used to make sure that there is always a pending input transfer so
1182 ** we can pick up any inward transfers. This can be called in multiple contexts
1183 ** so we use the irqsave version of the spinlock.
1184 ****************************************************************************/
1185 int Allowi(DEVICE_EXTENSION * pdx)
1186 {
1187 int iReturn = U14ERR_NOERROR;
1188 unsigned long flags;
1189 spin_lock_irqsave(&pdx->charInLock, flags); // can be called in multiple contexts
1190
1191 // We don't want char input running while DMA is in progress as we know that this
1192 // can cause sequencing problems for the 2270. So don't. It will also allow the
1193 // ERR response to get back to the host code too early on some PCs, even if there
1194 // is no actual driver failure, so we don't allow this at all.
1195 if (!pdx->bInDrawDown && // stop input if
1196 !pdx->bReadCharsPending && // If no read request outstanding
1197 (pdx->dwNumInput < (INBUF_SZ / 2)) && // and there is some space
1198 (pdx->dwDMAFlag == MODE_CHAR) && // not doing any DMA
1199 (!pdx->bXFerWaiting) && // no xfer waiting to start
1200 (CanAcceptIoRequests(pdx))) // and activity is generally OK
1201 { // then off we go
1202 unsigned int nMax = INBUF_SZ - pdx->dwNumInput; // max we could read
1203 int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use
1204
1205 dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
1206 __func__, pdx->dwNumInput);
1207
1208 usb_fill_int_urb(pdx->pUrbCharIn, pdx->udev,
1209 usb_rcvintpipe(pdx->udev, pdx->epAddr[nPipe]),
1210 pdx->pCoherCharIn, nMax, ced_readchar_callback,
1211 pdx, pdx->bInterval);
1212 pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; // short xfers are OK by default
1213 usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted); // in case we need to kill it
1214 iReturn = usb_submit_urb(pdx->pUrbCharIn, GFP_ATOMIC);
1215 if (iReturn) {
1216 usb_unanchor_urb(pdx->pUrbCharIn); // remove from list of active Urbs
1217 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
1218 dev_err(&pdx->interface->dev,
1219 "%s submit urb failed: %d", __func__, iReturn);
1220 } else
1221 pdx->bReadCharsPending = true; // Flag that we are active here
1222 }
1223
1224 spin_unlock_irqrestore(&pdx->charInLock, flags);
1225
1226 return iReturn;
1227
1228 }
1229
1230 /*****************************************************************************
1231 ** The ioctl entry point to the driver that is used by us to talk to it.
1232 ** inode The device node (no longer passed in 3.0.0 and later kernels)
1233 ** file The file that is open, which holds our pdx pointer
1234 ** ulArg The argument passed in. Note that long is 64 bits on a 64-bit system, i.e. it is big
1235 ** enough for a 64-bit pointer.
1236 *****************************************************************************/
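/*
   A hypothetical user-space sketch of driving this interface (assumptions: the
   IOCTL_CED_* codes and U14ERR_* values from usb1401.h are made available to
   applications through a matching user header, the node registered below
   appears as /dev/cedusb0, and <fcntl.h>, <sys/ioctl.h> and <string.h> are
   included):

       const char *szCmd = "XXX;";   // some 1401 command string, illustrative only
       int fd = open("/dev/cedusb0", O_RDWR);
       if (fd >= 0) {
           if (ioctl(fd, IOCTL_CED_RESET1401) == U14ERR_NOERROR)
               // the string length is encoded in the ioctl size field and the
               // string pointer is passed as the argument
               ioctl(fd, IOCTL_CED_SENDSTRING(strlen(szCmd)), szCmd);
           close(fd);
       }
*/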
1237 static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
1238 {
1239 int err = 0;
1240 DEVICE_EXTENSION *pdx = file->private_data;
1241 if (!CanAcceptIoRequests(pdx)) // check we still exist
1242 return -ENODEV;
1243
1244 // Check that access is allowed, where it is needed. Anything that would have an indeterminate
1245 // size will be checked by the specific command.
1246 if (_IOC_DIR(cmd) & _IOC_READ) // read from point of view of user...
1247 err = !access_ok(VERIFY_WRITE, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel write
1248 else if (_IOC_DIR(cmd) & _IOC_WRITE) // and write from point of view of user...
1249 err = !access_ok(VERIFY_READ, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel read
1250 if (err)
1251 return -EFAULT;
1252
1253 switch (_IOC_NR(cmd)) {
1254 case _IOC_NR(IOCTL_CED_SENDSTRING(0)):
1255 return SendString(pdx, (const char __user *)ulArg,
1256 _IOC_SIZE(cmd));
1257
1258 case _IOC_NR(IOCTL_CED_RESET1401):
1259 return Reset1401(pdx);
1260
1261 case _IOC_NR(IOCTL_CED_GETCHAR):
1262 return GetChar(pdx);
1263
1264 case _IOC_NR(IOCTL_CED_SENDCHAR):
1265 return SendChar(pdx, (char)ulArg);
1266
1267 case _IOC_NR(IOCTL_CED_STAT1401):
1268 return Stat1401(pdx);
1269
1270 case _IOC_NR(IOCTL_CED_LINECOUNT):
1271 return LineCount(pdx);
1272
1273 case _IOC_NR(IOCTL_CED_GETSTRING(0)):
1274 return GetString(pdx, (char __user *)ulArg, _IOC_SIZE(cmd));
1275
1276 case _IOC_NR(IOCTL_CED_SETTRANSFER):
1277 return SetTransfer(pdx, (TRANSFERDESC __user *) ulArg);
1278
1279 case _IOC_NR(IOCTL_CED_UNSETTRANSFER):
1280 return UnsetTransfer(pdx, (int)ulArg);
1281
1282 case _IOC_NR(IOCTL_CED_SETEVENT):
1283 return SetEvent(pdx, (TRANSFEREVENT __user *) ulArg);
1284
1285 case _IOC_NR(IOCTL_CED_GETOUTBUFSPACE):
1286 return GetOutBufSpace(pdx);
1287
1288 case _IOC_NR(IOCTL_CED_GETBASEADDRESS):
1289 return -1;
1290
1291 case _IOC_NR(IOCTL_CED_GETDRIVERREVISION):
1292 return (2 << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV; // USB | MAJOR | MINOR
1293
1294 case _IOC_NR(IOCTL_CED_GETTRANSFER):
1295 return GetTransfer(pdx, (TGET_TX_BLOCK __user *) ulArg);
1296
1297 case _IOC_NR(IOCTL_CED_KILLIO1401):
1298 return KillIO1401(pdx);
1299
1300 case _IOC_NR(IOCTL_CED_STATEOF1401):
1301 return StateOf1401(pdx);
1302
1303 case _IOC_NR(IOCTL_CED_GRAB1401):
1304 case _IOC_NR(IOCTL_CED_FREE1401):
1305 return U14ERR_NOERROR;
1306
1307 case _IOC_NR(IOCTL_CED_STARTSELFTEST):
1308 return StartSelfTest(pdx);
1309
1310 case _IOC_NR(IOCTL_CED_CHECKSELFTEST):
1311 return CheckSelfTest(pdx, (TGET_SELFTEST __user *) ulArg);
1312
1313 case _IOC_NR(IOCTL_CED_TYPEOF1401):
1314 return TypeOf1401(pdx);
1315
1316 case _IOC_NR(IOCTL_CED_TRANSFERFLAGS):
1317 return TransferFlags(pdx);
1318
1319 case _IOC_NR(IOCTL_CED_DBGPEEK):
1320 return DbgPeek(pdx, (TDBGBLOCK __user *) ulArg);
1321
1322 case _IOC_NR(IOCTL_CED_DBGPOKE):
1323 return DbgPoke(pdx, (TDBGBLOCK __user *) ulArg);
1324
1325 case _IOC_NR(IOCTL_CED_DBGRAMPDATA):
1326 return DbgRampData(pdx, (TDBGBLOCK __user *) ulArg);
1327
1328 case _IOC_NR(IOCTL_CED_DBGRAMPADDR):
1329 return DbgRampAddr(pdx, (TDBGBLOCK __user *) ulArg);
1330
1331 case _IOC_NR(IOCTL_CED_DBGGETDATA):
1332 return DbgGetData(pdx, (TDBGBLOCK __user *) ulArg);
1333
1334 case _IOC_NR(IOCTL_CED_DBGSTOPLOOP):
1335 return DbgStopLoop(pdx);
1336
1337 case _IOC_NR(IOCTL_CED_FULLRESET):
1338 pdx->bForceReset = true; // Set a flag for a full reset
1339 break;
1340
1341 case _IOC_NR(IOCTL_CED_SETCIRCULAR):
1342 return SetCircular(pdx, (TRANSFERDESC __user *) ulArg);
1343
1344 case _IOC_NR(IOCTL_CED_GETCIRCBLOCK):
1345 return GetCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
1346
1347 case _IOC_NR(IOCTL_CED_FREECIRCBLOCK):
1348 return FreeCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
1349
1350 case _IOC_NR(IOCTL_CED_WAITEVENT):
1351 return WaitEvent(pdx, (int)(ulArg & 0xff), (int)(ulArg >> 8));
1352
1353 case _IOC_NR(IOCTL_CED_TESTEVENT):
1354 return TestEvent(pdx, (int)ulArg);
1355
1356 default:
1357 return U14ERR_NO_SUCH_FN;
1358 }
1359 return U14ERR_NOERROR;
1360 }
1361
1362 static const struct file_operations ced_fops = {
1363 .owner = THIS_MODULE,
1364 .open = ced_open,
1365 .release = ced_release,
1366 .flush = ced_flush,
1367 .llseek = noop_llseek,
1368 .unlocked_ioctl = ced_ioctl,
1369 };
1370
1371 /*
1372 * usb class driver info in order to get a minor number from the usb core,
1373 * and to have the device registered with the driver core
1374 */
1375 static struct usb_class_driver ced_class = {
1376 .name = "cedusb%d",
1377 .fops = &ced_fops,
1378 .minor_base = USB_CED_MINOR_BASE,
1379 };
1380
1381 // Check that the device that matches a 1401 vendor and product ID is OK to use and
1382 // initialise our DEVICE_EXTENSION.
1383 static int ced_probe(struct usb_interface *interface,
1384 const struct usb_device_id *id)
1385 {
1386 DEVICE_EXTENSION *pdx;
1387 struct usb_host_interface *iface_desc;
1388 struct usb_endpoint_descriptor *endpoint;
1389 int i, bcdDevice;
1390 int retval = -ENOMEM;
1391
1392 // allocate memory for our device extension and initialize it
1393 pdx = kzalloc(sizeof(*pdx), GFP_KERNEL);
1394 if (!pdx)
1395 goto error;
1396
1397 for (i = 0; i < MAX_TRANSAREAS; ++i) // Initialise the wait queues
1398 {
1399 init_waitqueue_head(&pdx->rTransDef[i].wqEvent);
1400 }
1401
1402 // Put initialisation of our own members here. Note that all of *pdx is zero, so
1403 // no need to explicitly zero it.
1404 spin_lock_init(&pdx->charOutLock);
1405 spin_lock_init(&pdx->charInLock);
1406 spin_lock_init(&pdx->stagedLock);
1407
1408 // Initialisation from the skeleton code
1409 kref_init(&pdx->kref);
1410 mutex_init(&pdx->io_mutex);
1411 spin_lock_init(&pdx->err_lock);
1412 init_usb_anchor(&pdx->submitted);
1413
1414 pdx->udev = usb_get_dev(interface_to_usbdev(interface));
1415 pdx->interface = interface;
1416
1417 // Attempt to identify the device
1418 bcdDevice = pdx->udev->descriptor.bcdDevice;
1419 i = (bcdDevice >> 8);
1420 if (i == 0)
1421 pdx->s1401Type = TYPEU1401;
1422 else if ((i >= 1) && (i <= 23))
1423 pdx->s1401Type = i + 2;
1424 else {
1425 dev_err(&interface->dev, "%s Unknown device. bcdDevice = %d",
1426 __func__, bcdDevice);
1427 goto error;
1428 }
1429 // set up the endpoint information. We only care about the number of EP as
1430 // we know that we are dealing with a 1401 device.
1431 iface_desc = interface->cur_altsetting;
1432 pdx->nPipes = iface_desc->desc.bNumEndpoints;
1433 dev_info(&interface->dev, "1401Type=%d with %d End Points",
1434 pdx->s1401Type, pdx->nPipes);
1435 if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
1436 goto error;
1437
1438 // Allocate the URBs we hold for performing transfers
1439 pdx->pUrbCharOut = usb_alloc_urb(0, GFP_KERNEL); // character output URB
1440 pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); // character input URB
1441 pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); // block transfer URB
1442 if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
1443 dev_err(&interface->dev, "%s URB alloc failed", __func__);
1444 goto error;
1445 }
1446
1447 pdx->pCoherStagedIO =
1448 usb_alloc_coherent(pdx->udev, STAGED_SZ, GFP_KERNEL,
1449 &pdx->pStagedUrb->transfer_dma);
1450 pdx->pCoherCharOut =
1451 usb_alloc_coherent(pdx->udev, OUTBUF_SZ, GFP_KERNEL,
1452 &pdx->pUrbCharOut->transfer_dma);
1453 pdx->pCoherCharIn =
1454 usb_alloc_coherent(pdx->udev, INBUF_SZ, GFP_KERNEL,
1455 &pdx->pUrbCharIn->transfer_dma);
1456 if (!pdx->pCoherCharOut || !pdx->pCoherCharIn || !pdx->pCoherStagedIO) {
1457 dev_err(&interface->dev, "%s Coherent buffer alloc failed",
1458 __func__);
1459 goto error;
1460 }
1461
1462 for (i = 0; i < pdx->nPipes; ++i) {
1463 endpoint = &iface_desc->endpoint[i].desc;
1464 pdx->epAddr[i] = endpoint->bEndpointAddress;
1465 dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
1466 pdx->epAddr[i]);
1467 if (((pdx->nPipes == 3) && (i == 0)) || // if char input end point
1468 ((pdx->nPipes == 4) && (i == 1))) {
1469 pdx->bInterval = endpoint->bInterval; // save the endpoint interrupt interval
1470 dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
1471 pdx->bInterval);
1472 }
1473 // Detect USB2 by checking last ep size (64 if USB1)
1474 if (i == pdx->nPipes - 1) // if this is the last ep (bulk)
1475 {
1476 pdx->bIsUSB2 =
1477 le16_to_cpu(endpoint->wMaxPacketSize) > 64;
1478 dev_info(&pdx->interface->dev, "USB%d",
1479 pdx->bIsUSB2 + 1);
1480 }
1481 }
1482
1483 /* save our data pointer in this interface device */
1484 usb_set_intfdata(interface, pdx);
1485
1486 /* we can register the device now, as it is ready */
1487 retval = usb_register_dev(interface, &ced_class);
1488 if (retval) {
1489 /* something prevented us from registering this driver */
1490 dev_err(&interface->dev,
1491 "Not able to get a minor for this device.\n");
1492 usb_set_intfdata(interface, NULL);
1493 goto error;
1494 }
1495
1496 /* let the user know what node this device is now attached to */
1497 dev_info(&interface->dev,
1498 "USB CEDUSB device now attached to cedusb #%d",
1499 interface->minor);
1500 return 0;
1501
1502 error:
1503 if (pdx)
1504 kref_put(&pdx->kref, ced_delete); // frees allocated memory
1505 return retval;
1506 }
1507
1508 static void ced_disconnect(struct usb_interface *interface)
1509 {
1510 DEVICE_EXTENSION *pdx = usb_get_intfdata(interface);
1511 int minor = interface->minor;
1512 int i;
1513
1514 usb_set_intfdata(interface, NULL); // remove the pdx from the interface
1515 usb_deregister_dev(interface, &ced_class); // give back our minor device number
1516
1517 mutex_lock(&pdx->io_mutex); // stop more I/O starting while...
1518 ced_draw_down(pdx); // ...wait for then kill any io
1519 for (i = 0; i < MAX_TRANSAREAS; ++i) {
1520 int iErr = ClearArea(pdx, i); // ...release any used memory
1521 if (iErr == U14ERR_UNLOCKFAIL)
1522 dev_err(&pdx->interface->dev, "%s Area %d was in use",
1523 __func__, i);
1524 }
1525 pdx->interface = NULL; // ...we kill off link to interface
1526 mutex_unlock(&pdx->io_mutex);
1527
1528 usb_kill_anchored_urbs(&pdx->submitted);
1529
1530 kref_put(&pdx->kref, ced_delete); // decrement our usage count
1531
1532 dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
1533 }
1534
1535 // Wait for all the urbs we know of to be done with, then kill off any that
1536 // are left. NBNB we will need to have a mechanism to stop circular xfers
1537 // from trying to fire off more urbs. We will wait up to 3 seconds for Urbs
1538 // to be done.
1539 void ced_draw_down(DEVICE_EXTENSION * pdx)
1540 {
1541 int time;
1542 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1543
1544 pdx->bInDrawDown = true;
1545 time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
1546 if (!time) { // if we timed out we kill the urbs
1547 usb_kill_anchored_urbs(&pdx->submitted);
1548 dev_err(&pdx->interface->dev, "%s timed out", __func__);
1549 }
1550 pdx->bInDrawDown = false;
1551 }
1552
1553 static int ced_suspend(struct usb_interface *intf, pm_message_t message)
1554 {
1555 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1556 if (!pdx)
1557 return 0;
1558 ced_draw_down(pdx);
1559
1560 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1561 return 0;
1562 }
1563
1564 static int ced_resume(struct usb_interface *intf)
1565 {
1566 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1567 if (!pdx)
1568 return 0;
1569 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1570 return 0;
1571 }
1572
1573 static int ced_pre_reset(struct usb_interface *intf)
1574 {
1575 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1576 dev_dbg(&pdx->interface->dev, "%s", __func__);
1577 mutex_lock(&pdx->io_mutex);
1578 ced_draw_down(pdx);
1579 return 0;
1580 }
1581
1582 static int ced_post_reset(struct usb_interface *intf)
1583 {
1584 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1585 dev_dbg(&pdx->interface->dev, "%s", __func__);
1586
1587 /* we are sure no URBs are active - no locking needed */
1588 pdx->errors = -EPIPE;
1589 mutex_unlock(&pdx->io_mutex);
1590
1591 return 0;
1592 }
1593
1594 static struct usb_driver ced_driver = {
1595 .name = "cedusb",
1596 .probe = ced_probe,
1597 .disconnect = ced_disconnect,
1598 .suspend = ced_suspend,
1599 .resume = ced_resume,
1600 .pre_reset = ced_pre_reset,
1601 .post_reset = ced_post_reset,
1602 .id_table = ced_table,
1603 .supports_autosuspend = 1,
1604 };
1605
1606 module_usb_driver(ced_driver);
1607 MODULE_LICENSE("GPL");
1608