1 /*
2 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/mm.h>
33 #include <linux/types.h>
34 #include <linux/device.h>
35 #include <linux/dmapool.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/highmem.h>
39 #include <linux/io.h>
40 #include <linux/uio.h>
41 #include <linux/rbtree.h>
42 #include <linux/spinlock.h>
43 #include <linux/delay.h>
44
45 #include "qib.h"
46 #include "qib_user_sdma.h"
47
48 /* minimum size of header */
49 #define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50 /* expected size of headers (for dma_pool) */
51 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52 /* attempt to drain the queue for 5 secs (500 iterations x 10 ms) */
53 #define QIB_USER_SDMA_DRAIN_TIMEOUT 500
54
55 /*
56 * Track how many times a process opens this driver.
57 */
58 static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
59
60 struct qib_user_sdma_rb_node {
61 struct rb_node node;
62 int refcount;
63 pid_t pid;
64 };
65
66 struct qib_user_sdma_pkt {
67 struct list_head list; /* list element */
68
69 u8 tiddma; /* if this is NEW tid-sdma */
70 u8 largepkt; /* this is large pkt from kmalloc */
71 u16 frag_size; /* frag size used by PSM */
72 u16 index; /* last header index or push index */
73 u16 naddr; /* dimension of addr (1..3) ... */
74 u16 addrlimit; /* addr array size */
75 u16 tidsmidx; /* current tidsm index */
76 u16 tidsmcount; /* tidsm array item count */
77 u16 payload_size; /* payload size so far for header */
78 u32 bytes_togo; /* bytes for processing */
79 u32 counter; /* sdma pkts queued counter for this entry */
80 struct qib_tid_session_member *tidsm; /* tid session member array */
81 struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
82 u64 added; /* global descq number of entries */
83
84 struct {
85 u16 offset; /* offset for kvaddr, addr */
86 u16 length; /* length in page */
87 u16 first_desc; /* first desc */
88 u16 last_desc; /* last desc */
89 u16 put_page; /* should we put_page? */
90 u16 dma_mapped; /* is page dma_mapped? */
91 u16 dma_length; /* for dma_unmap_page() */
92 u16 padding;
93 struct page *page; /* may be NULL (coherent mem) */
94 void *kvaddr; /* FIXME: only for pio hack */
95 dma_addr_t addr;
96 } addr[4]; /* max pages, any more and we coalesce */
97 };
98
99 struct qib_user_sdma_queue {
100 /*
101 * pkts sent to the dma engine are queued on this
102 * list head.  The elements of this list are of
103 * type struct qib_user_sdma_pkt...
104 */
105 struct list_head sent;
106
107 /*
108 * Because the above list will be accessed by both process
109 * context and the interrupt handler, we need a spinlock for it.
110 */
111 spinlock_t sent_lock ____cacheline_aligned_in_smp;
112
113 /* headers with expected length are allocated from here... */
114 char header_cache_name[64];
115 struct dma_pool *header_cache;
116
117 /* packets are allocated from the slab cache... */
118 char pkt_slab_name[64];
119 struct kmem_cache *pkt_slab;
120
121 /* as packets go on the queued queue, they are counted... */
122 u32 counter;
123 u32 sent_counter;
124 /* pending packets, not sending yet */
125 u32 num_pending;
126 /* sending packets, not complete yet */
127 u32 num_sending;
128 /* global descq entry number of the last packet sent */
129 u64 added;
130
131 /* dma page table */
132 struct rb_root dma_pages_root;
133
134 struct qib_user_sdma_rb_node *sdma_rb_node;
135
136 /* protect everything above... */
137 struct mutex lock;
138 };
139
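/*
 * The rb tree rooted at qib_user_sdma_rb_root is keyed by pid and
 * refcounted: all user sdma queues opened by the same process share
 * one node, and qib_user_sdma_push_pkts() uses the refcount to choose
 * between blocking and non-blocking submission.
 */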
140 static struct qib_user_sdma_rb_node *
141 qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
142 {
143 struct qib_user_sdma_rb_node *sdma_rb_node;
144 struct rb_node *node = root->rb_node;
145
146 while (node) {
147 sdma_rb_node = container_of(node,
148 struct qib_user_sdma_rb_node, node);
149 if (pid < sdma_rb_node->pid)
150 node = node->rb_left;
151 else if (pid > sdma_rb_node->pid)
152 node = node->rb_right;
153 else
154 return sdma_rb_node;
155 }
156 return NULL;
157 }
158
159 static int
160 qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
161 {
162 struct rb_node **node = &(root->rb_node);
163 struct rb_node *parent = NULL;
164 struct qib_user_sdma_rb_node *got;
165
166 while (*node) {
167 got = container_of(*node, struct qib_user_sdma_rb_node, node);
168 parent = *node;
169 if (new->pid < got->pid)
170 node = &((*node)->rb_left);
171 else if (new->pid > got->pid)
172 node = &((*node)->rb_right);
173 else
174 return 0;
175 }
176
177 rb_link_node(&new->node, parent, node);
178 rb_insert_color(&new->node, root);
179 return 1;
180 }
181
182 struct qib_user_sdma_queue *
183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
184 {
185 struct qib_user_sdma_queue *pq =
186 kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
187 struct qib_user_sdma_rb_node *sdma_rb_node;
188
189 if (!pq)
190 goto done;
191
192 pq->counter = 0;
193 pq->sent_counter = 0;
194 pq->num_pending = 0;
195 pq->num_sending = 0;
196 pq->added = 0;
197 pq->sdma_rb_node = NULL;
198
199 INIT_LIST_HEAD(&pq->sent);
200 spin_lock_init(&pq->sent_lock);
201 mutex_init(&pq->lock);
202
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
204 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
206 sizeof(struct qib_user_sdma_pkt),
207 0, 0, NULL);
208
209 if (!pq->pkt_slab)
210 goto err_kfree;
211
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
213 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
214 pq->header_cache = dma_pool_create(pq->header_cache_name,
215 dev,
216 QIB_USER_SDMA_EXP_HEADER_LENGTH,
217 4, 0);
218 if (!pq->header_cache)
219 goto err_slab;
220
221 pq->dma_pages_root = RB_ROOT;
222
223 sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
224 current->pid);
225 if (sdma_rb_node) {
226 sdma_rb_node->refcount++;
227 } else {
228 int ret;
229 sdma_rb_node = kmalloc(sizeof(
230 struct qib_user_sdma_rb_node), GFP_KERNEL);
231 if (!sdma_rb_node)
232 goto err_rb;
233
234 sdma_rb_node->refcount = 1;
235 sdma_rb_node->pid = current->pid;
236
237 ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
238 sdma_rb_node);
239 BUG_ON(ret == 0);
240 }
241 pq->sdma_rb_node = sdma_rb_node;
242
243 goto done;
244
245 err_rb:
246 dma_pool_destroy(pq->header_cache);
247 err_slab:
248 kmem_cache_destroy(pq->pkt_slab);
249 err_kfree:
250 kfree(pq);
251 pq = NULL;
252
253 done:
254 return pq;
255 }
256
257 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
258 int i, u16 offset, u16 len,
259 u16 first_desc, u16 last_desc,
260 u16 put_page, u16 dma_mapped,
261 struct page *page, void *kvaddr,
262 dma_addr_t dma_addr, u16 dma_length)
263 {
264 pkt->addr[i].offset = offset;
265 pkt->addr[i].length = len;
266 pkt->addr[i].first_desc = first_desc;
267 pkt->addr[i].last_desc = last_desc;
268 pkt->addr[i].put_page = put_page;
269 pkt->addr[i].dma_mapped = dma_mapped;
270 pkt->addr[i].page = page;
271 pkt->addr[i].kvaddr = kvaddr;
272 pkt->addr[i].addr = dma_addr;
273 pkt->addr[i].dma_length = dma_length;
274 }
275
276 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
277 size_t len, dma_addr_t *dma_addr)
278 {
279 void *hdr;
280
281 if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
282 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
283 dma_addr);
284 else
285 hdr = NULL;
286
287 if (!hdr) {
288 hdr = kmalloc(len, GFP_KERNEL);
289 if (!hdr)
290 return NULL;
291
292 *dma_addr = 0;
293 }
294
295 return hdr;
296 }
297
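/*
 * Turn one DMA-mapped page of user payload into SDMA fragments on
 * 'pkt'.  The data is split at frag_size (MTU) and, for tid-sdma, at
 * receiver tid-page boundaries.  Each time a fragment closes a packet
 * while bytes remain, the current header is DMA-mapped, a new header
 * is allocated and cloned from it, and both copies are fixed up (PBC
 * word count, LRH length, KDETH checksum, BTH sequence number) before
 * the next fragment of the same page is built.
 */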
298 static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
299 struct qib_user_sdma_queue *pq,
300 struct qib_user_sdma_pkt *pkt,
301 struct page *page, u16 put,
302 u16 offset, u16 len, void *kvaddr)
303 {
304 __le16 *pbc16;
305 void *pbcvaddr;
306 struct qib_message_header *hdr;
307 u16 newlen, pbclen, lastdesc, dma_mapped;
308 u32 vcto;
309 union qib_seqnum seqnum;
310 dma_addr_t pbcdaddr;
311 dma_addr_t dma_addr =
312 dma_map_page(&dd->pcidev->dev,
313 page, offset, len, DMA_TO_DEVICE);
314 int ret = 0;
315
316 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
317 /*
318 * dma mapping error: pkt has not taken ownership of
319 * this page yet, so release the page here and let
320 * the caller ignore it.
321 */
322 if (put) {
323 put_page(page);
324 } else {
325 /* coalesce case */
326 kunmap(page);
327 __free_page(page);
328 }
329 ret = -ENOMEM;
330 goto done;
331 }
332 offset = 0;
333 dma_mapped = 1;
334
335
336 next_fragment:
337
338 /*
339 * In tid-sdma, the transfer length is restricted by
340 * receiver side current tid page length.
341 */
342 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
343 newlen = pkt->tidsm[pkt->tidsmidx].length;
344 else
345 newlen = len;
346
347 /*
348 * Then the transfer length is restricted by MTU.
349 * the last descriptor flag is determined by:
350 * 1. the current packet is at frag size length.
351 * 2. the current tid page is done if tid-sdma.
352 * 3. there are no more bytes to go if sdma.
353 */
354 lastdesc = 0;
355 if ((pkt->payload_size + newlen) >= pkt->frag_size) {
356 newlen = pkt->frag_size - pkt->payload_size;
357 lastdesc = 1;
358 } else if (pkt->tiddma) {
359 if (newlen == pkt->tidsm[pkt->tidsmidx].length)
360 lastdesc = 1;
361 } else {
362 if (newlen == pkt->bytes_togo)
363 lastdesc = 1;
364 }
365
366 /* fill the next fragment in this page */
367 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
368 offset, newlen, /* offset, len */
369 0, lastdesc, /* first last desc */
370 put, dma_mapped, /* put page, dma mapped */
371 page, kvaddr, /* struct page, virt addr */
372 dma_addr, len); /* dma addr, dma length */
373 pkt->bytes_togo -= newlen;
374 pkt->payload_size += newlen;
375 pkt->naddr++;
376 if (pkt->naddr == pkt->addrlimit) {
377 ret = -EFAULT;
378 goto done;
379 }
380
381 /* If there are no more bytes to go (lastdesc==1). */
382 if (pkt->bytes_togo == 0) {
383 /* The packet is done; the header is not dma mapped yet,
384 * so it must be from kmalloc. */
385 if (!pkt->addr[pkt->index].addr) {
386 pkt->addr[pkt->index].addr =
387 dma_map_single(&dd->pcidev->dev,
388 pkt->addr[pkt->index].kvaddr,
389 pkt->addr[pkt->index].dma_length,
390 DMA_TO_DEVICE);
391 if (dma_mapping_error(&dd->pcidev->dev,
392 pkt->addr[pkt->index].addr)) {
393 ret = -ENOMEM;
394 goto done;
395 }
396 pkt->addr[pkt->index].dma_mapped = 1;
397 }
398
399 goto done;
400 }
401
402 /* If tid-sdma, advance tid info. */
403 if (pkt->tiddma) {
404 pkt->tidsm[pkt->tidsmidx].length -= newlen;
405 if (pkt->tidsm[pkt->tidsmidx].length) {
406 pkt->tidsm[pkt->tidsmidx].offset += newlen;
407 } else {
408 pkt->tidsmidx++;
409 if (pkt->tidsmidx == pkt->tidsmcount) {
410 ret = -EFAULT;
411 goto done;
412 }
413 }
414 }
415
416 /*
417 * If this is NOT the last descriptor (newlen==len),
418 * the current packet is not done yet, but the current
419 * send side page is done.
420 */
421 if (lastdesc == 0)
422 goto done;
423
424 /*
425 * If this driver is running under PSM with a message size
426 * that fits into one transfer unit, it is not possible
427 * to reach this line; otherwise, it is a bug.
428 */
429
430 /*
431 * Since the current packet is done and there are more
432 * bytes to go, we need to create a new sdma header, copying
433 * it from the previous sdma header, and then modify both.
434 */
435 pbclen = pkt->addr[pkt->index].length;
436 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
437 if (!pbcvaddr) {
438 ret = -ENOMEM;
439 goto done;
440 }
441 /* Copy the previous sdma header to new sdma header */
442 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
443 memcpy(pbcvaddr, pbc16, pbclen);
444
445 /* Modify the previous sdma header */
446 hdr = (struct qib_message_header *)&pbc16[4];
447
448 /* New pbc length */
449 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
450
451 /* New packet length */
452 hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
453
454 if (pkt->tiddma) {
455 /* turn on the header suppression */
456 hdr->iph.pkt_flags =
457 cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
458 /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
459 hdr->flags &= ~(0x04|0x20);
460 } else {
461 /* turn off extra bytes: 20-21 bits */
462 hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
463 /* turn off ACK_REQ: 0x04 */
464 hdr->flags &= ~(0x04);
465 }
466
467 /* New kdeth checksum */
468 vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
469 hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
470 be16_to_cpu(hdr->lrh[2]) -
471 ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
472 le16_to_cpu(hdr->iph.pkt_flags));
473
474 /* The packet is done; the header is not dma mapped yet,
475 * so it must be from kmalloc. */
476 if (!pkt->addr[pkt->index].addr) {
477 pkt->addr[pkt->index].addr =
478 dma_map_single(&dd->pcidev->dev,
479 pkt->addr[pkt->index].kvaddr,
480 pkt->addr[pkt->index].dma_length,
481 DMA_TO_DEVICE);
482 if (dma_mapping_error(&dd->pcidev->dev,
483 pkt->addr[pkt->index].addr)) {
484 ret = -ENOMEM;
485 goto done;
486 }
487 pkt->addr[pkt->index].dma_mapped = 1;
488 }
489
490 /* Modify the new sdma header */
491 pbc16 = (__le16 *)pbcvaddr;
492 hdr = (struct qib_message_header *)&pbc16[4];
493
494 /* New pbc length */
495 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
496
497 /* New packet length */
498 hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
499
500 if (pkt->tiddma) {
501 /* Set new tid and offset for new sdma header */
502 hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
503 (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
504 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
505 (pkt->tidsm[pkt->tidsmidx].offset>>2));
506 } else {
507 /* Middle protocol new packet offset */
508 hdr->uwords[2] += pkt->payload_size;
509 }
510
511 /* New kdeth checksum */
512 vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
513 hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
514 be16_to_cpu(hdr->lrh[2]) -
515 ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
516 le16_to_cpu(hdr->iph.pkt_flags));
517
518 /* Next sequence number in new sdma header */
519 seqnum.val = be32_to_cpu(hdr->bth[2]);
520 if (pkt->tiddma)
521 seqnum.seq++;
522 else
523 seqnum.pkt++;
524 hdr->bth[2] = cpu_to_be32(seqnum.val);
525
526 /* Init new sdma header. */
527 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
528 0, pbclen, /* offset, len */
529 1, 0, /* first last desc */
530 0, 0, /* put page, dma mapped */
531 NULL, pbcvaddr, /* struct page, virt addr */
532 pbcdaddr, pbclen); /* dma addr, dma length */
533 pkt->index = pkt->naddr;
534 pkt->payload_size = 0;
535 pkt->naddr++;
536 if (pkt->naddr == pkt->addrlimit) {
537 ret = -EFAULT;
538 goto done;
539 }
540
541 /* Prepare for next fragment in this page */
542 if (newlen != len) {
543 if (dma_mapped) {
544 put = 0;
545 dma_mapped = 0;
546 page = NULL;
547 kvaddr = NULL;
548 }
549 len -= newlen;
550 offset += newlen;
551
552 goto next_fragment;
553 }
554
555 done:
556 return ret;
557 }
558
559 /* we have too many pages in the iovec, coalesce into a single page */
560 static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
561 struct qib_user_sdma_queue *pq,
562 struct qib_user_sdma_pkt *pkt,
563 const struct iovec *iov,
564 unsigned long niov)
565 {
566 int ret = 0;
567 struct page *page = alloc_page(GFP_KERNEL);
568 void *mpage_save;
569 char *mpage;
570 int i;
571 int len = 0;
572
573 if (!page) {
574 ret = -ENOMEM;
575 goto done;
576 }
577
578 mpage = kmap(page);
579 mpage_save = mpage;
580 for (i = 0; i < niov; i++) {
581 int cfur;
582
583 cfur = copy_from_user(mpage,
584 iov[i].iov_base, iov[i].iov_len);
585 if (cfur) {
586 ret = -EFAULT;
587 goto free_unmap;
588 }
589
590 mpage += iov[i].iov_len;
591 len += iov[i].iov_len;
592 }
593
594 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
595 page, 0, 0, len, mpage_save);
596 goto done;
597
598 free_unmap:
599 kunmap(page);
600 __free_page(page);
601 done:
602 return ret;
603 }
604
605 /*
606 * How many pages in this iovec element?
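 * E.g. with 4 KiB pages, iov_base = 0x1ffc and iov_len = 8 straddles
 * a page boundary, so this returns 2.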
607 */
608 static int qib_user_sdma_num_pages(const struct iovec *iov)
609 {
610 const unsigned long addr = (unsigned long) iov->iov_base;
611 const unsigned long len = iov->iov_len;
612 const unsigned long spage = addr & PAGE_MASK;
613 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
614
615 return 1 + ((epage - spage) >> PAGE_SHIFT);
616 }
617
618 static void qib_user_sdma_free_pkt_frag(struct device *dev,
619 struct qib_user_sdma_queue *pq,
620 struct qib_user_sdma_pkt *pkt,
621 int frag)
622 {
623 const int i = frag;
624
625 if (pkt->addr[i].page) {
626 /* only user data has page */
627 if (pkt->addr[i].dma_mapped)
628 dma_unmap_page(dev,
629 pkt->addr[i].addr,
630 pkt->addr[i].dma_length,
631 DMA_TO_DEVICE);
632
633 if (pkt->addr[i].kvaddr)
634 kunmap(pkt->addr[i].page);
635
636 if (pkt->addr[i].put_page)
637 put_page(pkt->addr[i].page);
638 else
639 __free_page(pkt->addr[i].page);
640 } else if (pkt->addr[i].kvaddr) {
641 /* for headers */
642 if (pkt->addr[i].dma_mapped) {
643 /* from kmalloc & dma mapped */
644 dma_unmap_single(dev,
645 pkt->addr[i].addr,
646 pkt->addr[i].dma_length,
647 DMA_TO_DEVICE);
648 kfree(pkt->addr[i].kvaddr);
649 } else if (pkt->addr[i].addr) {
650 /* free coherent mem from cache... */
651 dma_pool_free(pq->header_cache,
652 pkt->addr[i].kvaddr, pkt->addr[i].addr);
653 } else {
654 /* from kmalloc but not dma mapped */
655 kfree(pkt->addr[i].kvaddr);
656 }
657 }
658 }
659
660 /* pin the pages of one iovec element; returns 0 or a negative errno */
661 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
662 struct qib_user_sdma_queue *pq,
663 struct qib_user_sdma_pkt *pkt,
664 unsigned long addr, int tlen, int npages)
665 {
666 struct page *pages[8];
667 int i, j;
668 int ret = 0;
669
670 while (npages) {
671 if (npages > 8)
672 j = 8;
673 else
674 j = npages;
675
676 ret = get_user_pages_fast(addr, j, 0, pages);
677 if (ret != j) {
678 i = 0;
679 j = ret;
680 ret = -ENOMEM;
681 goto free_pages;
682 }
683
684 for (i = 0; i < j; i++) {
685 /* map the pages... */
686 unsigned long fofs = addr & ~PAGE_MASK;
687 int flen = ((fofs + tlen) > PAGE_SIZE) ?
688 (PAGE_SIZE - fofs) : tlen;
689
690 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
691 pages[i], 1, fofs, flen, NULL);
692 if (ret < 0) {
693 /* the current page has been taken
694 * care of inside the above call.
695 */
696 i++;
697 goto free_pages;
698 }
699
700 addr += flen;
701 tlen -= flen;
702 }
703
704 npages -= j;
705 }
706
707 goto done;
708
709 /* on error, release all pages not yet managed by pkt */
710 free_pages:
711 while (i < j)
712 put_page(pages[i++]);
713
714 done:
715 return ret;
716 }
717
718 static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
719 struct qib_user_sdma_queue *pq,
720 struct qib_user_sdma_pkt *pkt,
721 const struct iovec *iov,
722 unsigned long niov)
723 {
724 int ret = 0;
725 unsigned long idx;
726
727 for (idx = 0; idx < niov; idx++) {
728 const int npages = qib_user_sdma_num_pages(iov + idx);
729 const unsigned long addr = (unsigned long) iov[idx].iov_base;
730
731 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
732 iov[idx].iov_len, npages);
733 if (ret < 0)
734 goto free_pkt;
735 }
736
737 goto done;
738
739 free_pkt:
740 /* we need to ignore the first entry here */
741 for (idx = 1; idx < pkt->naddr; idx++)
742 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
743
744 /* We need to dma unmap the first entry to restore it to its
745 * original state, so that the caller can free the memory on the
746 * error path; the caller does not know whether it was dma mapped. */
747 if (pkt->addr[0].dma_mapped) {
748 dma_unmap_single(&dd->pcidev->dev,
749 pkt->addr[0].addr,
750 pkt->addr[0].dma_length,
751 DMA_TO_DEVICE);
752 pkt->addr[0].addr = 0;
753 pkt->addr[0].dma_mapped = 0;
754 }
755
756 done:
757 return ret;
758 }
759
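/*
 * Attach the user payload to 'pkt'.  If the whole payload fits in a
 * single frag but covers at least as many pages as the fixed
 * pkt->addr[] array holds, it is copied into one freshly allocated
 * page; otherwise the user pages are pinned and used in place.
 */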
760 static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
761 struct qib_user_sdma_queue *pq,
762 struct qib_user_sdma_pkt *pkt,
763 const struct iovec *iov,
764 unsigned long niov, int npages)
765 {
766 int ret = 0;
767
768 if (pkt->frag_size == pkt->bytes_togo &&
769 npages >= ARRAY_SIZE(pkt->addr))
770 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
771 else
772 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
773
774 return ret;
775 }
776
777 /* free a packet list -- return counter value of last packet */
778 static void qib_user_sdma_free_pkt_list(struct device *dev,
779 struct qib_user_sdma_queue *pq,
780 struct list_head *list)
781 {
782 struct qib_user_sdma_pkt *pkt, *pkt_next;
783
784 list_for_each_entry_safe(pkt, pkt_next, list, list) {
785 int i;
786
787 for (i = 0; i < pkt->naddr; i++)
788 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
789
790 if (pkt->largepkt)
791 kfree(pkt);
792 else
793 kmem_cache_free(pq->pkt_slab, pkt);
794 }
795 INIT_LIST_HEAD(list);
796 }
797
798 /*
799 * copy headers, coalesce etc -- pq->lock must be held
800 *
801 * we queue all the packets on 'list', returning the
802 * number of iovec elements consumed.  list must be empty
803 * initially, since we clean it up if there is an error...
804 */
805 static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
806 struct qib_pportdata *ppd,
807 struct qib_user_sdma_queue *pq,
808 const struct iovec *iov,
809 unsigned long niov,
810 struct list_head *list,
811 int *maxpkts, int *ndesc)
812 {
813 unsigned long idx = 0;
814 int ret = 0;
815 int npkts = 0;
816 __le32 *pbc;
817 dma_addr_t dma_addr;
818 struct qib_user_sdma_pkt *pkt = NULL;
819 size_t len;
820 size_t nw;
821 u32 counter = pq->counter;
822 u16 frag_size;
823
824 while (idx < niov && npkts < *maxpkts) {
825 const unsigned long addr = (unsigned long) iov[idx].iov_base;
826 const unsigned long idx_save = idx;
827 unsigned pktnw;
828 unsigned pktnwc;
829 int nfrags = 0;
830 int npages = 0;
831 int bytes_togo = 0;
832 int tiddma = 0;
833 int cfur;
834
835 len = iov[idx].iov_len;
836 nw = len >> 2;
837
838 if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
839 len > PAGE_SIZE || len & 3 || addr & 3) {
840 ret = -EINVAL;
841 goto free_list;
842 }
843
844 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
845 if (!pbc) {
846 ret = -ENOMEM;
847 goto free_list;
848 }
849
850 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
851 if (cfur) {
852 ret = -EFAULT;
853 goto free_pbc;
854 }
855
856 /*
857 * This assignment is a bit strange.  It's because
858 * the pbc counts the number of 32 bit words in the full
859 * packet _except_ the first word of the pbc itself...
860 */
861 pktnwc = nw - 1;
862
863 /*
864 * pktnw computation yields the number of 32 bit words
865 * that the caller has indicated in the PBC.  Note that
866 * this is one less than the total number of words that
867 * go to the send DMA engine, as the first 32 bit word
868 * of the PBC itself is not counted. Armed with this count,
869 * we can verify that the packet is consistent with the
870 * iovec lengths.
871 */
872 pktnw = le32_to_cpu(*pbc) & 0xFFFF;
873 if (pktnw < pktnwc) {
874 ret = -EINVAL;
875 goto free_pbc;
876 }
877
878 idx++;
879 while (pktnwc < pktnw && idx < niov) {
880 const size_t slen = iov[idx].iov_len;
881 const unsigned long faddr =
882 (unsigned long) iov[idx].iov_base;
883
884 if (slen & 3 || faddr & 3 || !slen) {
885 ret = -EINVAL;
886 goto free_pbc;
887 }
888
889 npages += qib_user_sdma_num_pages(&iov[idx]);
890
891 bytes_togo += slen;
892 pktnwc += slen >> 2;
893 idx++;
894 nfrags++;
895 }
896
897 if (pktnwc != pktnw) {
898 ret = -EINVAL;
899 goto free_pbc;
900 }
901
902 frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
903 if (((frag_size ? frag_size : bytes_togo) + len) >
904 ppd->ibmaxlen) {
905 ret = -EINVAL;
906 goto free_pbc;
907 }
908
909 if (frag_size) {
910 int pktsize, tidsmsize, n;
911
912 n = npages*((2*PAGE_SIZE/frag_size)+1);
913 pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;
914
915 /*
916 * Determine if this is tid-sdma or just sdma.
917 */
918 tiddma = (((le32_to_cpu(pbc[7])>>
919 QLOGIC_IB_I_TID_SHIFT)&
920 QLOGIC_IB_I_TID_MASK) !=
921 QLOGIC_IB_I_TID_MASK);
922
923 if (tiddma)
924 tidsmsize = iov[idx].iov_len;
925 else
926 tidsmsize = 0;
927
928 pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
929 if (!pkt) {
930 ret = -ENOMEM;
931 goto free_pbc;
932 }
933 pkt->largepkt = 1;
934 pkt->frag_size = frag_size;
935 pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
936
937 if (tiddma) {
938 char *tidsm = (char *)pkt + pktsize;
939 cfur = copy_from_user(tidsm,
940 iov[idx].iov_base, tidsmsize);
941 if (cfur) {
942 ret = -EFAULT;
943 goto free_pkt;
944 }
945 pkt->tidsm =
946 (struct qib_tid_session_member *)tidsm;
947 pkt->tidsmcount = tidsmsize/
948 sizeof(struct qib_tid_session_member);
949 pkt->tidsmidx = 0;
950 idx++;
951 }
952
953 /*
954 * The pbc 'fill1' field is borrowed to pass the frag size; we
955 * need to clear it after picking up the frag size, since the
956 * hardware requires this field to be zero.
957 */
958 *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
959 } else {
960 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
961 if (!pkt) {
962 ret = -ENOMEM;
963 goto free_pbc;
964 }
965 pkt->largepkt = 0;
966 pkt->frag_size = bytes_togo;
967 pkt->addrlimit = ARRAY_SIZE(pkt->addr);
968 }
969 pkt->bytes_togo = bytes_togo;
970 pkt->payload_size = 0;
971 pkt->counter = counter;
972 pkt->tiddma = tiddma;
973
974 /* setup the first header */
975 qib_user_sdma_init_frag(pkt, 0, /* index */
976 0, len, /* offset, len */
977 1, 0, /* first last desc */
978 0, 0, /* put page, dma mapped */
979 NULL, pbc, /* struct page, virt addr */
980 dma_addr, len); /* dma addr, dma length */
981 pkt->index = 0;
982 pkt->naddr = 1;
983
984 if (nfrags) {
985 ret = qib_user_sdma_init_payload(dd, pq, pkt,
986 iov + idx_save + 1,
987 nfrags, npages);
988 if (ret < 0)
989 goto free_pkt;
990 } else {
991 /* since there is no payload, mark the
992 * header as the last desc. */
993 pkt->addr[0].last_desc = 1;
994
995 if (dma_addr == 0) {
996 /*
997 * the header is not dma mapped yet.
998 * it should be from kmalloc.
999 */
1000 dma_addr = dma_map_single(&dd->pcidev->dev,
1001 pbc, len, DMA_TO_DEVICE);
1002 if (dma_mapping_error(&dd->pcidev->dev,
1003 dma_addr)) {
1004 ret = -ENOMEM;
1005 goto free_pkt;
1006 }
1007 pkt->addr[0].addr = dma_addr;
1008 pkt->addr[0].dma_mapped = 1;
1009 }
1010 }
1011
1012 counter++;
1013 npkts++;
1014 pkt->pq = pq;
1015 pkt->index = 0; /* reset index for push on hw */
1016 *ndesc += pkt->naddr;
1017
1018 list_add_tail(&pkt->list, list);
1019 }
1020
1021 *maxpkts = npkts;
1022 ret = idx;
1023 goto done;
1024
1025 free_pkt:
1026 if (pkt->largepkt)
1027 kfree(pkt);
1028 else
1029 kmem_cache_free(pq->pkt_slab, pkt);
1030 free_pbc:
1031 if (dma_addr)
1032 dma_pool_free(pq->header_cache, pbc, dma_addr);
1033 else
1034 kfree(pbc);
1035 free_list:
1036 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
1037 done:
1038 return ret;
1039 }
1040
1041 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
1042 u32 c)
1043 {
1044 pq->sent_counter = c;
1045 }
1046
1047 /* try to clean out queue -- needs pq->lock */
1048 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
1049 struct qib_user_sdma_queue *pq)
1050 {
1051 struct qib_devdata *dd = ppd->dd;
1052 struct list_head free_list;
1053 struct qib_user_sdma_pkt *pkt;
1054 struct qib_user_sdma_pkt *pkt_prev;
1055 unsigned long flags;
1056 int ret = 0;
1057
1058 if (!pq->num_sending)
1059 return 0;
1060
1061 INIT_LIST_HEAD(&free_list);
1062
1063 /*
1064 * We need this spin lock here because the interrupt handler
1065 * might modify this list in qib_user_sdma_send_desc(); also,
1066 * we must not be interrupted here, otherwise it is a deadlock.
1067 */
1068 spin_lock_irqsave(&pq->sent_lock, flags);
1069 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
1070 s64 descd = ppd->sdma_descq_removed - pkt->added;
1071
1072 if (descd < 0)
1073 break;
1074
1075 list_move_tail(&pkt->list, &free_list);
1076
1077 /* one more packet cleaned */
1078 ret++;
1079 pq->num_sending--;
1080 }
1081 spin_unlock_irqrestore(&pq->sent_lock, flags);
1082
1083 if (!list_empty(&free_list)) {
1084 u32 counter;
1085
1086 pkt = list_entry(free_list.prev,
1087 struct qib_user_sdma_pkt, list);
1088 counter = pkt->counter;
1089
1090 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1091 qib_user_sdma_set_complete_counter(pq, counter);
1092 }
1093
1094 return ret;
1095 }
1096
1097 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
1098 {
1099 if (!pq)
1100 return;
1101
1102 pq->sdma_rb_node->refcount--;
1103 if (pq->sdma_rb_node->refcount == 0) {
1104 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
1105 kfree(pq->sdma_rb_node);
1106 }
1107 dma_pool_destroy(pq->header_cache);
1108 kmem_cache_destroy(pq->pkt_slab);
1109 kfree(pq);
1110 }
1111
1112 /* clean descriptor queue, returns > 0 if some elements cleaned */
1113 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
1114 {
1115 int ret;
1116 unsigned long flags;
1117
1118 spin_lock_irqsave(&ppd->sdma_lock, flags);
1119 ret = qib_sdma_make_progress(ppd);
1120 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1121
1122 return ret;
1123 }
1124
1125 /* we're in close, drain packets so that we can cleanup successfully... */
1126 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1127 struct qib_user_sdma_queue *pq)
1128 {
1129 struct qib_devdata *dd = ppd->dd;
1130 unsigned long flags;
1131 int i;
1132
1133 if (!pq)
1134 return;
1135
1136 for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
1137 mutex_lock(&pq->lock);
1138 if (!pq->num_pending && !pq->num_sending) {
1139 mutex_unlock(&pq->lock);
1140 break;
1141 }
1142 qib_user_sdma_hwqueue_clean(ppd);
1143 qib_user_sdma_queue_clean(ppd, pq);
1144 mutex_unlock(&pq->lock);
1145 msleep(10);
1146 }
1147
1148 if (pq->num_pending || pq->num_sending) {
1149 struct qib_user_sdma_pkt *pkt;
1150 struct qib_user_sdma_pkt *pkt_prev;
1151 struct list_head free_list;
1152
1153 mutex_lock(&pq->lock);
1154 spin_lock_irqsave(&ppd->sdma_lock, flags);
1155 /*
1156 * Since we hold sdma_lock, it is safe without sent_lock.
1157 */
1158 if (pq->num_pending) {
1159 list_for_each_entry_safe(pkt, pkt_prev,
1160 &ppd->sdma_userpending, list) {
1161 if (pkt->pq == pq) {
1162 list_move_tail(&pkt->list, &pq->sent);
1163 pq->num_pending--;
1164 pq->num_sending++;
1165 }
1166 }
1167 }
1168 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1169
1170 qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
1171 INIT_LIST_HEAD(&free_list);
1172 list_splice_init(&pq->sent, &free_list);
1173 pq->num_sending = 0;
1174 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1175 mutex_unlock(&pq->lock);
1176 }
1177 }
1178
1179 static inline __le64 qib_sdma_make_desc0(u8 gen,
1180 u64 addr, u64 dwlen, u64 dwoffset)
1181 {
1182 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
1183 ((addr & 0xfffffffcULL) << 32) |
1184 /* SDmaGeneration[1:0] */
1185 ((gen & 3ULL) << 30) |
1186 /* SDmaDwordCount[10:0] */
1187 ((dwlen & 0x7ffULL) << 16) |
1188 /* SDmaBufOffset[12:2] */
1189 (dwoffset & 0x7ffULL));
1190 }
1191
1192 static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
1193 {
1194 return descq | cpu_to_le64(1ULL << 12);
1195 }
1196
1197 static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
1198 {
1199 /* last */ /* dma head */
1200 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
1201 }
1202
1203 static inline __le64 qib_sdma_make_desc1(u64 addr)
1204 {
1205 /* SDmaPhyAddr[47:32] */
1206 return cpu_to_le64(addr >> 32);
1207 }
1208
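/*
 * Build the two 64-bit descriptor words for fragment 'idx' of 'pkt'
 * at ring slot 'tail', setting the first/last-descriptor bits and,
 * when requested, the interrupt-request bit on the last descriptor.
 */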
1209 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
1210 struct qib_user_sdma_pkt *pkt, int idx,
1211 unsigned ofs, u16 tail, u8 gen)
1212 {
1213 const u64 addr = (u64) pkt->addr[idx].addr +
1214 (u64) pkt->addr[idx].offset;
1215 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
1216 __le64 *descqp;
1217 __le64 descq0;
1218
1219 descqp = &ppd->sdma_descq[tail].qw[0];
1220
1221 descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
1222 if (pkt->addr[idx].first_desc)
1223 descq0 = qib_sdma_make_first_desc0(descq0);
1224 if (pkt->addr[idx].last_desc) {
1225 descq0 = qib_sdma_make_last_desc0(descq0);
1226 if (ppd->sdma_intrequest) {
1227 descq0 |= cpu_to_le64(1ULL << 15);
1228 ppd->sdma_intrequest = 0;
1229 }
1230 }
1231
1232 descqp[0] = descq0;
1233 descqp[1] = qib_sdma_make_desc1(addr);
1234 }
1235
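/*
 * Push descriptors from 'pktlist' into the hardware descriptor ring;
 * callers hold ppd->sdma_lock.  Fully described packets are moved to
 * their queue's 'sent' list, and the chip tail register is updated
 * once per pass.
 */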
1236 void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
1237 struct list_head *pktlist)
1238 {
1239 struct qib_devdata *dd = ppd->dd;
1240 u16 nfree, nsent;
1241 u16 tail, tail_c;
1242 u8 gen, gen_c;
1243
1244 nfree = qib_sdma_descq_freecnt(ppd);
1245 if (!nfree)
1246 return;
1247
1248 retry:
1249 nsent = 0;
1250 tail_c = tail = ppd->sdma_descq_tail;
1251 gen_c = gen = ppd->sdma_generation;
1252 while (!list_empty(pktlist)) {
1253 struct qib_user_sdma_pkt *pkt =
1254 list_entry(pktlist->next, struct qib_user_sdma_pkt,
1255 list);
1256 int i, j, c = 0;
1257 unsigned ofs = 0;
1258 u16 dtail = tail;
1259
1260 for (i = pkt->index; i < pkt->naddr && nfree; i++) {
1261 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
1262 ofs += pkt->addr[i].length >> 2;
1263
1264 if (++tail == ppd->sdma_descq_cnt) {
1265 tail = 0;
1266 ++gen;
1267 ppd->sdma_intrequest = 1;
1268 } else if (tail == (ppd->sdma_descq_cnt>>1)) {
1269 ppd->sdma_intrequest = 1;
1270 }
1271 nfree--;
1272 if (pkt->addr[i].last_desc == 0)
1273 continue;
1274
1275 /*
1276 * If the packet is >= 2KB mtu equivalent, we
1277 * have to use the large buffers, and have to
1278 * mark each descriptor as part of a large
1279 * buffer packet.
1280 */
1281 if (ofs > dd->piosize2kmax_dwords) {
1282 for (j = pkt->index; j <= i; j++) {
1283 ppd->sdma_descq[dtail].qw[0] |=
1284 cpu_to_le64(1ULL << 14);
1285 if (++dtail == ppd->sdma_descq_cnt)
1286 dtail = 0;
1287 }
1288 }
1289 c += i + 1 - pkt->index;
1290 pkt->index = i + 1; /* index for next first */
1291 tail_c = dtail = tail;
1292 gen_c = gen;
1293 ofs = 0; /* reset for next packet */
1294 }
1295
1296 ppd->sdma_descq_added += c;
1297 nsent += c;
1298 if (pkt->index == pkt->naddr) {
1299 pkt->added = ppd->sdma_descq_added;
1300 pkt->pq->added = pkt->added;
1301 pkt->pq->num_pending--;
1302 spin_lock(&pkt->pq->sent_lock);
1303 pkt->pq->num_sending++;
1304 list_move_tail(&pkt->list, &pkt->pq->sent);
1305 spin_unlock(&pkt->pq->sent_lock);
1306 }
1307 if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
1308 break;
1309 }
1310
1311 /* advance the tail on the chip if necessary */
1312 if (ppd->sdma_descq_tail != tail_c) {
1313 ppd->sdma_generation = gen_c;
1314 dd->f_sdma_update_tail(ppd, tail_c);
1315 }
1316
1317 if (nfree && !list_empty(pktlist))
1318 goto retry;
1319
1320 return;
1321 }
1322
1323 /* pq->lock must be held, get packets on the wire... */
1324 static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
1325 struct qib_user_sdma_queue *pq,
1326 struct list_head *pktlist, int count)
1327 {
1328 unsigned long flags;
1329
1330 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
1331 return -ECOMM;
1332
1333 /* non-blocking mode */
1334 if (pq->sdma_rb_node->refcount > 1) {
1335 spin_lock_irqsave(&ppd->sdma_lock, flags);
1336 if (unlikely(!__qib_sdma_running(ppd))) {
1337 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1338 return -ECOMM;
1339 }
1340 pq->num_pending += count;
1341 list_splice_tail_init(pktlist, &ppd->sdma_userpending);
1342 qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
1343 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1344 return 0;
1345 }
1346
1347 /* In this case, descriptors from this process are not
1348 * linked to the ppd pending queue, and the interrupt handler
1349 * won't update this process, so it is OK to modify the queue
1350 * state directly without the sdma lock.
1351 */
1352
1353
1354 pq->num_pending += count;
1355 /*
1356 * Blocking mode for a single-rail process: we must
1357 * release/regain sdma_lock to give other processes a
1358 * chance to make progress.  This is important for
1359 * performance.
1360 */
1361 do {
1362 spin_lock_irqsave(&ppd->sdma_lock, flags);
1363 if (unlikely(!__qib_sdma_running(ppd))) {
1364 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1365 return -ECOMM;
1366 }
1367 qib_user_sdma_send_desc(ppd, pktlist);
1368 if (!list_empty(pktlist))
1369 qib_sdma_make_progress(ppd);
1370 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1371 } while (!list_empty(pktlist));
1372
1373 return 0;
1374 }
1375
1376 int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
1377 struct qib_user_sdma_queue *pq,
1378 const struct iovec *iov,
1379 unsigned long dim)
1380 {
1381 struct qib_devdata *dd = rcd->dd;
1382 struct qib_pportdata *ppd = rcd->ppd;
1383 int ret = 0;
1384 struct list_head list;
1385 int npkts = 0;
1386
1387 INIT_LIST_HEAD(&list);
1388
1389 mutex_lock(&pq->lock);
1390
1391 /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
1392 if (!qib_sdma_running(ppd))
1393 goto done_unlock;
1394
1395 /* if I have packets not complete yet */
1396 if (pq->added > ppd->sdma_descq_removed)
1397 qib_user_sdma_hwqueue_clean(ppd);
1398 /* if I have complete packets to be freed */
1399 if (pq->num_sending)
1400 qib_user_sdma_queue_clean(ppd, pq);
1401
1402 while (dim) {
1403 int mxp = 1;
1404 int ndesc = 0;
1405
1406 ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1407 iov, dim, &list, &mxp, &ndesc);
1408 if (ret < 0)
1409 goto done_unlock;
1410 else {
1411 dim -= ret;
1412 iov += ret;
1413 }
1414
1415 /* force packets onto the sdma hw queue... */
1416 if (!list_empty(&list)) {
1417 /*
1418 * Lazily clean hw queue.
1419 */
1420 if (qib_sdma_descq_freecnt(ppd) < ndesc) {
1421 qib_user_sdma_hwqueue_clean(ppd);
1422 if (pq->num_sending)
1423 qib_user_sdma_queue_clean(ppd, pq);
1424 }
1425
1426 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1427 if (ret < 0)
1428 goto done_unlock;
1429 else {
1430 npkts += mxp;
1431 pq->counter += mxp;
1432 }
1433 }
1434 }
1435
1436 done_unlock:
1437 if (!list_empty(&list))
1438 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1439 mutex_unlock(&pq->lock);
1440
1441 return (ret < 0) ? ret : npkts;
1442 }
1443
1444 int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
1445 struct qib_user_sdma_queue *pq)
1446 {
1447 int ret = 0;
1448
1449 mutex_lock(&pq->lock);
1450 qib_user_sdma_hwqueue_clean(ppd);
1451 ret = qib_user_sdma_queue_clean(ppd, pq);
1452 mutex_unlock(&pq->lock);
1453
1454 return ret;
1455 }
1456
1457 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1458 {
1459 return pq ? pq->sent_counter : 0;
1460 }
1461
1462 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1463 {
1464 return pq ? pq->counter : 0;
1465 }
1466