Lines Matching refs:pq

81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */  member
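The member at line 81 is a back-pointer: each in-flight packet records which queue built it, so the descriptor writer, the completion reaper, and the drain path can update the owning queue's counters without any lookup. Below is a cut-down sketch of the structures as the references in this listing imply them; it keeps only the fields the listing actually touches, and types are assumptions wherever the listing does not show them.

    /* Sketch only: reduced versions of the structures implied by this listing;
     * the real definitions carry more fields. */
    struct qib_user_sdma_rb_node {          /* shape inferred from lines 1114-1117 */
        struct rb_node node;
        int refcount;
    };

    struct qib_user_sdma_queue {
        u32 counter;                    /* packets accepted from userspace */
        u32 sent_counter;               /* packets known to be complete */
        u32 num_pending;                /* built/pushed, not yet on the ring */
        u32 num_sending;                /* on the ring, awaiting completion */
        u32 added;                      /* ring index of our last descriptor */
        struct list_head sent;          /* packets waiting for completion */
        spinlock_t sent_lock;           /* protects the sent list */
        struct mutex lock;              /* serializes writev/drain/progress */
        struct kmem_cache *pkt_slab;    /* per-queue slab for packet structs */
        char pkt_slab_name[64];         /* buffer size assumed */
        struct dma_pool *header_cache;  /* per-queue pool for DMA'd headers */
        char header_cache_name[64];     /* buffer size assumed */
        struct rb_root dma_pages_root;  /* pinned-page bookkeeping */
        struct qib_user_sdma_rb_node *sdma_rb_node; /* shared, refcounted */
    };

    struct qib_user_sdma_pkt {
        struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
        struct list_head list;          /* on a pending, sent, or free list */
    };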
185 struct qib_user_sdma_queue *pq = in qib_user_sdma_queue_create() local
189 if (!pq) in qib_user_sdma_queue_create()
192 pq->counter = 0; in qib_user_sdma_queue_create()
193 pq->sent_counter = 0; in qib_user_sdma_queue_create()
194 pq->num_pending = 0; in qib_user_sdma_queue_create()
195 pq->num_sending = 0; in qib_user_sdma_queue_create()
196 pq->added = 0; in qib_user_sdma_queue_create()
197 pq->sdma_rb_node = NULL; in qib_user_sdma_queue_create()
199 INIT_LIST_HEAD(&pq->sent); in qib_user_sdma_queue_create()
200 spin_lock_init(&pq->sent_lock); in qib_user_sdma_queue_create()
201 mutex_init(&pq->lock); in qib_user_sdma_queue_create()
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), in qib_user_sdma_queue_create()
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, in qib_user_sdma_queue_create()
209 if (!pq->pkt_slab) in qib_user_sdma_queue_create()
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), in qib_user_sdma_queue_create()
214 pq->header_cache = dma_pool_create(pq->header_cache_name, in qib_user_sdma_queue_create()
218 if (!pq->header_cache) in qib_user_sdma_queue_create()
221 pq->dma_pages_root = RB_ROOT; in qib_user_sdma_queue_create()
238 pq->sdma_rb_node = sdma_rb_node; in qib_user_sdma_queue_create()
243 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_create()
245 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_create()
247 kfree(pq); in qib_user_sdma_queue_create()
248 pq = NULL; in qib_user_sdma_queue_create()
251 return pq; in qib_user_sdma_queue_create()
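Lines 185–251 are the constructor. Read together they follow the usual allocate, zero-init, create-resources, unwind-on-failure shape: counters and lists are initialized, a per-queue slab is created for packet structures, a per-queue DMA pool is created for headers, and each failure point releases whatever was set up before it, ending with pq = NULL so the caller sees the failure. The sketch below is a hedged reduction of that shape: the real parameter list, the sdma_rb_node setup at line 238, and the sizes passed to the allocators are not visible in this listing, so dev, ctxt, pkt_size and hdr_size are placeholders, the cache name strings are illustrative, and kzalloc stands in for whatever allocator line 185 actually continues into.

    #include <linux/slab.h>
    #include <linux/dmapool.h>

    /* Sketch of the constructor shape implied by lines 185-251; not the
     * driver's actual code. */
    struct qib_user_sdma_queue *example_queue_create(struct device *dev, int ctxt,
                                                     size_t pkt_size, size_t hdr_size)
    {
        struct qib_user_sdma_queue *pq = kzalloc(sizeof(*pq), GFP_KERNEL);

        if (!pq)
            goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        pq->num_pending = 0;
        pq->num_sending = 0;
        pq->added = 0;

        INIT_LIST_HEAD(&pq->sent);
        spin_lock_init(&pq->sent_lock);
        mutex_init(&pq->lock);

        /* Per-queue slab so packet structs come from a dedicated cache. */
        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%d", ctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, pkt_size, 0, 0, NULL);
        if (!pq->pkt_slab)
            goto err_free_pq;

        /* Coherent DMA pool for the headers the hardware reads directly. */
        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-hdrs-%d", ctxt);
        pq->header_cache = dma_pool_create(pq->header_cache_name, dev,
                                           hdr_size, 4, 0);
        if (!pq->header_cache)
            goto err_destroy_slab;

        pq->dma_pages_root = RB_ROOT;
        goto done;

    err_destroy_slab:
        kmem_cache_destroy(pq->pkt_slab);
    err_free_pq:
        kfree(pq);
        pq = NULL;
    done:
        return pq;
    }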
273 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq, in qib_user_sdma_alloc_header() argument
279 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL, in qib_user_sdma_alloc_header()
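Lines 273–279 show where the per-queue pool pays off: a packet header is taken from pq->header_cache with dma_pool_alloc(), which returns the CPU virtual address and writes the bus address the SDMA descriptor needs into the caller's dma_addr_t. The matching dma_pool_free() shows up at line 648 when a header fragment is torn down. A small hedged sketch of the pair (helper names are illustrative, not the driver's):

    /* Sketch: header allocation and release against the queue's coherent pool. */
    static void *example_alloc_header(struct qib_user_sdma_queue *pq,
                                      dma_addr_t *dma_addr)
    {
        /* CPU address is returned; the device address lands in *dma_addr. */
        return dma_pool_alloc(pq->header_cache, GFP_KERNEL, dma_addr);
    }

    static void example_free_header(struct qib_user_sdma_queue *pq,
                                    void *hdr, dma_addr_t dma_addr)
    {
        dma_pool_free(pq->header_cache, hdr, dma_addr);
    }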
296 struct qib_user_sdma_queue *pq, in qib_user_sdma_page_to_frags() argument
433 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr); in qib_user_sdma_page_to_frags()
558 struct qib_user_sdma_queue *pq, in qib_user_sdma_coalesce() argument
591 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
616 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_frag() argument
648 dma_pool_free(pq->header_cache, in qib_user_sdma_free_pkt_frag()
659 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pages() argument
687 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
716 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pkt() argument
728 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
739 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
758 struct qib_user_sdma_queue *pq, in qib_user_sdma_init_payload() argument
767 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
769 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
776 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_list() argument
785 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
790 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
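Lines 776–790 are the common teardown helper: walk a list of packets, release each packet's fragments (which is where the dma_pool_free above happens, along with unpinning any mapped user pages), and hand the packet structure back to the per-queue slab. A hedged sketch of that loop; the per-packet fragment count (naddr below) and the frag-release helper are placeholders for what the listing only shows indirectly.

    /* Sketch: release every packet on 'list' back to the queue's slab. */
    static void example_free_pkt_list(struct device *dev,
                                      struct qib_user_sdma_queue *pq,
                                      struct list_head *list)
    {
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
            int i;

            /* Unmap and free each fragment first: headers go back to the
             * dma_pool, pinned pages get released. */
            for (i = 0; i < pkt->naddr; i++)
                example_free_pkt_frag(dev, pq, pkt, i);

            kmem_cache_free(pq->pkt_slab, pkt);
        }

        /* The entries were freed in place, so just reset the list head. */
        INIT_LIST_HEAD(list);
    }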
804 struct qib_user_sdma_queue *pq, in qib_user_sdma_queue_pkts() argument
818 u32 counter = pq->counter; in qib_user_sdma_queue_pkts()
841 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr); in qib_user_sdma_queue_pkts()
972 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
997 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
1026 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1041 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
1044 dma_pool_free(pq->header_cache, pbc, dma_addr); in qib_user_sdma_queue_pkts()
1048 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); in qib_user_sdma_queue_pkts()
1053 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq, in qib_user_sdma_set_complete_counter() argument
1056 pq->sent_counter = c; in qib_user_sdma_set_complete_counter()
1061 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_clean() argument
1070 if (!pq->num_sending) in qib_user_sdma_queue_clean()
1080 spin_lock_irqsave(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1081 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1091 pq->num_sending--; in qib_user_sdma_queue_clean()
1093 spin_unlock_irqrestore(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1102 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_clean()
1103 qib_user_sdma_set_complete_counter(pq, counter); in qib_user_sdma_queue_clean()
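Lines 1061–1103 show the completion-reaping pattern: return immediately if num_sending is zero; otherwise take sent_lock, walk pq->sent, move every packet the hardware has retired onto a private free list while decrementing num_sending, drop the lock, and only then do the expensive frees and publish the new completion counter (the one-line helper at lines 1053–1056). Deferring the heavy work keeps the spinlock hold time short. A hedged sketch of that shape; the retired test and the per-packet counter field are placeholders, since the listing shows neither how completion is detected nor the real return convention.

    /* Sketch: reap completed packets off pq->sent with minimal lock hold time. */
    static int example_queue_clean(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq)
    {
        struct qib_user_sdma_pkt *pkt, *pkt_prev;
        struct list_head free_list;
        unsigned long flags;
        u32 last_counter = 0;
        int nreaped = 0;

        if (!pq->num_sending)
            return 0;

        INIT_LIST_HEAD(&free_list);

        spin_lock_irqsave(&pq->sent_lock, flags);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
            if (!example_pkt_retired(ppd, pkt))     /* placeholder predicate */
                break;
            last_counter = pkt->counter;            /* per-packet counter, name assumed */
            list_move_tail(&pkt->list, &free_list);
            pq->num_sending--;
            nreaped++;
        }
        spin_unlock_irqrestore(&pq->sent_lock, flags);

        /* Heavy work (dma_pool_free, page unpinning, slab frees) happens
         * outside the spinlock. */
        if (nreaped) {
            example_free_pkt_list(&ppd->dd->pcidev->dev, pq, &free_list);
            pq->sent_counter = last_counter;        /* mirrors lines 1053-1056 */
        }

        return nreaped;     /* the driver's actual return value is not shown here */
    }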
1109 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_destroy() argument
1111 if (!pq) in qib_user_sdma_queue_destroy()
1114 pq->sdma_rb_node->refcount--; in qib_user_sdma_queue_destroy()
1115 if (pq->sdma_rb_node->refcount == 0) { in qib_user_sdma_queue_destroy()
1116 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root); in qib_user_sdma_queue_destroy()
1117 kfree(pq->sdma_rb_node); in qib_user_sdma_queue_destroy()
1119 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_destroy()
1120 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_destroy()
1121 kfree(pq); in qib_user_sdma_queue_destroy()
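Lines 1109–1121 show teardown mirroring the constructor, with one extra wrinkle: the queue holds a reference on a shared red-black-tree node, and only the last queue to drop its reference erases the node from qib_user_sdma_rb_root and frees it; after that the DMA pool and slab created at lines 214 and 205 are destroyed and the queue itself is freed. A hedged reassembly of those lines (whatever locking protects the refcount and the tree is not visible in this listing and is omitted):

    /* Sketch: destructor shape per lines 1109-1121. */
    void example_queue_destroy(struct qib_user_sdma_queue *pq)
    {
        if (!pq)
            return;

        /* Drop this queue's reference on the shared rb node; the last
         * holder unlinks it from the tree and frees it. */
        pq->sdma_rb_node->refcount--;
        if (pq->sdma_rb_node->refcount == 0) {
            rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
            kfree(pq->sdma_rb_node);
        }

        dma_pool_destroy(pq->header_cache);
        kmem_cache_destroy(pq->pkt_slab);
        kfree(pq);
    }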
1139 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_drain() argument
1145 if (!pq) in qib_user_sdma_queue_drain()
1149 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1150 if (!pq->num_pending && !pq->num_sending) { in qib_user_sdma_queue_drain()
1151 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1155 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_queue_drain()
1156 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1160 if (pq->num_pending || pq->num_sending) { in qib_user_sdma_queue_drain()
1165 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1170 if (pq->num_pending) { in qib_user_sdma_queue_drain()
1173 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1174 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1175 pq->num_pending--; in qib_user_sdma_queue_drain()
1176 pq->num_sending++; in qib_user_sdma_queue_drain()
1184 list_splice_init(&pq->sent, &free_list); in qib_user_sdma_queue_drain()
1185 pq->num_sending = 0; in qib_user_sdma_queue_drain()
1186 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_drain()
1187 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1312 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1313 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1314 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1315 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1316 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1317 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
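Lines 1312–1317 are the hand-off inside the descriptor writer: once a packet's descriptors are on the ring, the back-pointer from line 81 is used to update the owning queue; its added index is advanced, the packet moves from the pending count to the sending count, and it is spliced onto pq->sent under sent_lock so the clean path above can find it. A short sketch of just that bookkeeping step (the surrounding descriptor writes are omitted):

    /* Sketch: bookkeeping after a packet's descriptors have been written. */
    static void example_mark_pkt_sent(struct qib_user_sdma_pkt *pkt)
    {
        struct qib_user_sdma_queue *pq = pkt->pq;   /* back-pointer, line 81 */

        pq->added = pkt->added;     /* ring position of this packet's last
                                     * descriptor, per line 1312 */
        pq->num_pending--;

        spin_lock(&pq->sent_lock);
        pq->num_sending++;
        list_move_tail(&pkt->list, &pq->sent);
        spin_unlock(&pq->sent_lock);
    }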
1335 struct qib_user_sdma_queue *pq, in qib_user_sdma_push_pkts() argument
1344 if (pq->sdma_rb_node->refcount > 1) { in qib_user_sdma_push_pkts()
1350 pq->num_pending += count; in qib_user_sdma_push_pkts()
1364 pq->num_pending += count; in qib_user_sdma_push_pkts()
1387 struct qib_user_sdma_queue *pq, in qib_user_sdma_writev() argument
1399 mutex_lock(&pq->lock); in qib_user_sdma_writev()
1406 if (pq->added > ppd->sdma_descq_removed) in qib_user_sdma_writev()
1409 if (pq->num_sending) in qib_user_sdma_writev()
1410 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1416 ret = qib_user_sdma_queue_pkts(dd, ppd, pq, in qib_user_sdma_writev()
1432 if (pq->num_sending) in qib_user_sdma_writev()
1433 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1436 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp); in qib_user_sdma_writev()
1441 pq->counter += mxp; in qib_user_sdma_writev()
1448 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); in qib_user_sdma_writev()
1449 mutex_unlock(&pq->lock); in qib_user_sdma_writev()
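Lines 1387–1449 outline the main submission path, all under pq->lock: reap any completed packets first (lines 1406–1410), build a packet list from the user's iovecs, hand it to the SDMA engine, credit pq->counter with the number of packets accepted, and free whatever is still on the local list before dropping the mutex if something failed. The sketch below is a heavily reduced, hedged view of that control flow; the iovec batching and retry loop are collapsed, and example_queue_pkts/example_push_pkts stand in for the real builders at lines 804 and 1335.

    /* Sketch: control flow of the writev submission path, greatly reduced. */
    static int example_writev(struct qib_pportdata *ppd,
                              struct qib_user_sdma_queue *pq,
                              const struct iovec *iov, unsigned long dim)
    {
        struct list_head list;
        int mxp = 0;
        int ret;

        INIT_LIST_HEAD(&list);
        mutex_lock(&pq->lock);

        /* Line 1406 checks pq->added against ppd->sdma_descq_removed before
         * this; what runs in that branch is not visible in the listing. */
        if (pq->num_sending)
            example_queue_clean(ppd, pq);

        /* Build packets from the user iovecs (placeholder for lines 804-1048),
         * push them to the engine, and credit the queue counter. */
        ret = example_queue_pkts(ppd->dd, ppd, pq, iov, dim, &list, &mxp);
        if (!ret)
            ret = example_push_pkts(ppd, pq, &list, mxp);
        if (!ret)
            pq->counter += mxp;

        /* Anything still on the local list was never pushed; free it. */
        if (!list_empty(&list))
            example_free_pkt_list(&ppd->dd->pcidev->dev, pq, &list);

        mutex_unlock(&pq->lock);
        return ret;
    }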
1455 struct qib_user_sdma_queue *pq) in qib_user_sdma_make_progress() argument
1459 mutex_lock(&pq->lock); in qib_user_sdma_make_progress()
1461 ret = qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_make_progress()
1462 mutex_unlock(&pq->lock); in qib_user_sdma_make_progress()
1467 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq) in qib_user_sdma_complete_counter() argument
1469 return pq ? pq->sent_counter : 0; in qib_user_sdma_complete_counter()
1472 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq) in qib_user_sdma_inflight_counter() argument
1474 return pq ? pq->counter : 0; in qib_user_sdma_inflight_counter()
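The two accessors at lines 1467–1474 expose the queue's progress: inflight_counter returns how many packets have been accepted (pq->counter, credited at line 1441), and complete_counter returns how many have finished (pq->sent_counter, published by the clean path). Their difference is the number of packets still outstanding; a small usage sketch:

    /* Sketch: packets still outstanding on a queue. Unsigned subtraction keeps
     * the result correct even if the u32 counters have wrapped. */
    static u32 example_outstanding(struct qib_user_sdma_queue *pq)
    {
        return qib_user_sdma_inflight_counter(pq) -
               qib_user_sdma_complete_counter(pq);
    }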