Lines matching refs: pq (struct hfi1_user_sdma_pkt_q, hfi1 user SDMA)
82 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
95 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
132 struct hfi1_user_sdma_pkt_q *pq = in defer_packet_queue() local
143 xchg(&pq->state, SDMA_PKT_Q_DEFERRED); in defer_packet_queue()
144 if (list_empty(&pq->busy.list)) { in defer_packet_queue()
145 pq->busy.lock = &sde->waitlock; in defer_packet_queue()
146 iowait_get_priority(&pq->busy); in defer_packet_queue()
147 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); in defer_packet_queue()
158 struct hfi1_user_sdma_pkt_q *pq = in activate_packet_queue() local
160 pq->busy.lock = NULL; in activate_packet_queue()
161 xchg(&pq->state, SDMA_PKT_Q_ACTIVE); in activate_packet_queue()
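The two fragments above show the queue's back-pressure state machine: defer_packet_queue() flips pq->state to SDMA_PKT_Q_DEFERRED with xchg() and, if not already queued, parks pq->busy on the engine's dmawait list under sde->waitlock; activate_packet_queue() is the mirror, clearing busy.lock and flipping the state back to SDMA_PKT_Q_ACTIVE. Below is a minimal userspace model of that two-state toggle using C11 atomics; all names are illustrative, not the driver's, and the iowait list handling is reduced to comments.

/* Userspace model of the DEFERRED/ACTIVE toggle seen in
 * defer_packet_queue()/activate_packet_queue(). Names are illustrative;
 * the list/lock handling of the real driver is only sketched in comments.
 */
#include <stdatomic.h>
#include <stdio.h>

enum pkt_q_state { PKT_Q_ACTIVE, PKT_Q_DEFERRED };

struct pkt_q {
	_Atomic int state;
};

/* Engine has no room: mark the queue deferred so submitters stop. */
static void defer_packet_queue_model(struct pkt_q *pq)
{
	atomic_exchange(&pq->state, PKT_Q_DEFERRED);
	/* the driver also links pq->busy onto sde->dmawait here,
	 * using sde->waitlock as pq->busy.lock
	 */
}

/* Engine made progress: let submitters run again. */
static void activate_packet_queue_model(struct pkt_q *pq)
{
	/* the driver clears pq->busy.lock first */
	atomic_exchange(&pq->state, PKT_Q_ACTIVE);
	/* submitters blocked on pq->busy.wait_dma are woken elsewhere */
}

int main(void)
{
	struct pkt_q pq = { .state = PKT_Q_ACTIVE };

	defer_packet_queue_model(&pq);
	printf("deferred: %d\n", atomic_load(&pq.state) == PKT_Q_DEFERRED);
	activate_packet_queue_model(&pq);
	printf("active:   %d\n", atomic_load(&pq.state) == PKT_Q_ACTIVE);
	return 0;
}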
172 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_alloc_queues() local
182 pq = kzalloc(sizeof(*pq), GFP_KERNEL); in hfi1_user_sdma_alloc_queues()
183 if (!pq) in hfi1_user_sdma_alloc_queues()
185 pq->dd = dd; in hfi1_user_sdma_alloc_queues()
186 pq->ctxt = uctxt->ctxt; in hfi1_user_sdma_alloc_queues()
187 pq->subctxt = fd->subctxt; in hfi1_user_sdma_alloc_queues()
188 pq->n_max_reqs = hfi1_sdma_comp_ring_size; in hfi1_user_sdma_alloc_queues()
189 atomic_set(&pq->n_reqs, 0); in hfi1_user_sdma_alloc_queues()
190 init_waitqueue_head(&pq->wait); in hfi1_user_sdma_alloc_queues()
191 atomic_set(&pq->n_locked, 0); in hfi1_user_sdma_alloc_queues()
193 iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue, in hfi1_user_sdma_alloc_queues()
195 pq->reqidx = 0; in hfi1_user_sdma_alloc_queues()
197 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, in hfi1_user_sdma_alloc_queues()
198 sizeof(*pq->reqs), in hfi1_user_sdma_alloc_queues()
200 if (!pq->reqs) in hfi1_user_sdma_alloc_queues()
203 pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), in hfi1_user_sdma_alloc_queues()
204 sizeof(*pq->req_in_use), in hfi1_user_sdma_alloc_queues()
206 if (!pq->req_in_use) in hfi1_user_sdma_alloc_queues()
211 pq->txreq_cache = kmem_cache_create(buf, in hfi1_user_sdma_alloc_queues()
216 if (!pq->txreq_cache) { in hfi1_user_sdma_alloc_queues()
233 ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq, in hfi1_user_sdma_alloc_queues()
234 &pq->handler); in hfi1_user_sdma_alloc_queues()
240 rcu_assign_pointer(fd->pq, pq); in hfi1_user_sdma_alloc_queues()
250 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_alloc_queues()
252 kfree(pq->req_in_use); in hfi1_user_sdma_alloc_queues()
254 kfree(pq->reqs); in hfi1_user_sdma_alloc_queues()
256 kfree(pq); in hfi1_user_sdma_alloc_queues()
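hfi1_user_sdma_alloc_queues() allocates the queue structure, a request array sized by hfi1_sdma_comp_ring_size, a bitmap guarding those request slots, and a per-queue txreq kmem cache, then registers the MMU-rb handler and publishes the queue via fd->pq; the error labels free everything in reverse order. The sketch below reproduces only the allocate-then-unwind shape in userspace C; calloc() stands in for kzalloc/kcalloc, the struct fields are trimmed to what the listing shows, and the cache/iowait/MMU-rb steps are left as comments.

/* Sketch of the allocate-then-unwind shape of hfi1_user_sdma_alloc_queues().
 * RING_SIZE stands in for hfi1_sdma_comp_ring_size; the request slot type
 * is trimmed (the driver's is struct user_sdma_request).
 */
#include <stdlib.h>
#include <limits.h>

#define RING_SIZE 128
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct sdma_request_slot {
	unsigned int comp_idx;
};

struct pkt_q {
	unsigned int n_max_reqs;
	struct sdma_request_slot *reqs;	/* one slot per completion ring entry */
	unsigned long *req_in_use;	/* bitmap claiming/releasing those slots */
};

static struct pkt_q *pkt_q_alloc(void)
{
	struct pkt_q *pq = calloc(1, sizeof(*pq));

	if (!pq)
		return NULL;
	pq->n_max_reqs = RING_SIZE;

	pq->reqs = calloc(RING_SIZE, sizeof(*pq->reqs));
	if (!pq->reqs)
		goto free_pq;

	pq->req_in_use = calloc(BITS_TO_LONGS(RING_SIZE),
				sizeof(*pq->req_in_use));
	if (!pq->req_in_use)
		goto free_reqs;

	/* the driver would now create the txreq cache, register the
	 * MMU-rb handler and publish the queue via fd->pq
	 */
	return pq;

free_reqs:				/* unwind in reverse order on failure */
	free(pq->reqs);
free_pq:
	free(pq);
	return NULL;
}

int main(void)
{
	struct pkt_q *pq = pkt_q_alloc();

	if (pq) {
		free(pq->req_in_use);
		free(pq->reqs);
		free(pq);
	}
	return 0;
}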
261 static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) in flush_pq_iowait() argument
264 seqlock_t *lock = pq->busy.lock; in flush_pq_iowait()
269 if (!list_empty(&pq->busy.list)) { in flush_pq_iowait()
270 list_del_init(&pq->busy.list); in flush_pq_iowait()
271 pq->busy.lock = NULL; in flush_pq_iowait()
279 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_free_queues() local
284 pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, in hfi1_user_sdma_free_queues()
286 if (pq) { in hfi1_user_sdma_free_queues()
287 rcu_assign_pointer(fd->pq, NULL); in hfi1_user_sdma_free_queues()
291 if (pq->handler) in hfi1_user_sdma_free_queues()
292 hfi1_mmu_rb_unregister(pq->handler); in hfi1_user_sdma_free_queues()
293 iowait_sdma_drain(&pq->busy); in hfi1_user_sdma_free_queues()
296 pq->wait, in hfi1_user_sdma_free_queues()
297 !atomic_read(&pq->n_reqs)); in hfi1_user_sdma_free_queues()
298 kfree(pq->reqs); in hfi1_user_sdma_free_queues()
299 kfree(pq->req_in_use); in hfi1_user_sdma_free_queues()
300 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_free_queues()
301 flush_pq_iowait(pq); in hfi1_user_sdma_free_queues()
302 kfree(pq); in hfi1_user_sdma_free_queues()
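The free path reads fd->pq under SRCU, unpublishes it with rcu_assign_pointer(fd->pq, NULL), unregisters the MMU handler, drains the iowait, waits for pq->n_reqs to reach zero, and only then frees the arrays, the txreq cache and the queue itself. Below is a hedged userspace sketch of that unpublish-before-free ordering: a plain atomic pointer exchange stands in for the SRCU-protected fd->pq, and the drain step is reduced to a comment (the dec-and-test/wake pairing is sketched after pq_update() further down).

/* Sketch of the teardown order in hfi1_user_sdma_free_queues(). An atomic
 * pointer exchange stands in for rcu_assign_pointer()/SRCU; the real driver
 * additionally waits for SRCU readers and for pq->n_reqs to drop to zero
 * before freeing.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct pkt_q {
	void *reqs;
	void *req_in_use;
};

struct filedata {
	_Atomic(struct pkt_q *) pq;	/* published queue pointer (fd->pq) */
};

static void pkt_q_free(struct filedata *fd)
{
	/* 1. unpublish so no new submitter can pick the queue up */
	struct pkt_q *pq = atomic_exchange(&fd->pq, NULL);

	if (!pq)
		return;

	/* 2. driver: unregister the MMU handler, drain the iowait,
	 *    then wait_event(pq->wait, !atomic_read(&pq->n_reqs))
	 */

	/* 3. only now is it safe to free the queue's memory */
	free(pq->reqs);
	free(pq->req_in_use);
	free(pq);
}

int main(void)
{
	struct filedata fd;

	atomic_init(&fd.pq, calloc(1, sizeof(struct pkt_q)));
	pkt_q_free(&fd);
	pkt_q_free(&fd);	/* second call sees NULL and returns */
	return 0;
}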
348 struct hfi1_user_sdma_pkt_q *pq = in hfi1_user_sdma_process_request() local
349 srcu_dereference(fd->pq, &fd->pq_srcu); in hfi1_user_sdma_process_request()
351 struct hfi1_devdata *dd = pq->dd; in hfi1_user_sdma_process_request()
406 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) { in hfi1_user_sdma_process_request()
417 req = pq->reqs + info.comp_idx; in hfi1_user_sdma_process_request()
420 req->pq = pq; in hfi1_user_sdma_process_request()
435 atomic_inc(&pq->n_reqs); in hfi1_user_sdma_process_request()
590 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); in hfi1_user_sdma_process_request()
591 pq->state = SDMA_PKT_Q_ACTIVE; in hfi1_user_sdma_process_request()
605 pq->busy.wait_dma, in hfi1_user_sdma_process_request()
606 pq->state == SDMA_PKT_Q_ACTIVE, in hfi1_user_sdma_process_request()
609 flush_pq_iowait(pq); in hfi1_user_sdma_process_request()
622 wait_event(pq->busy.wait_dma, in hfi1_user_sdma_process_request()
625 pq_update(pq); in hfi1_user_sdma_process_request()
626 set_comp_state(pq, cq, info.comp_idx, ERROR, ret); in hfi1_user_sdma_process_request()
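hfi1_user_sdma_process_request() claims the per-request slot with test_and_set_bit(info.comp_idx, pq->req_in_use): if the bit was already set the completion index is still in flight and the request is rejected, otherwise the request uses pq->reqs + info.comp_idx and pq->n_reqs is bumped; the bit is cleared again by user_sdma_free_request() (line 1471 below). A hedged C11-atomics model of that claim/release follows; atomic_fetch_or/atomic_fetch_and stand in for the kernel bitops and RING_SIZE for hfi1_sdma_comp_ring_size.

/* Model of the comp_idx slot claim (test_and_set_bit on pq->req_in_use)
 * and its release (clear_bit in user_sdma_free_request()).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 128
#define WORD_BITS 64

static _Atomic unsigned long long req_in_use[RING_SIZE / WORD_BITS];

/* true if we won the slot, false if comp_idx is still in flight */
static bool claim_slot(unsigned int comp_idx)
{
	unsigned long long mask = 1ULL << (comp_idx % WORD_BITS);
	unsigned long long old =
		atomic_fetch_or(&req_in_use[comp_idx / WORD_BITS], mask);

	return !(old & mask);
}

static void release_slot(unsigned int comp_idx)
{
	unsigned long long mask = 1ULL << (comp_idx % WORD_BITS);

	atomic_fetch_and(&req_in_use[comp_idx / WORD_BITS], ~mask);
}

int main(void)
{
	printf("first claim:   %d\n", claim_slot(5));	/* 1: slot was free */
	printf("second claim:  %d\n", claim_slot(5));	/* 0: still busy */
	release_slot(5);
	printf("after release: %d\n", claim_slot(5));	/* 1 again */
	return 0;
}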
679 trace_hfi1_sdma_user_compute_length(req->pq->dd, in compute_data_length()
680 req->pq->ctxt, in compute_data_length()
681 req->pq->subctxt, in compute_data_length()
707 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd_ahg() local
730 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr)); in user_sdma_txadd_ahg()
732 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_txadd_ahg()
747 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd() local
756 ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx], in user_sdma_txadd()
784 struct hfi1_user_sdma_pkt_q *pq = NULL; in user_sdma_send_pkts() local
787 if (!req->pq) in user_sdma_send_pkts()
790 pq = req->pq; in user_sdma_send_pkts()
820 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); in user_sdma_send_pkts()
935 iowait_get_ib_work(&pq->busy), in user_sdma_send_pkts()
951 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_send_pkts()
953 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_send_pkts()
957 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) in sdma_cache_evict() argument
963 hfi1_mmu_rb_evict(pq->handler, &evict_data); in sdma_cache_evict()
974 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_sdma_pages() local
983 if (!hfi1_can_pin_pages(pq->dd, current->mm, in pin_sdma_pages()
984 atomic_read(&pq->n_locked), npages)) { in pin_sdma_pages()
985 cleared = sdma_cache_evict(pq, npages); in pin_sdma_pages()
1004 atomic_add(pinned, &pq->n_locked); in pin_sdma_pages()
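pin_sdma_pages() checks hfi1_can_pin_pages() against the queue's atomic n_locked counter before pinning user pages; when the new pages would push it over the limit it asks the node cache to evict (sdma_cache_evict()), and on success the pinned count is added to n_locked, with unpin_sdma_pages() subtracting it again. The userspace model below keeps only that accounting: PIN_LIMIT and the evictor are invented for the sketch, since the driver's limit comes from hfi1_can_pin_pages() and eviction from hfi1_mmu_rb_evict().

/* Model of the pinned-page accounting in pin_sdma_pages()/unpin_sdma_pages():
 * an atomic counter of locked pages, a limit check, and an eviction fallback.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PIN_LIMIT 1024			/* pages; illustrative only */

static _Atomic int n_locked;		/* pages currently pinned for this queue */

/* pretend the node cache gave some pages back */
static unsigned int cache_evict(unsigned int want)
{
	(void)want;
	return 0;			/* nothing cached in this toy model */
}

static bool can_pin(unsigned int npages)
{
	return atomic_load(&n_locked) + (int)npages <= PIN_LIMIT;
}

static bool pin_pages(unsigned int npages)
{
	if (!can_pin(npages)) {
		cache_evict(npages);	/* driver: sdma_cache_evict(pq, npages) */
		if (!can_pin(npages))
			return false;	/* still over the limit: fail the pin */
	}
	/* driver pins the user pages here; account for what was pinned */
	atomic_fetch_add(&n_locked, (int)npages);
	return true;
}

static void unpin_pages(unsigned int npages)
{
	atomic_fetch_sub(&n_locked, (int)npages);
}

int main(void)
{
	printf("pin 512:  %s\n", pin_pages(512) ? "ok" : "refused");
	printf("pin 1024: %s\n", pin_pages(1024) ? "ok" : "refused");
	unpin_pages(512);
	return 0;
}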
1013 atomic_sub(node->npages, &node->pq->n_locked); in unpin_sdma_pages()
1021 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_vector_pages() local
1028 hfi1_mmu_rb_remove_unless_exact(pq->handler, in pin_vector_pages()
1049 node->pq = pq; in pin_vector_pages()
1068 ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb); in pin_vector_pages()
1160 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header() local
1257 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, in set_txreq_header()
1266 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header()
1268 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); in set_txreq_header()
1277 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header_ahg() local
1376 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header_ahg()
1403 struct hfi1_user_sdma_pkt_q *pq; in user_sdma_txreq_cb() local
1411 pq = req->pq; in user_sdma_txreq_cb()
1422 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_txreq_cb()
1429 set_comp_state(pq, cq, req->info.comp_idx, state, status); in user_sdma_txreq_cb()
1430 pq_update(pq); in user_sdma_txreq_cb()
1433 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) in pq_update() argument
1435 if (atomic_dec_and_test(&pq->n_reqs)) in pq_update()
1436 wake_up(&pq->wait); in pq_update()
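pq_update() is the release side of the n_reqs accounting: every submitted request incremented pq->n_reqs in hfi1_user_sdma_process_request(), and when the completion path drops the count to zero it wakes pq->wait, which is what hfi1_user_sdma_free_queues() sleeps on before tearing the queue down. The pthread sketch below models that dec-and-test/wake protocol; a mutex/condvar pair stands in for atomic_dec_and_test() plus the kernel waitqueue, and the thread names are mine.

/* Model of the n_reqs lifetime protocol: submit increments, completion
 * decrements and wakes when the count hits zero, teardown sleeps until then.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int n_reqs;

static void submit(void)		/* hfi1_user_sdma_process_request() */
{
	pthread_mutex_lock(&lock);
	n_reqs++;
	pthread_mutex_unlock(&lock);
}

static void complete_one(void)		/* pq_update() */
{
	pthread_mutex_lock(&lock);
	if (--n_reqs == 0)
		pthread_cond_broadcast(&drained);	/* wake_up(&pq->wait) */
	pthread_mutex_unlock(&lock);
}

static void wait_for_drain(void)	/* hfi1_user_sdma_free_queues() */
{
	pthread_mutex_lock(&lock);
	while (n_reqs)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
	(void)arg;
	usleep(1000);			/* pretend the DMA took a while */
	complete_one();
	complete_one();
	return NULL;
}

int main(void)
{
	pthread_t thr;

	submit();
	submit();
	pthread_create(&thr, NULL, completer, NULL);
	wait_for_drain();		/* returns once both requests complete */
	pthread_join(thr, NULL);
	printf("drained\n");
	return 0;
}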
1450 sdma_txclean(req->pq->dd, t); in user_sdma_free_request()
1451 kmem_cache_free(req->pq->txreq_cache, tx); in user_sdma_free_request()
1464 hfi1_mmu_rb_remove(req->pq->handler, in user_sdma_free_request()
1471 clear_bit(req->info.comp_idx, req->pq->req_in_use); in user_sdma_free_request()
1474 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, in set_comp_state() argument
1483 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, in set_comp_state()