Lines Matching refs:pq (drivers/infiniband/hw/hfi1/user_sdma.c)
40 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
49 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
87 struct hfi1_user_sdma_pkt_q *pq = in defer_packet_queue() local
91 trace_hfi1_usdma_defer(pq, sde, &pq->busy); in defer_packet_queue()
99 xchg(&pq->state, SDMA_PKT_Q_DEFERRED); in defer_packet_queue()
100 if (list_empty(&pq->busy.list)) { in defer_packet_queue()
101 pq->busy.lock = &sde->waitlock; in defer_packet_queue()
102 iowait_get_priority(&pq->busy); in defer_packet_queue()
103 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); in defer_packet_queue()
114 struct hfi1_user_sdma_pkt_q *pq = in activate_packet_queue() local
117 trace_hfi1_usdma_activate(pq, wait, reason); in activate_packet_queue()
118 xchg(&pq->state, SDMA_PKT_Q_ACTIVE); in activate_packet_queue()
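Lines 87-118 are the two iowait callbacks wired up by iowait_init() at line 150: defer_packet_queue() runs when the SDMA engine's descriptor ring is full, parking the pq on the engine's dmawait list and flipping its state to SDMA_PKT_Q_DEFERRED; activate_packet_queue() runs once descriptors drain, flipping the state back and waking the submitter. A condensed sketch of the deferral side, assuming the container_of() recovery of pq from its embedded iowait and eliding the sde->waitlock seqlock handling shown in the full source:

	static int defer_packet_queue(struct sdma_engine *sde,
				      struct iowait_work *wait,
				      struct sdma_txreq *txreq,
				      uint seq, bool pkts_sent)
	{
		struct hfi1_user_sdma_pkt_q *pq =
			container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

		/* Publish the deferred state before parking the queue. */
		xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
		if (list_empty(&pq->busy.list)) {
			pq->busy.lock = &sde->waitlock;	/* so flush_pq_iowait() can find us */
			iowait_get_priority(&pq->busy);
			iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
		}
		return -EBUSY;	/* caller retries after activate_packet_queue() fires */
	}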
129 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_alloc_queues() local
139 pq = kzalloc(sizeof(*pq), GFP_KERNEL); in hfi1_user_sdma_alloc_queues()
140 if (!pq) in hfi1_user_sdma_alloc_queues()
142 pq->dd = dd; in hfi1_user_sdma_alloc_queues()
143 pq->ctxt = uctxt->ctxt; in hfi1_user_sdma_alloc_queues()
144 pq->subctxt = fd->subctxt; in hfi1_user_sdma_alloc_queues()
145 pq->n_max_reqs = hfi1_sdma_comp_ring_size; in hfi1_user_sdma_alloc_queues()
146 atomic_set(&pq->n_reqs, 0); in hfi1_user_sdma_alloc_queues()
147 init_waitqueue_head(&pq->wait); in hfi1_user_sdma_alloc_queues()
148 atomic_set(&pq->n_locked, 0); in hfi1_user_sdma_alloc_queues()
150 iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue, in hfi1_user_sdma_alloc_queues()
152 pq->reqidx = 0; in hfi1_user_sdma_alloc_queues()
154 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, in hfi1_user_sdma_alloc_queues()
155 sizeof(*pq->reqs), in hfi1_user_sdma_alloc_queues()
157 if (!pq->reqs) in hfi1_user_sdma_alloc_queues()
160 pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); in hfi1_user_sdma_alloc_queues()
161 if (!pq->req_in_use) in hfi1_user_sdma_alloc_queues()
166 pq->txreq_cache = kmem_cache_create(buf, in hfi1_user_sdma_alloc_queues()
171 if (!pq->txreq_cache) { in hfi1_user_sdma_alloc_queues()
188 ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq, in hfi1_user_sdma_alloc_queues()
189 &pq->handler); in hfi1_user_sdma_alloc_queues()
195 rcu_assign_pointer(fd->pq, pq); in hfi1_user_sdma_alloc_queues()
205 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_alloc_queues()
207 bitmap_free(pq->req_in_use); in hfi1_user_sdma_alloc_queues()
209 kfree(pq->reqs); in hfi1_user_sdma_alloc_queues()
211 kfree(pq); in hfi1_user_sdma_alloc_queues()
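hfi1_user_sdma_alloc_queues() (lines 129-211) acquires its resources in a fixed order (the pq itself, the request array, the in-use bitmap, a per-context kmem cache, the MMU rb-tree handler) and the tail at lines 205-211 is the matching unwind, freeing them in reverse when a later step fails. A condensed sketch of that goto-unwind shape; label names and the cache-name format are illustrative, not copied from the source:

	char buf[64];

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, sizeof(*pq->reqs), GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u",	/* name format assumed */
		 dd->unit, uctxt->ctxt, fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf, sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!pq->txreq_cache)
		goto pq_txreq_nomem;

	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret)
		goto pq_mmu_fail;

	rcu_assign_pointer(fd->pq, pq);		/* publish only after full init */
	return 0;

pq_mmu_fail:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);
	return -ENOMEM;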
216 static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq) in flush_pq_iowait() argument
219 seqlock_t *lock = pq->busy.lock; in flush_pq_iowait()
224 if (!list_empty(&pq->busy.list)) { in flush_pq_iowait()
225 list_del_init(&pq->busy.list); in flush_pq_iowait()
226 pq->busy.lock = NULL; in flush_pq_iowait()
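The gap between lines 219 and 224 is the locking: busy.lock points at the seqlock of whichever engine parked the pq (set at line 101), and removal must happen under it. A plausible reconstruction of the full helper, assuming write_seqlock_irqsave() as in other iowait users:

	static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
	{
		unsigned long flags;
		seqlock_t *lock = pq->busy.lock;

		if (!lock)
			return;		/* never parked on an engine's waitlist */
		write_seqlock_irqsave(lock, flags);
		if (!list_empty(&pq->busy.list)) {
			list_del_init(&pq->busy.list);
			pq->busy.lock = NULL;	/* disconnect from the engine */
		}
		write_sequnlock_irqrestore(lock, flags);
	}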
234 struct hfi1_user_sdma_pkt_q *pq; in hfi1_user_sdma_free_queues() local
239 pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, in hfi1_user_sdma_free_queues()
241 if (pq) { in hfi1_user_sdma_free_queues()
242 rcu_assign_pointer(fd->pq, NULL); in hfi1_user_sdma_free_queues()
246 iowait_sdma_drain(&pq->busy); in hfi1_user_sdma_free_queues()
249 pq->wait, in hfi1_user_sdma_free_queues()
250 !atomic_read(&pq->n_reqs)); in hfi1_user_sdma_free_queues()
251 kfree(pq->reqs); in hfi1_user_sdma_free_queues()
252 if (pq->handler) in hfi1_user_sdma_free_queues()
253 hfi1_mmu_rb_unregister(pq->handler); in hfi1_user_sdma_free_queues()
254 bitmap_free(pq->req_in_use); in hfi1_user_sdma_free_queues()
255 kmem_cache_destroy(pq->txreq_cache); in hfi1_user_sdma_free_queues()
256 flush_pq_iowait(pq); in hfi1_user_sdma_free_queues()
257 kfree(pq); in hfi1_user_sdma_free_queues()
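The teardown at lines 234-257 is order-sensitive: unpublish fd->pq first so no new submitter can obtain the pointer, let the SRCU grace period expire, drain in-flight hardware descriptors, then wait for n_reqs to reach zero before anything is freed. A sketch of that ordering, assuming a synchronize_srcu(&fd->pq_srcu) between the unpublish and the drain:

	rcu_assign_pointer(fd->pq, NULL);	/* unpublish: no new requests */
	synchronize_srcu(&fd->pq_srcu);		/* in-flight readers have finished */
	iowait_sdma_drain(&pq->busy);		/* hardware descriptors retired */
	wait_event_interruptible(pq->wait,
				 !atomic_read(&pq->n_reqs));	/* completions done */

	kfree(pq->reqs);
	if (pq->handler)
		hfi1_mmu_rb_unregister(pq->handler);
	bitmap_free(pq->req_in_use);
	kmem_cache_destroy(pq->txreq_cache);
	flush_pq_iowait(pq);	/* in case a late defer re-parked the queue */
	kfree(pq);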
303 struct hfi1_user_sdma_pkt_q *pq = in hfi1_user_sdma_process_request() local
304 srcu_dereference(fd->pq, &fd->pq_srcu); in hfi1_user_sdma_process_request()
306 struct hfi1_devdata *dd = pq->dd; in hfi1_user_sdma_process_request()
361 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) { in hfi1_user_sdma_process_request()
372 req = pq->reqs + info.comp_idx; in hfi1_user_sdma_process_request()
375 req->pq = pq; in hfi1_user_sdma_process_request()
390 atomic_inc(&pq->n_reqs); in hfi1_user_sdma_process_request()
545 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); in hfi1_user_sdma_process_request()
546 pq->state = SDMA_PKT_Q_ACTIVE; in hfi1_user_sdma_process_request()
562 pq->busy.wait_dma, in hfi1_user_sdma_process_request()
563 pq->state == SDMA_PKT_Q_ACTIVE, in hfi1_user_sdma_process_request()
566 trace_hfi1_usdma_we(pq, we_ret); in hfi1_user_sdma_process_request()
568 flush_pq_iowait(pq); in hfi1_user_sdma_process_request()
581 wait_event(pq->busy.wait_dma, in hfi1_user_sdma_process_request()
584 pq_update(pq); in hfi1_user_sdma_process_request()
585 set_comp_state(pq, cq, info.comp_idx, ERROR, ret); in hfi1_user_sdma_process_request()
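hfi1_user_sdma_process_request() (lines 303-585) shows the request lifecycle against the pq: atomically claim a completion-ring slot, bump n_reqs (released by pq_update() at completion), mark the queue active, then submit packets in a loop, sleeping on busy.wait_dma whenever the defer callback has parked the queue. A condensed sketch of the claim and the wait loop; the errno and timeout constant are illustrative:

	/* Claim the slot; failure means userspace reused a live comp_idx. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use))
		return -EBADSLT;		/* illustrative errno */

	req = pq->reqs + info.comp_idx;		/* slot index doubles as the request */
	req->pq = pq;
	atomic_inc(&pq->n_reqs);		/* paired with pq_update() */

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	pq->state = SDMA_PKT_Q_ACTIVE;

	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0 && ret != -EBUSY)
			goto free_req;		/* illustrative label */
		if (ret == -EBUSY) {
			/* ring full: sleep until activate_packet_queue() fires */
			we_ret = wait_event_interruptible_timeout(
					pq->busy.wait_dma,
					pq->state == SDMA_PKT_Q_ACTIVE,
					msecs_to_jiffies(SDMA_IOWAIT_TIMEOUT));
			if (we_ret <= 0)
				flush_pq_iowait(pq);	/* don't stay parked forever */
		}
	}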
638 trace_hfi1_sdma_user_compute_length(req->pq->dd, in compute_data_length()
639 req->pq->ctxt, in compute_data_length()
640 req->pq->subctxt, in compute_data_length()
666 struct hfi1_user_sdma_pkt_q *pq = req->pq; in user_sdma_txadd_ahg() local
689 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr)); in user_sdma_txadd_ahg()
691 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_txadd_ahg()
701 struct hfi1_user_sdma_pkt_q *pq = NULL; in user_sdma_send_pkts() local
704 if (!req->pq) in user_sdma_send_pkts()
707 pq = req->pq; in user_sdma_send_pkts()
736 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); in user_sdma_send_pkts()
841 iowait_get_ib_work(&pq->busy), in user_sdma_send_pkts()
857 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_send_pkts()
859 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_send_pkts()
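In user_sdma_send_pkts() every packet's tx descriptor comes from the per-context txreq_cache created at line 166, and the failure path must both unmap whatever descriptors were already built (sdma_txclean()) and return the object to the cache. A minimal sketch of that lifecycle around the batched submit; build_packet() is a hypothetical stand-in for the header/payload setup:

	tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	ret = build_packet(req, tx);		/* hypothetical helper */
	if (ret)
		goto free_tx;
	list_add_tail(&tx->txreq.list, &req->txps);

	/* Batched submit; a full ring surfaces as -EBUSY via defer_packet_queue(). */
	return sdma_send_txlist(req->sde, iowait_get_ib_work(&pq->busy),
				&req->txps, &count);

free_tx:
	sdma_txclean(pq->dd, &tx->txreq);	/* unmap descriptors already added */
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;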
863 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) in sdma_cache_evict() argument
866 struct mmu_rb_handler *handler = pq->handler; in sdma_cache_evict()
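sdma_cache_evict() asks the MMU rb-tree handler to reclaim least-recently-used pinned-page entries until roughly npages are freed; the elided body is small. A plausible reconstruction, assuming the hfi1_mmu_rb_evict()/struct evict_data interface from the driver's mmu_rb layer:

	static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
	{
		struct evict_data evict_data;
		struct mmu_rb_handler *handler = pq->handler;

		evict_data.cleared = 0;		/* pages reclaimed so far */
		evict_data.target = npages;	/* stop once this many are freed */
		hfi1_mmu_rb_evict(handler, &evict_data);
		return evict_data.cleared;
	}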
947 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header() local
1044 pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, in set_txreq_header()
1053 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header()
1055 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr)); in set_txreq_header()
1064 struct hfi1_user_sdma_pkt_q *pq = req->pq; in set_txreq_header_ahg() local
1163 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, in set_txreq_header_ahg()
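set_txreq_header() (lines 947-1055) writes a complete packet header and adds it with sdma_txadd_kvaddr(); set_txreq_header_ahg() instead patches only the fields that change between packets (PSN, length, offsets) through the engine's AHG header-generation slots. The caller chooses between them roughly as in this sketch, assuming an ahg_idx < 0 sentinel for "no AHG slot allocated":

	if (req->ahg_idx >= 0)		/* engine granted an AHG slot */
		ret = set_txreq_header_ahg(req, tx, datalen);
	else
		ret = set_txreq_header(req, tx, datalen);
	if (ret)
		goto free_txreq;	/* illustrative label */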
1190 struct hfi1_user_sdma_pkt_q *pq; in user_sdma_txreq_cb() local
1198 pq = req->pq; in user_sdma_txreq_cb()
1209 kmem_cache_free(pq->txreq_cache, tx); in user_sdma_txreq_cb()
1216 set_comp_state(pq, cq, req->info.comp_idx, state, status); in user_sdma_txreq_cb()
1217 pq_update(pq); in user_sdma_txreq_cb()
1220 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) in pq_update() argument
1222 if (atomic_dec_and_test(&pq->n_reqs)) in pq_update()
1223 wake_up(&pq->wait); in pq_update()
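pq_update() is the release half of the n_reqs protocol: atomic_inc() at line 390 when a request is accepted, atomic_dec_and_test() here when its last txreq completes, and the wake_up() pairs with the wait at lines 249-250 so teardown cannot free the pq under a live request. The whole protocol in three fragments, all taken from the lines above:

	/* submit (hfi1_user_sdma_process_request) */
	atomic_inc(&pq->n_reqs);

	/* complete (user_sdma_txreq_cb -> pq_update) */
	if (atomic_dec_and_test(&pq->n_reqs))
		wake_up(&pq->wait);

	/* teardown (hfi1_user_sdma_free_queues) */
	wait_event_interruptible(pq->wait, !atomic_read(&pq->n_reqs));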
1235 sdma_txclean(req->pq->dd, t); in user_sdma_free_request()
1236 kmem_cache_free(req->pq->txreq_cache, tx); in user_sdma_free_request()
1241 clear_bit(req->info.comp_idx, req->pq->req_in_use); in user_sdma_free_request()
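user_sdma_free_request() is the counterpart of the claim at line 361: after cleaning and returning every outstanding txreq to the cache, clear_bit() releases the completion-ring slot so userspace may reuse that comp_idx. Sketched, with the tx-list walk condensed:

	while (!list_empty(&req->txps)) {
		struct sdma_txreq *t = list_first_entry(&req->txps,
							struct sdma_txreq, list);

		list_del_init(&t->list);
		sdma_txclean(req->pq->dd, t);	/* drop DMA mappings */
		kmem_cache_free(req->pq->txreq_cache,
				container_of(t, struct user_sdma_txreq, txreq));
	}
	clear_bit(req->info.comp_idx, req->pq->req_in_use);	/* slot reusable */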
1244 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, in set_comp_state() argument
1253 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, in set_comp_state()
1269 atomic_sub(node->npages, &node->pq->n_locked); in free_system_node()
1305 struct hfi1_user_sdma_pkt_q *pq = req->pq; in pin_system_pages() local
1314 if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked), in pin_system_pages()
1317 atomic_read(&pq->n_locked), npages); in pin_system_pages()
1318 cleared = sdma_cache_evict(pq, npages); in pin_system_pages()
1342 atomic_add(pinned, &pq->n_locked); in pin_system_pages()
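pin_system_pages() enforces the per-context pinned-page budget: n_locked counts pages currently pinned for this pq (decremented in free_system_node() at line 1269), and when hfi1_can_pin_pages() reports the budget exhausted, sdma_cache_evict() reclaims LRU cache entries before retrying. A condensed sketch of that loop; the exact pin call and errno are assumptions:

retry:
	if (!hfi1_can_pin_pages(pq->dd, current->mm,
				atomic_read(&pq->n_locked), npages)) {
		cleared = sdma_cache_evict(pq, npages);
		if (cleared >= npages)
			goto retry;		/* budget freed, try again */
		return -ENOMEM;			/* illustrative: cannot pin */
	}

	pinned = pin_user_pages_fast(start, npages,
				     FOLL_WRITE | FOLL_LONGTERM,	/* assumed flags */
				     pages);
	if (pinned < 0)
		return pinned;
	atomic_add(pinned, &pq->n_locked);	/* charge against the budget */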
1359 struct hfi1_user_sdma_pkt_q *pq = req->pq; in add_system_pinning() local
1373 node->pq = pq; in add_system_pinning()
1376 ret = hfi1_mmu_rb_insert(pq->handler, &node->rb); in add_system_pinning()
1393 struct hfi1_user_sdma_pkt_q *pq = req->pq; in get_system_cache_entry() local
1396 struct mmu_rb_handler *handler = pq->handler; in get_system_cache_entry()
1476 struct hfi1_user_sdma_pkt_q *pq = req->pq; in add_mapping_to_sdma_packet() local
1513 ret = sdma_txadd_page(pq->dd, &tx->txreq, in add_mapping_to_sdma_packet()
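add_mapping_to_sdma_packet() finally feeds the cached, pinned pages into the descriptor list: each chunk becomes one sdma_txadd_page() call against the request's txreq. A minimal sketch of the per-chunk add; the variable names (node->pages, page_idx, page_offset, chunk_len) are illustrative, and newer kernels pass an extra pinning-context argument to sdma_txadd_page():

	ret = sdma_txadd_page(pq->dd, &tx->txreq,
			      node->pages[page_idx],	/* pinned earlier */
			      page_offset, chunk_len);
	if (ret) {
		/* descriptor table full or DMA mapping failed */
		sdma_txclean(pq->dd, &tx->txreq);
		return ret;
	}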