Lines Matching full:cmdq
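
The function names in these matches line up with the NITROX crypto driver's request manager in the Linux kernel (drivers/crypto/cavium/nitrox/nitrox_reqmgr.c). For each function below, only the lines matching cmdq are shown; elided lines are marked with "...". Since every match touches the same structure, here is a sketch of the struct nitrox_cmdq fields the matched lines imply, as a reading aid. Types and grouping are guessed from usage alone, so the driver's real definition may differ.

        /* Sketch only: fields inferred from the matches below; types are
         * guessed from usage and this is not the driver's actual definition.
         */
        struct nitrox_cmdq {
                struct nitrox_device *ndev;        /* owning device (cmdq->ndev) */

                /* command ring, guarded by cmd_qlock */
                spinlock_t cmd_qlock;
                u8 *base;                          /* ring base; slots of instr_size bytes */
                u32 instr_size;                    /* bytes per command slot */
                u32 write_idx;                     /* next free slot; wraps at ndev->qlen */
                atomic_t pending_count;            /* posted but not yet completed */
                void __iomem *dbell_csr_addr;      /* doorbell CSR (writeq) */
                void __iomem *compl_cnt_csr_addr;  /* completion-count CSR (readq/writeq) */

                /* in-flight requests, guarded by resp_qlock */
                spinlock_t resp_qlock;
                struct list_head response_head;

                /* deferred requests, guarded by backlog_qlock */
                spinlock_t backlog_qlock;
                struct list_head backlog_head;
                atomic_t backlog_count;
                struct work_struct backlog_qflush; /* work item that re-posts the backlog */
        };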

backlog_list_add(…, struct nitrox_cmdq *cmdq):
        ...
        spin_lock_bh(&cmdq->backlog_qlock);
        list_add_tail(&sr->backlog, &cmdq->backlog_head);
        atomic_inc(&cmdq->backlog_count);
        ...
        spin_unlock_bh(&cmdq->backlog_qlock);
response_list_add(…, struct nitrox_cmdq *cmdq):
        ...
        spin_lock_bh(&cmdq->resp_qlock);
        list_add_tail(&sr->response, &cmdq->response_head);
        spin_unlock_bh(&cmdq->resp_qlock);
response_list_del(…, struct nitrox_cmdq *cmdq):
        ...
        spin_lock_bh(&cmdq->resp_qlock);
        ...
        spin_unlock_bh(&cmdq->resp_qlock);
get_first_response_entry(struct nitrox_cmdq *cmdq):
        ...
        return list_first_entry_or_null(&cmdq->response_head,
                ...
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen):
        ...
        if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                atomic_dec(&cmdq->pending_count);
        ...
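
Only the cmdq lines of cmdq_full() appear above, but the shape is clear: optimistically reserve a slot by bumping pending_count, and if that overshoots the queue length, undo the increment and report the queue as full. A hedged completion; the return paths are inferred, and the real body may also issue memory barriers:

        /* Inferred completion of cmdq_full(); returns are not in the
         * match list, and any barriers in the real code are omitted.
         */
        static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
        {
                if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                        atomic_dec(&cmdq->pending_count);
                        return true;    /* no room; caller must back off or backlog */
                }
                return false;           /* slot reserved for this request */
        }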
post_se_instr(…, struct nitrox_cmdq *cmdq):
        ...
        spin_lock_bh(&cmdq->cmd_qlock);
        ...
        idx = cmdq->write_idx;
        ...
        ent = cmdq->base + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);
        ...
        response_list_add(sr, cmdq);
        ...
        writeq(1, cmdq->dbell_csr_addr);
        ...
        cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
        ...
        spin_unlock_bh(&cmdq->cmd_qlock);
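
Read together, the post_se_instr() matches are a textbook ring-buffer post under a spinlock: copy the instruction into the slot at write_idx, put the request on the response list so the completion path can find it, ring the doorbell, and advance the index. A hedged reconstruction follows; the request type (struct nitrox_softreq), the local declarations, and everything the match list elided (memory barriers in particular) are assumptions, and incr_index() is taken to be a wrapping increment modulo the queue length:

        /* Reconstruction from the matched lines only; elided statements
         * such as barriers are not shown, and sr's type is assumed.
         */
        static void post_se_instr(struct nitrox_softreq *sr,
                                  struct nitrox_cmdq *cmdq)
        {
                struct nitrox_device *ndev = cmdq->ndev;
                u8 *ent;
                int idx;

                spin_lock_bh(&cmdq->cmd_qlock);

                /* copy the instruction into the next free ring slot */
                idx = cmdq->write_idx;
                ent = cmdq->base + (idx * cmdq->instr_size);
                memcpy(ent, &sr->instr, cmdq->instr_size);

                /* make the request findable by the response tasklet */
                response_list_add(sr, cmdq);

                /* doorbell: tell the hardware one more command is ready */
                writeq(1, cmdq->dbell_csr_addr);

                /* advance, wrapping at the queue length */
                cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

                spin_unlock_bh(&cmdq->cmd_qlock);
        }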
static int post_backlog_cmds(struct nitrox_cmdq *cmdq):
        ...
        struct nitrox_device *ndev = cmdq->ndev;
        ...
        if (!atomic_read(&cmdq->backlog_count))
        ...
        spin_lock_bh(&cmdq->backlog_qlock);
        ...
        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                ...
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                ...
                atomic_dec(&cmdq->backlog_count);
                ...
                post_se_instr(sr, cmdq);
        ...
        spin_unlock_bh(&cmdq->backlog_qlock);
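
The post_backlog_cmds() matches show the deferred-submit side: if anything is on the backlog, walk it under backlog_qlock and re-post each request with post_se_instr(), stopping as soon as cmdq_full() says the ring is out of space again. A hedged reconstruction; the early return, the list_del(), and the error value are inferred (the safe iterator is needed precisely because entries are removed mid-walk):

        /* Reconstruction; the return value and the unlink step are inferred. */
        static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
        {
                struct nitrox_device *ndev = cmdq->ndev;
                struct nitrox_softreq *sr, *tmp;
                int ret = 0;

                if (!atomic_read(&cmdq->backlog_count))
                        return 0;       /* nothing deferred, nothing to do */

                spin_lock_bh(&cmdq->backlog_qlock);

                list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                        /* stop as soon as the ring fills up again */
                        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                                ret = -ENOSPC;
                                break;
                        }
                        /* unlink from the backlog, then post for real */
                        list_del(&sr->backlog);
                        atomic_dec(&cmdq->backlog_count);

                        post_se_instr(sr, cmdq);
                }
                spin_unlock_bh(&cmdq->backlog_qlock);

                return ret;
        }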
nitrox_enqueue_request():
        struct nitrox_cmdq *cmdq = sr->cmdq;
        ...
        post_backlog_cmds(cmdq);
        ...
        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                ...
                backlog_list_add(sr, cmdq);
        ...
        post_se_instr(sr, cmdq);
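
nitrox_enqueue_request() is the admission path that ties the pieces together: first give already-deferred requests a chance to drain, then either post the new request or push it onto the backlog when the ring is full. A hedged sketch; the backlog-permission check and the return codes are assumptions modeled on the usual kernel crypto convention (CRYPTO_TFM_REQ_MAY_BACKLOG, -EBUSY for a backlogged request):

        /* Sketch; the flag check and return codes are assumed, not matched. */
        static int nitrox_enqueue_request(struct nitrox_softreq *sr)
        {
                struct nitrox_cmdq *cmdq = sr->cmdq;
                struct nitrox_device *ndev = cmdq->ndev;

                /* drain deferred requests first to preserve ordering */
                post_backlog_cmds(cmdq);

                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                        if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                                return -ENOSPC; /* caller did not allow deferral */
                        /* defer; the response tasklet will re-post it later */
                        backlog_list_add(sr, cmdq);
                        return -EBUSY;  /* crypto convention for backlogged requests */
                }
                post_se_instr(sr, cmdq);

                return -EINPROGRESS;
        }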
nitrox_process_se_request():
        ...
        sr->cmdq = &ndev->pkt_inq[qno];
        ...
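
This single match is where a request gets bound to a queue: before enqueue, nitrox_process_se_request() picks one of the device's per-queue command structures (pkt_inq, indexed by qno) and stores it in sr->cmdq, which is exactly the pointer nitrox_enqueue_request() reads back above.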
backlog_qflush_work():
        struct nitrox_cmdq *cmdq;
        ...
        cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
        post_backlog_cmds(cmdq);
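
backlog_qflush_work() is the deferred half of the backlog mechanism: container_of() recovers the owning cmdq from the embedded work_struct, and the backlog is drained outside interrupt context. For that to work, the work item must have been bound to this handler at init time, presumably with something like:

        /* Presumed one-time setup elsewhere in the driver. */
        INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);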
static void process_response_list(struct nitrox_cmdq *cmdq):
        ...
        struct nitrox_device *ndev = cmdq->ndev;
        ...
        budget = atomic_read(&cmdq->pending_count);
        ...
        sr = get_first_response_entry(cmdq);
        ...
        atomic_dec(&cmdq->pending_count);
        ...
        response_list_del(sr, cmdq);
        ...
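
The process_response_list() matches outline a budgeted completion drain: snapshot pending_count as the budget, then repeatedly take the oldest in-flight request, retire it, and drop it from the response list. A hedged reconstruction; the loop bounds, the not-yet-completed check, and the callback step are inferred from the matches and from how the surrounding functions use these helpers:

        /* Reconstruction; the completion check and callback are paraphrased. */
        static void process_response_list(struct nitrox_cmdq *cmdq)
        {
                struct nitrox_device *ndev = cmdq->ndev; /* used by elided paths */
                struct nitrox_softreq *sr;
                int budget, completed = 0;

                /* never drain more than was pending when we started */
                budget = atomic_read(&cmdq->pending_count);

                while (completed < budget) {
                        sr = get_first_response_entry(cmdq);
                        if (!sr)
                                break;

                        /* elided: stop if the hardware has not finished sr yet */

                        atomic_dec(&cmdq->pending_count);

                        /* retire: unlink from the in-flight list */
                        response_list_del(sr, cmdq);

                        /* elided: free resources, complete the crypto request */
                        completed++;
                }
        }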
pkt_slc_resp_tasklet():
        struct nitrox_cmdq *cmdq = qvec->cmdq;
        ...
        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        ...
        process_response_list(cmdq);
        ...
        writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
        ...
        if (atomic_read(&cmdq->backlog_count))
                schedule_work(&cmdq->backlog_qflush);
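
Finally, pkt_slc_resp_tasklet() is the interrupt-driven consumer that closes the loop: read the completion-count CSR, retire finished requests, acknowledge by writing the count back, and, if anything was deferred while the ring was full, kick the backlog work item. A hedged sketch; the tasklet argument handling, the qvec container type, and the slc_cnts register layout are assumptions (the value is treated as an opaque 64-bit count here, whereas the real code uses a bit-field union):

        /* Sketch; qvec's type and the CSR's bit layout are assumed. */
        static void pkt_slc_resp_tasklet(unsigned long data)
        {
                struct nitrox_q_vector *qvec = (void *)(uintptr_t)data;
                struct nitrox_cmdq *cmdq = qvec->cmdq;
                u64 slc_cnts;

                /* how many completions the hardware reports */
                slc_cnts = readq(cmdq->compl_cnt_csr_addr);

                process_response_list(cmdq);

                /* write the count back to acknowledge / rearm the interrupt */
                writeq(slc_cnts, cmdq->compl_cnt_csr_addr);

                /* ring space was just freed: retry anything we deferred */
                if (atomic_read(&cmdq->backlog_count))
                        schedule_work(&cmdq->backlog_qflush);
        }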