/drivers/infiniband/hw/bnxt_re/

D | qplib_res.h
      66  #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))    argument
      68  #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \    argument
      69          ((HWQ_CMP(hwq->prod, hwq)\
      70          - HWQ_CMP(hwq->cons, hwq))\
      71          & (hwq->max_elements - 1)))
     168  struct bnxt_qplib_hwq *hwq;    member
     286  static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)    in bnxt_qplib_base_pg_size()  argument
     291  pbl = &hwq->pbl[PBL_LVL_0];    in bnxt_qplib_base_pg_size()
     318  static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,    in bnxt_qplib_get_qe()  argument
     323  pg_num = (indx / hwq->qe_ppg);    in bnxt_qplib_get_qe()
    [all …]

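The two macros above are classic power-of-two ring-buffer arithmetic: HWQ_CMP() masks a free-running index down to a ring offset, and HWQ_FREE_SLOTS() subtracts the masked producer/consumer distance from the ring size. A minimal userspace sketch of the same math (illustrative names, not the driver's; it assumes max_elements is a power of two, which the mask requires):

    #include <assert.h>
    #include <stdint.h>

    #define RING_CMP(idx, size)  ((idx) & ((size) - 1))   /* size must be 2^n */

    /* Free slots = ring size minus the in-flight distance, modulo size. */
    static uint32_t ring_free_slots(uint32_t prod, uint32_t cons, uint32_t size)
    {
        return size - ((RING_CMP(prod, size) - RING_CMP(cons, size)) & (size - 1));
    }

    int main(void)
    {
        assert(ring_free_slots(0, 0, 8) == 8);                    /* empty ring        */
        assert(ring_free_slots(5, 0, 8) == 3);                    /* 5 slots in flight */
        assert(ring_free_slots(0xFFFFFFFF, 0xFFFFFFFB, 8) == 4);  /* counters wrapped  */
        return 0;
    }

Note the usual caveat of this formula: a completely full ring is indistinguishable from an empty one, which is why the producer in __send_message() below rejects a request that would need all of the reported free slots (>=, not >).
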
D | qplib_fp.c
      75  dev_dbg(&scq->hwq.pdev->dev,    in __bnxt_qplib_add_flush_qp()
      83  dev_dbg(&rcq->hwq.pdev->dev,    in __bnxt_qplib_add_flush_qp()
     142  qp->sq.hwq.prod = 0;    in bnxt_qplib_clean_qp()
     143  qp->sq.hwq.cons = 0;    in bnxt_qplib_clean_qp()
     145  qp->rq.hwq.prod = 0;    in bnxt_qplib_clean_qp()
     146  qp->rq.hwq.cons = 0;    in bnxt_qplib_clean_qp()
     236  struct bnxt_qplib_hwq *hwq = &nq->hwq;    in clean_nq()  local
     243  spin_lock_bh(&hwq->lock);    in clean_nq()
     245  raw_cons = hwq->cons;    in clean_nq()
     247  sw_cons = HWQ_CMP(raw_cons, hwq);    in clean_nq()
    [all …]

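clean_nq() shows the consumer side of the same ring: take the queue lock, read the free-running cons counter, and mask it with HWQ_CMP() to locate each element. A stripped-down sketch of that drain loop (hypothetical struct ring, not the driver's types; the real code also validates each element before consuming it):

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical power-of-two ring; prod/cons are free-running counters. */
    struct ring {
        uint32_t  prod, cons, size;   /* size is 2^n */
        size_t    elem_size;
        void     *base;
    };

    static void ring_drain(struct ring *r, void (*handle)(void *elem))
    {
        uint32_t raw_cons = r->cons;

        while (raw_cons != r->prod) {
            uint32_t sw_cons = raw_cons & (r->size - 1);        /* HWQ_CMP */
            handle((char *)r->base + (size_t)sw_cons * r->elem_size);
            raw_cons++;
        }
        r->cons = raw_cons;           /* publish consumer progress once */
    }
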
D | qplib_res.c
     157  struct bnxt_qplib_hwq *hwq)    in bnxt_qplib_free_hwq()  argument
     161  if (!hwq->max_elements)    in bnxt_qplib_free_hwq()
     163  if (hwq->level >= PBL_LVL_MAX)    in bnxt_qplib_free_hwq()
     166  for (i = 0; i < hwq->level + 1; i++) {    in bnxt_qplib_free_hwq()
     167  if (i == hwq->level)    in bnxt_qplib_free_hwq()
     168  __free_pbl(res, &hwq->pbl[i], hwq->is_user);    in bnxt_qplib_free_hwq()
     170  __free_pbl(res, &hwq->pbl[i], false);    in bnxt_qplib_free_hwq()
     173  hwq->level = PBL_LVL_MAX;    in bnxt_qplib_free_hwq()
     174  hwq->max_elements = 0;    in bnxt_qplib_free_hwq()
     175  hwq->element_size = 0;    in bnxt_qplib_free_hwq()
    [all …]

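Two details make bnxt_qplib_free_hwq() safe to call more than once: it bails out early when level already holds the PBL_LVL_MAX sentinel, and it re-arms that sentinel after freeing; only the deepest page-buffer-list level is freed with the is_user flag, since that is the level that may map user memory. A sketch of that idempotent-teardown shape (illustrative types, not the driver's):

    #define PBL_LVL_MAX 3

    struct pbl { void **pages; };             /* one indirection level */

    struct hwq_sketch {
        int        level;                     /* deepest level, or PBL_LVL_MAX */
        int        max_elements;
        int        is_user;
        struct pbl pbl[PBL_LVL_MAX];
    };

    static void free_pbl(struct pbl *p, int is_user) { /* release p->pages here */ }

    static void hwq_free(struct hwq_sketch *hwq)
    {
        int i;

        if (!hwq->max_elements || hwq->level >= PBL_LVL_MAX)
            return;                           /* never allocated, or already freed */

        for (i = 0; i <= hwq->level; i++)     /* leaf level may be user memory */
            free_pbl(&hwq->pbl[i], i == hwq->level ? hwq->is_user : 0);

        hwq->level = PBL_LVL_MAX;             /* sentinel: freed */
        hwq->max_elements = 0;
    }
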
D | qplib_rcfw.c
      92  struct bnxt_qplib_hwq *hwq = &cmdq->hwq;    in __send_message()  local
     126  spin_lock_irqsave(&hwq->lock, flags);    in __send_message()
     127  if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {    in __send_message()
     129  spin_unlock_irqrestore(&hwq->lock, flags);    in __send_message()
     143  spin_unlock_irqrestore(&hwq->lock, flags);    in __send_message()
     168  sw_prod = HWQ_CMP(hwq->prod, hwq);    in __send_message()
     169  cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);    in __send_message()
     180  hwq->prod++;    in __send_message()
     184  cmdq_prod = hwq->prod & 0xFFFF;    in __send_message()
     200  spin_unlock_irqrestore(&hwq->lock, flags);    in __send_message()
    [all …]

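__send_message() is the producer mirror of the drain loop above: under the queue lock it rejects a command that does not fit in the free slots, writes it at the masked producer offset, advances prod, and finally notifies the firmware by writing the low 16 bits of the producer index to a doorbell. A compressed sketch of that sequence, reusing struct ring and ring_free_slots() from the sketches above (illustrative only; the real function also stamps a cookie and copies multi-slot commands):

    #include <string.h>

    static int ring_post(struct ring *r, const void *cmd)
    {
        uint32_t sw_prod, db_val;

        if (1 >= ring_free_slots(r->prod, r->cons, r->size))
            return -1;                            /* queue full, retry later */

        sw_prod = r->prod & (r->size - 1);        /* HWQ_CMP */
        memcpy((char *)r->base + (size_t)sw_prod * r->elem_size,
               cmd, r->elem_size);                /* single-slot command */
        r->prod++;

        db_val = r->prod & 0xFFFF;                /* doorbell sees 16 bits only */
        (void)db_val;                             /* writel(db_val, db_reg) in a driver */
        return 0;
    }
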
D | qplib_fp.h
     101  struct bnxt_qplib_hwq hwq;    member
     249  struct bnxt_qplib_hwq hwq;    member
     358  struct bnxt_qplib_hwq *hwq;    in bnxt_qplib_queue_full()  local
     361  hwq = &que->hwq;    in bnxt_qplib_queue_full()
     363  avail = hwq->cons - hwq->prod;    in bnxt_qplib_queue_full()
     364  if (hwq->cons <= hwq->prod)    in bnxt_qplib_queue_full()
     365  avail += hwq->depth;    in bnxt_qplib_queue_full()
     402  struct bnxt_qplib_hwq hwq;    member
     475  struct bnxt_qplib_hwq hwq;    member

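bnxt_qplib_queue_full() computes availability differently from HWQ_FREE_SLOTS(): it assumes prod and cons are already in-range indices and adds depth back on wrap, so depth does not have to be a power of two. A self-contained demonstration of that arithmetic (illustrative function name):

    #include <assert.h>
    #include <stdint.h>

    /* Free entries for in-range indices (0 <= prod, cons < depth);
     * depth need not be a power of two. Unsigned wrap does the work. */
    static uint32_t queue_avail(uint32_t prod, uint32_t cons, uint32_t depth)
    {
        uint32_t avail = cons - prod;
        if (cons <= prod)
            avail += depth;
        return avail;
    }

    int main(void)
    {
        assert(queue_avail(0, 0, 10) == 10);  /* empty (by convention) */
        assert(queue_avail(7, 2, 10) == 5);   /* 5 used, 5 free        */
        assert(queue_avail(2, 7, 10) == 5);   /* wrapped producer      */
        return 0;
    }
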
D | qplib_sp.c
     595  if (mrw->hwq.max_elements)    in bnxt_qplib_free_mrw()
     596  bnxt_qplib_free_hwq(res, &mrw->hwq);    in bnxt_qplib_free_mrw()
     654  if (mrw->hwq.max_elements) {    in bnxt_qplib_dereg_mrw()
     657  bnxt_qplib_free_hwq(res, &mrw->hwq);    in bnxt_qplib_dereg_mrw()
     681  if (mr->hwq.max_elements)    in bnxt_qplib_reg_mr()
     682  bnxt_qplib_free_hwq(res, &mr->hwq);    in bnxt_qplib_reg_mr()
     692  rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);    in bnxt_qplib_reg_mr()
     703  if (mr->hwq.level == PBL_LVL_MAX) {    in bnxt_qplib_reg_mr()
     709  level = mr->hwq.level;    in bnxt_qplib_reg_mr()
     710  req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);    in bnxt_qplib_reg_mr()
    [all …]

D | qplib_rcfw.h
     150  struct bnxt_qplib_hwq hwq;    member
     169  struct bnxt_qplib_hwq hwq;    member

D | qplib_sp.h
     117  struct bnxt_qplib_hwq hwq;    member
     122  struct bnxt_qplib_hwq hwq;    member

D | main.c
    1035  nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;    in bnxt_re_alloc_res()
    1043  rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;    in bnxt_re_alloc_res()
    1044  rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;    in bnxt_re_alloc_res()
    1441  rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;    in bnxt_re_dev_init()
    1442  rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;    in bnxt_re_dev_init()

D | ib_verbs.c
    2464  wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];    in bnxt_re_build_reg_wqe()
    2465  wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];    in bnxt_re_build_reg_wqe()
    2468  wqe->frmr.levels = qplib_frpl->hwq.level;    in bnxt_re_build_reg_wqe()
    2901  resp.tail = cq->qplib_cq.hwq.cons;    in bnxt_re_create_cq()
    3411  lib_qp->id, lib_qp->sq.hwq.prod,    in send_phantom_wqe()
    3412  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),    in send_phantom_wqe()
    3599  mr->qplib_mr.hwq.level = PBL_LVL_MAX;    in bnxt_re_get_dma_mr()

/drivers/scsi/cxlflash/

D | main.c
     159  struct hwq *hwq = get_hwq(afu, cmd->hwq_index);    in cmd_complete()  local
     161  spin_lock_irqsave(&hwq->hsq_slock, lock_flags);    in cmd_complete()
     163  spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);    in cmd_complete()
     191  static void flush_pending_cmds(struct hwq *hwq)    in flush_pending_cmds()  argument
     193  struct cxlflash_cfg *cfg = hwq->afu->parent;    in flush_pending_cmds()
     198  list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {    in flush_pending_cmds()
     235  static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)    in context_reset()  argument
     237  struct cxlflash_cfg *cfg = hwq->afu->parent;    in context_reset()
     244  dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);    in context_reset()
     246  spin_lock_irqsave(&hwq->hsq_slock, lock_flags);    in context_reset()
    [all …]

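flush_pending_cmds() walks hwq->pending_cmds with list_for_each_entry_safe(), the variant that caches the next node so the current command can be unlinked while the walk continues. An illustrative kernel-style sketch of that pattern (hypothetical command type, not the cxlflash code):

    #include <linux/list.h>

    struct pending_cmd {
        struct list_head list;
        void (*complete)(struct pending_cmd *cmd);
    };

    /* Caller holds the queue lock; the _safe iterator caches the successor
     * so list_del() on the current entry does not break the traversal. */
    static void flush_pending(struct list_head *pending)
    {
        struct pending_cmd *cmd, *tmp;

        list_for_each_entry_safe(cmd, tmp, pending, list) {
            list_del(&cmd->list);
            cmd->complete(cmd);
        }
    }
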
D | common.h
     196  struct hwq {    struct
     231  struct hwq hwqs[CXLFLASH_MAX_HWQS];    argument
     233  int (*context_reset)(struct hwq *hwq);
     255  static inline struct hwq *get_hwq(struct afu *afu, u32 index)    in get_hwq()

D | superpipe.c
     267  struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);    in afu_attach()  local
     291  val = hwq->ctx_hndl;    in afu_attach()
     298  val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));    in afu_attach()
    1658  struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);    in cxlflash_afu_recover()  local
    1735  reg = readq_be(&hwq->ctrl_map->mbox_r);    in cxlflash_afu_recover()

/drivers/net/wireless/ti/wlcore/

D | tx.c
    1201  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    in wlcore_stop_queue_locked()  local
    1202  bool stopped = !!wl->queue_stop_reasons[hwq];    in wlcore_stop_queue_locked()
    1205  WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));    in wlcore_stop_queue_locked()
    1210  ieee80211_stop_queue(wl->hw, hwq);    in wlcore_stop_queue_locked()
    1227  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    in wlcore_wake_queue()  local
    1232  WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));    in wlcore_wake_queue()
    1234  if (wl->queue_stop_reasons[hwq])    in wlcore_wake_queue()
    1237  ieee80211_wake_queue(wl->hw, hwq);    in wlcore_wake_queue()
    1304  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);    in wlcore_is_queue_stopped_by_reason_locked()  local
    1307  return test_bit(reason, &wl->queue_stop_reasons[hwq]);    in wlcore_is_queue_stopped_by_reason_locked()
    [all …]

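wlcore tracks why each queue is stopped as a bitmask of reasons: the mac80211 queue is stopped when the first reason bit goes in and woken only once queue_stop_reasons[hwq] drops back to zero. A minimal userspace sketch of that reason-accounting idea (illustrative API; the driver does this under a spinlock, with WARN_ON_ONCE on double set/clear):

    #include <assert.h>

    enum stop_reason { REASON_FLUSH, REASON_PS, REASON_COUNT };

    struct swq {
        unsigned long stop_reasons;   /* one bit per reason            */
        int stopped;                  /* what we told the upper layer  */
    };

    static void queue_stop(struct swq *q, enum stop_reason r)
    {
        if (!q->stop_reasons)
            q->stopped = 1;                   /* first reason stops the queue */
        q->stop_reasons |= 1UL << r;
    }

    static void queue_wake(struct swq *q, enum stop_reason r)
    {
        q->stop_reasons &= ~(1UL << r);
        if (!q->stop_reasons)
            q->stopped = 0;                   /* last reason cleared wakes it */
    }

    int main(void)
    {
        struct swq q = { 0, 0 };
        queue_stop(&q, REASON_FLUSH);
        queue_stop(&q, REASON_PS);
        queue_wake(&q, REASON_FLUSH);
        assert(q.stopped);                    /* PS still holds it   */
        queue_wake(&q, REASON_PS);
        assert(!q.stopped);                   /* all reasons cleared */
        return 0;
    }
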
/drivers/net/wireless/mediatek/mt76/

D | tx.c
     368  struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];    in mt76_release_buffered_frames()  local
     371  spin_lock_bh(&hwq->lock);    in mt76_release_buffered_frames()
     395  dev->queue_ops->kick(dev, hwq);    in mt76_release_buffered_frames()
     400  spin_unlock_bh(&hwq->lock);    in mt76_release_buffered_frames()
     589  struct mt76_queue *hwq;    in mt76_stop_tx_queues()  local
     595  hwq = phy->q_tx[mt76_txq_get_qid(txq)];    in mt76_stop_tx_queues()
     598  spin_lock_bh(&hwq->lock);    in mt76_stop_tx_queues()
     600  spin_unlock_bh(&hwq->lock);    in mt76_stop_tx_queues()

D | mac80211.c
    1327  struct mt76_queue *hwq;    in mt76_init_queue()  local
    1330  hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);    in mt76_init_queue()
    1331  if (!hwq)    in mt76_init_queue()
    1334  err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);    in mt76_init_queue()
    1338  return hwq;    in mt76_init_queue()

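mt76_init_queue() hands back a single pointer for both outcomes; helpers shaped like this conventionally return either a valid queue or an errno encoded with ERR_PTR(), which callers test with IS_ERR(). A generic kernel-style sketch of that convention (hypothetical helper and callback, not mt76's signatures):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct queue { int idx; };

    static struct queue *init_queue(struct device *dev, int idx,
                                    int (*hw_alloc)(struct queue *q))
    {
        struct queue *q;
        int err;

        q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);  /* freed with the device */
        if (!q)
            return ERR_PTR(-ENOMEM);

        q->idx = idx;
        err = hw_alloc(q);
        if (err < 0)
            return ERR_PTR(err);        /* devm cleans q up on detach */

        return q;                       /* caller checks IS_ERR()     */
    }
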
/drivers/scsi/

D | virtio_scsi.c
     547  u16 hwq = blk_mq_unique_tag_to_hwq(tag);    in virtscsi_pick_vq_mq()  local
     549  return &vscsi->req_vqs[hwq];    in virtscsi_pick_vq_mq()
     722  static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)    in virtscsi_commit_rqs()  argument
     726  virtscsi_kick_vq(&vscsi->req_vqs[hwq]);    in virtscsi_commit_rqs()

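Several SCSI and block drivers in this listing recover the hardware-queue index from a blk-mq "unique tag", which packs the hw queue number into the upper 16 bits and the per-queue tag into the lower 16 (see blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() in include/linux/blk-mq.h). A userspace demonstration of that packing (illustrative helper name):

    #include <assert.h>
    #include <stdint.h>

    #define UNIQUE_TAG_BITS 16
    #define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

    static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
    {
        return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
    }

    int main(void)
    {
        uint32_t unique = make_unique_tag(3, 0x002a);
        assert((unique >> UNIQUE_TAG_BITS) == 3);        /* ..._to_hwq() */
        assert((unique & UNIQUE_TAG_MASK) == 0x002a);    /* ..._to_tag() */
        return 0;
    }

scsi_debug's get_queue() below is the defensive variant of the same decode: it bounds-checks the recovered hwq with WARN_ON_ONCE before indexing, and nbd similarly checks it against nr_hw_queues before the blk_mq_tag_to_rq() lookup.
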
D | scsi_debug.c
    4763  u16 hwq;    in get_queue()  local
    4766  hwq = blk_mq_unique_tag_to_hwq(tag);    in get_queue()
    4768  pr_debug("tag=%#x, hwq=%d\n", tag, hwq);    in get_queue()
    4769  if (WARN_ON_ONCE(hwq >= submit_queues))    in get_queue()
    4770  hwq = 0;    in get_queue()
    4772  return sdebug_q_arr + hwq;    in get_queue()

/drivers/net/wireless/intel/iwlegacy/

D | common.h
    2245  il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)    in il_set_swq_id()  argument
    2248  BUG_ON(hwq > 31); /* only use 5 bits */    in il_set_swq_id()
    2250  txq->swq_id = (hwq << 2) | ac;    in il_set_swq_id()
    2271  u8 hwq = (queue >> 2) & 0x1f;    in il_wake_queue()  local
    2273  if (test_and_clear_bit(hwq, il->queue_stopped))    in il_wake_queue()
    2282  u8 hwq = (queue >> 2) & 0x1f;    in il_stop_queue()  local
    2284  if (!test_and_set_bit(hwq, il->queue_stopped))    in il_stop_queue()

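il_set_swq_id() packs two small fields into one byte: the access category in the low two bits and the hardware queue (at most 31, so five bits) above it; il_wake_queue() and il_stop_queue() undo it with the shift-and-mask shown above. A quick standalone demonstration (illustrative signature, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t set_swq_id(uint8_t ac, uint8_t hwq)
    {
        assert(hwq <= 31 && ac <= 3);        /* 5 bits + 2 bits */
        return (uint8_t)((hwq << 2) | ac);
    }

    int main(void)
    {
        uint8_t swq = set_swq_id(/*ac=*/2, /*hwq=*/17);
        assert(((swq >> 2) & 0x1f) == 17);   /* hwq, as in il_wake_queue() */
        assert((swq & 0x3) == 2);            /* ac                         */
        return 0;
    }
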
/drivers/block/

D | nbd.c
     703  u16 hwq;    in nbd_read_stat()  local
     727  hwq = blk_mq_unique_tag_to_hwq(tag);    in nbd_read_stat()
     728  if (hwq < nbd->tag_set.nr_hw_queues)    in nbd_read_stat()
     729  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],    in nbd_read_stat()

/drivers/scsi/ibmvscsi/

D | ibmvfc.h
     760  u16 hwq;    member

D | ibmvfc.c
    1570  evt->hwq = 0;    in ibmvfc_init_event()
    1939  u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);    in ibmvfc_queuecommand()  local
    1952  scsi_channel = hwq % vhost->scsi_scrqs.active_queues;    in ibmvfc_queuecommand()
    1957  evt->hwq = hwq % vhost->scsi_scrqs.active_queues;    in ibmvfc_queuecommand()

/drivers/scsi/qla2xxx/

D | qla_os.c
     873  uint16_t hwq;    in qla2xxx_queuecommand()  local
     877  hwq = blk_mq_unique_tag_to_hwq(tag);    in qla2xxx_queuecommand()
     878  qpair = ha->queue_pair_map[hwq];    in qla2xxx_queuecommand()