/drivers/media/v4l2-core/
  videobuf-core.c:
     50  #define CALL(q, f, arg...) \
     51          ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
     52  #define CALLPTR(q, f, arg...) \
     53          ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
     55  struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
     59          BUG_ON(q->msize < sizeof(*vb));
     61          if (!q->int_ops || !q->int_ops->alloc_vb) {
     66          vb = q->int_ops->alloc_vb(q->msize);
     76  static int state_neither_active_nor_queued(struct videobuf_queue *q,
     82          spin_lock_irqsave(q->irqlock, flags);
    [all …]
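The CALL()/CALLPTR() macros above make every int_ops callback optional: a missing op evaluates to 0 or NULL instead of dereferencing a NULL function pointer. A minimal userspace sketch of the idiom (the q_ops/queue types and my_start are invented for illustration; the kernel macros use GNU-style `arg...` where this uses C99 `__VA_ARGS__`):

    #include <stdio.h>
    #include <stddef.h>

    struct q_ops {
        int  (*start)(int arg);        /* optional op */
        void *(*alloc)(size_t size);   /* optional op */
    };

    struct queue {
        const struct q_ops *int_ops;
    };

    /* Dispatch to an op if the driver provided it; fall back to 0/NULL. */
    #define CALL(q, f, ...)    ((q)->int_ops->f ? (q)->int_ops->f(__VA_ARGS__) : 0)
    #define CALLPTR(q, f, ...) ((q)->int_ops->f ? (q)->int_ops->f(__VA_ARGS__) : NULL)

    static int my_start(int arg) { return arg * 2; }

    int main(void)
    {
        struct q_ops ops = { .start = my_start };   /* .alloc left NULL */
        struct queue q = { .int_ops = &ops };

        printf("start -> %d\n", CALL(&q, start, 21));    /* 42 */
        printf("alloc -> %p\n", CALLPTR(&q, alloc, 16)); /* NULL fallback */
        return 0;
    }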
/drivers/net/ethernet/fungible/funeth/
  funeth_rx.c:
     50  static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
     52          struct funeth_rx_cache *c = &q->cache;
     58          dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
     67  static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
     69          struct funeth_rx_cache *c = &q->cache;
     77          dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
     88          dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
     98  static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
    103          if (cache_get(q, rb))
    110          rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
    [all …]
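cache_offer()/cache_get() recycle still-DMA-mapped pages through a small per-queue ring so the refill path can skip dma_map_page() on a cache hit. A simplified userspace model of that recycle ring (the types and power-of-two sizing are assumptions; the real driver also checks the page refcount before reuse and syncs the mapping for device use):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_SLOTS 8   /* power of two */

    struct buf { void *page; uint64_t dma_addr; };

    struct rx_cache {
        unsigned int prod, cons;   /* free-running counters */
        struct buf bufs[CACHE_SLOTS];
    };

    /* Offer a buffer to the cache; caller unmaps and frees it on failure. */
    static bool cache_offer(struct rx_cache *c, const struct buf *b)
    {
        if (c->prod - c->cons == CACHE_SLOTS)
            return false;          /* full: caller must dma_unmap + free */
        c->bufs[c->prod++ & (CACHE_SLOTS - 1)] = *b;
        return true;
    }

    /* Reuse a cached, still-mapped buffer instead of allocating a page. */
    static bool cache_get(struct rx_cache *c, struct buf *out)
    {
        if (c->prod == c->cons)
            return false;          /* empty: fall back to alloc + dma_map */
        *out = c->bufs[c->cons++ & (CACHE_SLOTS - 1)];
        return true;
    }

    int main(void)
    {
        struct rx_cache c = { 0 };
        struct buf b = { .dma_addr = 0x1000 }, out;

        cache_offer(&c, &b);
        printf("hit: %d, dma 0x%llx\n", cache_get(&c, &out),
               (unsigned long long)out.dma_addr);
        return 0;
    }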
  funeth_tx.c:
     56  static void *txq_end(const struct funeth_txq *q)
     58          return (void *)q->hw_wb;
     64  static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
     66          return txq_end(q) - p;
     78  static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
     90               i < ngle && txq_to_end(q, gle); i++, gle++)
     93          if (txq_to_end(q, gle) == 0) {
     94                  gle = (struct fun_dataop_gl *)q->desc;
    107  static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
    132          FUN_QSTAT_INC(q, tx_tls_fallback);
    [all …]
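txq_end()/txq_to_end() bound each write against the end of the descriptor ring so a multi-entry gather list can wrap mid-write back to q->desc, as fun_write_gl() does at line 94. A sketch of that wrap-while-writing pattern under invented types (gl_entry stands in for struct fun_dataop_gl):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_ENTRIES 8

    struct gl_entry { uint64_t addr; uint32_t len; };

    struct txq {
        struct gl_entry desc[RING_ENTRIES];
        struct gl_entry *end;   /* one past the last descriptor */
    };

    /* Bytes left between p and the end of the descriptor ring. */
    static unsigned int txq_to_end(const struct txq *q, const struct gl_entry *p)
    {
        return (unsigned int)((const char *)q->end - (const char *)p);
    }

    /* Write n gather entries starting at pos, wrapping to desc[0] at the end. */
    static void write_gl(struct txq *q, struct gl_entry *pos,
                         const struct gl_entry *src, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++, pos++) {
            if (txq_to_end(q, pos) == 0)  /* hit the end: wrap to the base */
                pos = q->desc;
            *pos = src[i];
        }
    }

    int main(void)
    {
        struct txq q;
        struct gl_entry src[4] = {
            { 0x1000, 1 }, { 0x2000, 2 }, { 0x3000, 3 }, { 0x4000, 4 }
        };

        q.end = q.desc + RING_ENTRIES;
        write_gl(&q, q.desc + 6, src, 4);          /* entries 6,7 then 0,1 */
        printf("desc[0].len = %u (wrapped)\n", (unsigned)q.desc[0].len); /* 3 */
        return 0;
    }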
/drivers/media/common/videobuf2/
  videobuf2-core.c:
     37  #define dprintk(q, level, fmt, arg...) \
     40                  pr_info("[%s] %s: " fmt, (q)->name, __func__, \
     93  #define log_qop(q, op) \
     94          dprintk(q, 2, "call_qop(%s)%s\n", #op, \
     95                  (q)->ops->op ? "" : " (nop)")
     97  #define call_qop(q, op, args...) \
    101          log_qop(q, op); \
    102          err = (q)->ops->op ? (q)->ops->op(args) : 0; \
    104          (q)->cnt_ ## op++; \
    108  #define call_void_qop(q, op, args...) \
    [all …]
  videobuf2-v4l2.c:
     39  #define dprintk(q, level, fmt, arg...) \
     43                  (q)->name, __func__, ## arg); \
    145          struct vb2_queue *q = vb->vb2_queue;        in __copy_timestamp()
    147          if (q->is_output) {
    152          if (q->copy_timestamp)
    178          struct vb2_queue *q = vb->vb2_queue;        in vb2_fill_vb2_v4l2_buffer()
    186          dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
    189          if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
    199          dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
    332          if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
    [all …]
/drivers/spi/
  spi-fsl-qspi.c:
    277  static inline int needs_swap_endian(struct fsl_qspi *q)
    279          return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
    282  static inline int needs_4x_clock(struct fsl_qspi *q)
    284          return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
    287  static inline int needs_fill_txfifo(struct fsl_qspi *q)
    289          return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
    292  static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
    294          return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
    297  static inline int needs_amba_base_offset(struct fsl_qspi *q)
    299          return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
    [all …]
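Each needs_*() helper tests one bit of a per-variant quirk mask hung off devtype_data, so the fast paths stay branch-cheap and new silicon only needs a new quirk table. A compilable sketch of the scheme (quirk names mirror the driver but the bit values and the imx6 table are made up):

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-variant quirk bits (values illustrative). */
    #define QUIRK_SWAP_ENDIAN   (1 << 0)
    #define QUIRK_4X_INT_CLK    (1 << 1)
    #define QUIRK_BASE_INTERNAL (1 << 2)

    struct devtype_data { unsigned int quirks; };
    struct qspi { const struct devtype_data *devtype_data; };

    static inline bool needs_swap_endian(const struct qspi *q)
    {
        return q->devtype_data->quirks & QUIRK_SWAP_ENDIAN;
    }

    static inline bool needs_amba_base_offset(const struct qspi *q)
    {
        /* Inverted test: older variants lack an internal base register. */
        return !(q->devtype_data->quirks & QUIRK_BASE_INTERNAL);
    }

    int main(void)
    {
        static const struct devtype_data imx6 = { .quirks = QUIRK_SWAP_ENDIAN };
        struct qspi q = { .devtype_data = &imx6 };

        printf("swap endian: %d, amba offset: %d\n",
               needs_swap_endian(&q), needs_amba_base_offset(&q));
        return 0;
    }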
/drivers/net/wireless/broadcom/b43/
  pio.c:
     24  static u16 generate_cookie(struct b43_pio_txqueue *q,
     37          cookie = (((u16)q->index + 1) << 12);
     49          struct b43_pio_txqueue *q = NULL;           in parse_cookie()
     54                  q = pio->tx_queue_AC_BK;
     57                  q = pio->tx_queue_AC_BE;
     60                  q = pio->tx_queue_AC_VI;
     63                  q = pio->tx_queue_AC_VO;
     66                  q = pio->tx_queue_mcast;
     69          if (B43_WARN_ON(!q))
     72          if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
    [all …]
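generate_cookie() packs the queue number into the top nibble of a 16-bit cookie (index + 1, so 0 stays an invalid cookie) and the packet slot into the low 12 bits; parse_cookie() reverses it to locate the queue and packet for a TX status report. A standalone model of the pack/unpack math (the function shapes are simplified; the real code takes driver structs):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a queue id and a packet slot into a 16-bit cookie:
     * queue in bits 15:12 (stored as index + 1 so 0 means "invalid"),
     * slot in bits 11:0. */
    static uint16_t generate_cookie(unsigned int queue_index,
                                    unsigned int pack_index)
    {
        return (uint16_t)(((queue_index + 1) << 12) | (pack_index & 0x0FFF));
    }

    static void parse_cookie(uint16_t cookie, unsigned int *queue_index,
                             unsigned int *pack_index)
    {
        *queue_index = (cookie >> 12) - 1;
        *pack_index  = cookie & 0x0FFF;
    }

    int main(void)
    {
        unsigned int qi, pi;
        uint16_t c = generate_cookie(3, 17);

        parse_cookie(c, &qi, &pi);
        printf("cookie 0x%04x -> queue %u, slot %u\n", c, qi, pi);
        return 0;
    }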
/drivers/s390/cio/
  qdio_main.c:
    116  static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
    119          int tmp_count = count, tmp_start = start, nr = q->nr;
    122          qperf_inc(q, eqbs);
    124          if (!q->is_input_q)
    125                  nr += q->irq_ptr->nr_input_qs;
    127          ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
    137                  qperf_inc(q, eqbs_partial);
    138                  DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
    143                  DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
    146          DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
    [all …]
/drivers/infiniband/sw/rxe/
  rxe_queue.h:
     83  void rxe_queue_reset(struct rxe_queue *q);
     88  int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
     95  static inline u32 queue_next_index(struct rxe_queue *q, int index)
     97          return (index + 1) & q->index_mask;
    100  static inline u32 queue_get_producer(const struct rxe_queue *q,
    108                  prod = smp_load_acquire(&q->buf->producer_index);
    112                  prod = q->index;
    116                  prod = q->buf->producer_index;
    120                  prod = smp_load_acquire(&q->buf->producer_index);
    127  static inline u32 queue_get_consumer(const struct rxe_queue *q,
    [all …]
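queue_next_index() relies on the ring size being a power of two so wraparound is a single AND with index_mask, and queue_get_producer() pairs smp_load_acquire() with the remote writer's release store. A userspace analogue using C11 atomics (the ring struct and QUEUE_SLOTS are illustrative, not the rxe layout):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_SLOTS 64   /* must be a power of two */

    struct ring {
        _Atomic uint32_t producer_index;  /* shared with the other side */
        uint32_t index_mask;              /* QUEUE_SLOTS - 1 */
    };

    static inline uint32_t queue_next_index(const struct ring *q, uint32_t index)
    {
        return (index + 1) & q->index_mask;   /* cheap wraparound, no modulo */
    }

    /* Reader side: an acquire load pairs with the writer's release store so
     * slot contents are visible before the index that publishes them. */
    static inline uint32_t queue_get_producer(struct ring *q)
    {
        return atomic_load_explicit(&q->producer_index, memory_order_acquire);
    }

    int main(void)
    {
        struct ring q = { .index_mask = QUEUE_SLOTS - 1 };

        atomic_store_explicit(&q.producer_index, 63, memory_order_release);
        printf("next after %u is %u\n", queue_get_producer(&q),
               queue_next_index(&q, queue_get_producer(&q)));  /* wraps to 0 */
        return 0;
    }

The acquire/release pairing is what guarantees a consumer never reads a slot before the contents the producer published behind that index are visible.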
  rxe_queue.c:
     46  inline void rxe_queue_reset(struct rxe_queue *q)
     52          memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
     58          struct rxe_queue *q;                        in rxe_queue_init()
     66          q = kzalloc(sizeof(*q), GFP_KERNEL);
     67          if (!q)
     70          q->rxe = rxe;
     71          q->type = type;
     74          q->elem_size = elem_size;
     81          q->log2_elem_size = order_base_2(elem_size);
     85          q->index_mask = num_slots - 1;
    [all …]
/drivers/net/ethernet/chelsio/cxgb3/
  sge.c:
    169  static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
    171          return container_of(q, struct sge_qset, rspq);
    174  static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
    176          return container_of(q, struct sge_qset, txq[qidx]);
    189                          const struct sge_rspq *q, unsigned int credits)  in refill_rspq()
    193                       V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
    233  static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
    237          struct tx_sw_desc *d = &q->sdesc[cidx];
    240          sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
    266                  d = cidx + 1 == q->size ? q->sdesc : d + 1;
    [all …]
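rspq_to_qset()/txq_to_qset() recover the enclosing sge_qset from a pointer to one of its embedded queues via container_of(), avoiding a back-pointer in every queue. A self-contained sketch with simplified types (this container_of omits the kernel's typeof-based type check):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rspq { int cntxt_id; };
    struct txq  { int size; };

    struct qset {
        struct rspq rspq;   /* embedded, not pointed-to */
        struct txq  txq[3];
        int         id;
    };

    /* Recover the enclosing qset from a pointer to its embedded queue. */
    static struct qset *rspq_to_qset(struct rspq *q)
    {
        return container_of(q, struct qset, rspq);
    }

    int main(void)
    {
        struct qset qs = { .id = 7 };
        printf("qset id via rspq: %d\n", rspq_to_qset(&qs.rspq)->id);
        return 0;
    }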
/drivers/net/wireless/mediatek/mt76/
  dma.c:
    184  mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
    186          Q_WRITE(dev, q, desc_base, q->desc_dma);
    187          Q_WRITE(dev, q, ring_size, q->ndesc);
    188          q->head = Q_READ(dev, q, dma_idx);
    189          q->tail = q->head;
    193  mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
    197          if (!q || !q->ndesc)
    201          for (i = 0; i < q->ndesc; i++)
    202                  q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
    204          Q_WRITE(dev, q, cpu_idx, 0);
    [all …]
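mt76_dma_queue_reset() hands every descriptor back to software by setting the DMA_DONE ownership bit, then rewinds the indices. A toy model of that reset sequence (the register writes are stubbed out; the bit value and queue layout are assumptions):

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_DONE (1u << 31)   /* ownership bit: set = owned by software */
    #define NDESC 4

    struct desc { uint32_t ctrl; };

    struct queue {
        struct desc desc[NDESC];
        unsigned int head, tail;
    };

    /* Reset: mark all descriptors done and rewind the indices, mirroring the
     * "mark all DMA_DONE, zero cpu/dma idx" sequence in the listing above. */
    static void queue_reset(struct queue *q)
    {
        for (unsigned int i = 0; i < NDESC; i++)
            q->desc[i].ctrl = DMA_DONE;
        q->head = q->tail = 0;
        /* hardware index registers would be rewritten here */
    }

    int main(void)
    {
        struct queue q = { .head = 2, .tail = 3 };

        queue_reset(&q);
        printf("head %u tail %u ctrl0 0x%08x\n",
               q.head, q.tail, (unsigned)q.desc[0].ctrl);
        return 0;
    }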
  usb.c:
    320  mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
    329                  data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
    333                  sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
    346          urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
    353  mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
    356          enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
    360                  return mt76u_fill_rx_sg(dev, q, urb, nsgs);
    362          urb->transfer_buffer_length = q->buf_size;
    363          urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
    390  mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
    [all …]
  sdio.c:
    306          struct mt76_queue *q = &dev->q_rx[qid];     in mt76s_alloc_rx_queue()
    308          spin_lock_init(&q->lock);
    309          q->entry = devm_kcalloc(dev->dev,
    310                                  MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
    312          if (!q->entry)
    315          q->ndesc = MT76S_NUM_RX_ENTRIES;
    316          q->head = q->tail = 0;
    317          q->queued = 0;
    325          struct mt76_queue *q;                       in mt76s_alloc_tx_queue()
    327          q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
    [all …]
/drivers/gpu/drm/amd/amdkfd/
  kfd_queue.c:
     28  void print_queue_properties(struct queue_properties *q)
     30          if (!q)
     34          pr_debug("Queue Type: %u\n", q->type);
     35          pr_debug("Queue Size: %llu\n", q->queue_size);
     36          pr_debug("Queue percent: %u\n", q->queue_percent);
     37          pr_debug("Queue Address: 0x%llX\n", q->queue_address);
     38          pr_debug("Queue Id: %u\n", q->queue_id);
     39          pr_debug("Queue Process Vmid: %u\n", q->vmid);
     40          pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
     41          pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
    [all …]
  kfd_device_queue_manager.c:
     61                                  struct queue *q);
     64                                  struct queue *q);
     65  static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
     67                          struct queue *q, const uint32_t *restore_sdma_id);
    188  static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
    209          queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
    210          queue_input.inprocess_gang_priority = q->properties.priority;
    213          queue_input.doorbell_offset = q->properties.doorbell_off;
    214          queue_input.mqd_addr = q->gart_mqd_addr;
    215          queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
    [all …]
  kfd_process_queue_manager.c:
     38          if ((pqn->q && pqn->q->properties.queue_id == qid) ||      in get_queue_by_qid()
    110          if (pqn->q)                                 in pqm_set_gws()
    111                  dev = pqn->q->device;
    134                                          pqn->q->gws);
    137                  pqn->q->gws = mem;
    145          pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
    150          return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
    151                                                       pqn->q, NULL);
    180          dev = pqn->q->device;                       in pqm_clean_queue_resource()
    188          if (pqn->q->gws) {
    [all …]
/drivers/iommu/arm/arm-smmu-v3/
  arm-smmu-v3.h:
     23  #define Q_ENT(q, p)     ((q)->base +                    \
     24                           Q_IDX(&((q)->llq), p) *        \
     25                           (q)->ent_dwords)
     94          struct arm_smmu_queue           q;          member
    106          struct arm_smmu_queue           q;          member
    112          struct arm_smmu_queue           q;          member
    288                          struct arm_smmu_queue *q,
    375  static bool __maybe_unused queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
    379          prod = Q_IDX(q, q->prod);
    380          cons = Q_IDX(q, q->cons);
    [all …]
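Q_IDX()/Q_WRP() split prod and cons into an index plus one extra wrap bit, which is what lets queue_has_space() distinguish a completely full queue from an empty one when the indexes are equal. A standalone version of that space computation for a 16-entry queue (LOG2_SIZE replaces the driver's per-queue max_n_shift):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOG2_SIZE 4                          /* 16-entry queue */
    #define Q_IDX(p)  ((p) & ((1u << LOG2_SIZE) - 1))
    #define Q_WRP(p)  ((p) & (1u << LOG2_SIZE))  /* wrap bit above the index */

    /* With a wrap bit, prod == cons means empty, while equal indexes with
     * different wrap bits mean the queue is completely full. */
    static bool queue_has_space(uint32_t prod, uint32_t cons, uint32_t n)
    {
        uint32_t space;

        if (Q_WRP(prod) == Q_WRP(cons))
            space = (1u << LOG2_SIZE) - (Q_IDX(prod) - Q_IDX(cons));
        else
            space = Q_IDX(cons) - Q_IDX(prod);

        return space >= n;
    }

    int main(void)
    {
        /* Same index, different wrap bit: the queue is full. */
        printf("space for 1? %d\n", queue_has_space(1u << LOG2_SIZE, 0, 1));
        return 0;
    }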
/drivers/net/ethernet/pensando/ionic/
  ionic_txrx.c:
     13  static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
     16          ionic_q_post(q, ring_dbell, cb_func, cb_arg);
     19  static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
     22          ionic_q_post(q, ring_dbell, cb_func, cb_arg);
     25  bool ionic_txq_poke_doorbell(struct ionic_queue *q)
     31          netdev = q->lif->netdev;
     32          netdev_txq = netdev_get_tx_queue(netdev, q->index);
     36          if (q->tail_idx == q->head_idx) {
     42          then = q->dbell_jiffies;
     45          if (dif > q->dbell_deadline) {
    [all …]
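ionic_txq_poke_doorbell() skips the doorbell when nothing is posted (tail == head) and re-rings it only when the last ring is older than dbell_deadline, recovering from a potentially missed doorbell without hammering the register. A simplified model with jiffies replaced by a plain counter (locking and the netdev_txq handling are dropped):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct queue {
        unsigned int head_idx, tail_idx;
        uint64_t dbell_jiffies;    /* time of the last doorbell ring */
        uint64_t dbell_deadline;   /* max silence before we must ring again */
    };

    static void ring_doorbell(struct queue *q, uint64_t now)
    {
        /* hardware write elided; just record when we rang */
        q->dbell_jiffies = now;
    }

    /* Re-ring the doorbell only when there is outstanding work and the last
     * ring is older than the deadline (guards against a missed doorbell). */
    static bool poke_doorbell(struct queue *q, uint64_t now)
    {
        if (q->tail_idx == q->head_idx)
            return false;          /* nothing posted, nothing to poke */

        if (now - q->dbell_jiffies > q->dbell_deadline)
            ring_doorbell(q, now);

        return true;
    }

    int main(void)
    {
        struct queue q = { .head_idx = 3, .tail_idx = 1, .dbell_deadline = 4 };

        printf("poked: %d (rang at %llu)\n", poke_doorbell(&q, 10),
               (unsigned long long)q.dbell_jiffies);
        return 0;
    }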
/drivers/accel/habanalabs/common/
  hw_queue.c:
     31  static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
     33          int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
     44          struct hl_hw_queue *q;                      in hl_hw_queue_update_ci()
     50          q = &hdev->kernel_queues[0];
     53          if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
     61          for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
     62                  if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
     63                          atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
     83  void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
     88          bd = q->kernel_address;
    [all …]
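queue_free_slots() derives occupancy from the distance between the producer index pi and the consumer index ci. A stripped-down model treating both as free-running unsigned counters (the real helper also normalizes ci through queue_ci_get() and handles the negative-delta case explicitly):

    #include <stdio.h>

    /* pi and ci are free-running counters; their difference is the number of
     * occupied slots, so unsigned wraparound needs no special casing as long
     * as the queue length stays well below the counter range. */
    static int queue_free_slots(unsigned int pi, unsigned int ci,
                                unsigned int queue_len)
    {
        return (int)(queue_len - (pi - ci));
    }

    int main(void)
    {
        /* 8 submitted, 5 completed, 16 deep: 13 slots free. */
        printf("free: %d\n", queue_free_slots(8, 5, 16));
        return 0;
    }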
/drivers/net/ethernet/mediatek/
  mtk_wed_wo.c:
     91  mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
     95          mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
     99  mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
    102          int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
    103          int index = (q->tail + 1) % q->n_desc;
    108          if (!q->queued)
    112                  q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
    113          else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
    116          q->tail = index;
    117          q->queued--;
    [all …]
/drivers/net/
  tap.c:
     35  static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
     37          return q->flags & TAP_VNET_BE ? false :
     41  static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
     43          int s = !!(q->flags & TAP_VNET_BE);
     51  static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
     59                  q->flags |= TAP_VNET_BE;
     61                  q->flags &= ~TAP_VNET_BE;
     66  static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
     71  static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
     76  static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
    [all …]
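tap_get_vnet_be()/tap_set_vnet_be() expose the TAP_VNET_BE flag through an ioctl-style int: read the bit with a double negation, then set or clear it on write. A userspace model of the pair (a plain pointer stands in for the __user pointer plus get_user()/put_user(); the flag value here is arbitrary):

    #include <stdio.h>

    #define TAP_VNET_BE 0x40000000u

    struct tap_queue { unsigned int flags; };

    /* Model of the TUNGETVNETBE handler: in the kernel the int travels
     * through put_user(); here a plain pointer stands in. */
    static long tap_get_vnet_be(const struct tap_queue *q, int *sp)
    {
        *sp = !!(q->flags & TAP_VNET_BE);
        return 0;
    }

    static long tap_set_vnet_be(struct tap_queue *q, const int *sp)
    {
        if (*sp)
            q->flags |= TAP_VNET_BE;
        else
            q->flags &= ~TAP_VNET_BE;
        return 0;
    }

    int main(void)
    {
        struct tap_queue q = { 0 };
        int on = 1, out;

        tap_set_vnet_be(&q, &on);
        tap_get_vnet_be(&q, &out);
        printf("vnet BE: %d\n", out);
        return 0;
    }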
/drivers/net/ethernet/chelsio/cxgb4/
  sge.c:
    208  static inline unsigned int txq_avail(const struct sge_txq *q)
    210          return q->size - 1 - q->in_use;
    313  void free_tx_desc(struct adapter *adap, struct sge_txq *q,
    316          unsigned int cidx = q->cidx;
    319          d = &q->sdesc[cidx];
    330                  if (++cidx == q->size) {
    332                          d = q->sdesc;
    335          q->cidx = cidx;
    341  static inline int reclaimable(const struct sge_txq *q)
    343          int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
    [all …]
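txq_avail() reports size - 1 - in_use because one descriptor is deliberately kept unused, so a full ring never looks identical to an empty one, and free_tx_desc() walks cidx forward with an explicit wrap at q->size. A compact model of both (per-descriptor DMA unmapping and skb freeing are elided):

    #include <stdio.h>

    struct txq { unsigned int size, in_use, cidx, pidx; };

    /* One descriptor is deliberately held back so a completely full ring is
     * never indistinguishable from an empty one. */
    static unsigned int txq_avail(const struct txq *q)
    {
        return q->size - 1 - q->in_use;
    }

    /* Walk the consumer index forward over n completed descriptors. */
    static void free_tx_desc(struct txq *q, unsigned int n)
    {
        unsigned int cidx = q->cidx;

        q->in_use -= n;
        while (n--) {
            /* per-descriptor cleanup (DMA unmap, skb free) elided */
            if (++cidx == q->size)
                cidx = 0;
        }
        q->cidx = cidx;
    }

    int main(void)
    {
        struct txq q = { .size = 8, .in_use = 5, .cidx = 6 };

        free_tx_desc(&q, 3);
        printf("avail %u, cidx %u\n", txq_avail(&q), q.cidx); /* avail 5, cidx 1 */
        return 0;
    }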
/drivers/misc/uacce/
  uacce.c:
     18  static bool uacce_queue_is_valid(struct uacce_queue *q)
     20          return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
     23  static int uacce_start_queue(struct uacce_queue *q)
     27          if (q->state != UACCE_Q_INIT)
     30          if (q->uacce->ops->start_queue) {
     31                  ret = q->uacce->ops->start_queue(q);
     36          q->state = UACCE_Q_STARTED;
     40  static int uacce_put_queue(struct uacce_queue *q)
     42          struct uacce_device *uacce = q->uacce;
     44          if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
    [all …]
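uacce funnels every queue through a tiny state machine: operations first validate the current state (INIT or STARTED), and optional driver hooks like start_queue are called only if provided before the state advances. A userspace sketch of that pattern (the state names and error value are simplified stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    enum q_state { Q_ZOMBIE, Q_INIT, Q_STARTED };

    struct queue;
    struct ops {
        int (*start_queue)(struct queue *q);   /* optional driver hook */
    };

    struct queue {
        enum q_state state;
        const struct ops *ops;
    };

    static bool queue_is_valid(const struct queue *q)
    {
        return q->state == Q_INIT || q->state == Q_STARTED;
    }

    /* Transition INIT -> STARTED, invoking the driver hook only if present. */
    static int start_queue(struct queue *q)
    {
        if (q->state != Q_INIT)
            return -1;             /* -EINVAL in the real driver */

        if (q->ops->start_queue) {
            int ret = q->ops->start_queue(q);
            if (ret < 0)
                return ret;
        }

        q->state = Q_STARTED;
        return 0;
    }

    int main(void)
    {
        static const struct ops no_hooks = { 0 };
        struct queue q = { .state = Q_INIT, .ops = &no_hooks };

        printf("start: %d, valid: %d\n", start_queue(&q), queue_is_valid(&q));
        return 0;
    }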
/drivers/net/ethernet/mellanox/mlxsw/
  pci.c:
     98          struct mlxsw_pci_queue *q;                  member
    133  static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
    135          tasklet_schedule(&q->tasklet);
    138  static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
    141          return q->mem_item.buf + (elem_size * elem_index);
    145  mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
    147          return &q->elem_info[elem_index];
    151  mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
    153          int index = q->producer_counter & (q->count - 1);
    155          if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
    [all …]